Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pathnode.c
4 : * Routines to manipulate pathlists and create path nodes
5 : *
6 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/optimizer/util/pathnode.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include "access/htup_details.h"
18 : #include "executor/nodeSetOp.h"
19 : #include "foreign/fdwapi.h"
20 : #include "miscadmin.h"
21 : #include "nodes/extensible.h"
22 : #include "optimizer/appendinfo.h"
23 : #include "optimizer/clauses.h"
24 : #include "optimizer/cost.h"
25 : #include "optimizer/optimizer.h"
26 : #include "optimizer/pathnode.h"
27 : #include "optimizer/paths.h"
28 : #include "optimizer/planmain.h"
29 : #include "optimizer/tlist.h"
30 : #include "parser/parsetree.h"
31 : #include "utils/memutils.h"
32 : #include "utils/selfuncs.h"
33 :
34 : typedef enum
35 : {
36 : COSTS_EQUAL, /* path costs are fuzzily equal */
37 : COSTS_BETTER1, /* first path is cheaper than second */
38 : COSTS_BETTER2, /* second path is cheaper than first */
39 : COSTS_DIFFERENT, /* neither path dominates the other on cost */
40 : } PathCostComparison;
41 :
42 : /*
43 : * STD_FUZZ_FACTOR is the normal fuzz factor for compare_path_costs_fuzzily.
44 : * XXX is it worth making this user-controllable? It provides a tradeoff
45 : * between planner runtime and the accuracy of path cost comparisons.
46 : */
47 : #define STD_FUZZ_FACTOR 1.01
48 :
49 : static int append_total_cost_compare(const ListCell *a, const ListCell *b);
50 : static int append_startup_cost_compare(const ListCell *a, const ListCell *b);
51 : static List *reparameterize_pathlist_by_child(PlannerInfo *root,
52 : List *pathlist,
53 : RelOptInfo *child_rel);
54 : static bool pathlist_is_reparameterizable_by_child(List *pathlist,
55 : RelOptInfo *child_rel);
56 :
57 :
58 : /*****************************************************************************
59 : * MISC. PATH UTILITIES
60 : *****************************************************************************/
61 :
62 : /*
63 : * compare_path_costs
64 : * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
65 : * or more expensive than path2 for the specified criterion.
66 : */
67 : int
68 1160208 : compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
69 : {
70 : /* Number of disabled nodes, if different, trumps all else. */
71 1160208 : if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
72 : {
73 2712 : if (path1->disabled_nodes < path2->disabled_nodes)
74 2712 : return -1;
75 : else
76 0 : return +1;
77 : }
78 :
79 1157496 : if (criterion == STARTUP_COST)
80 : {
81 583448 : if (path1->startup_cost < path2->startup_cost)
82 357304 : return -1;
83 226144 : if (path1->startup_cost > path2->startup_cost)
84 111812 : return +1;
85 :
86 : /*
87 : * If paths have the same startup cost (not at all unlikely), order
88 : * them by total cost.
89 : */
90 114332 : if (path1->total_cost < path2->total_cost)
91 58010 : return -1;
92 56322 : if (path1->total_cost > path2->total_cost)
93 5282 : return +1;
94 : }
95 : else
96 : {
97 574048 : if (path1->total_cost < path2->total_cost)
98 533396 : return -1;
99 40652 : if (path1->total_cost > path2->total_cost)
100 10886 : return +1;
101 :
102 : /*
103 : * If paths have the same total cost, order them by startup cost.
104 : */
105 29766 : if (path1->startup_cost < path2->startup_cost)
106 2686 : return -1;
107 27080 : if (path1->startup_cost > path2->startup_cost)
108 26 : return +1;
109 : }
110 78094 : return 0;
111 : }
112 :
113 : /*
114 : * compare_fractional_path_costs
115 : * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
116 : * or more expensive than path2 for fetching the specified fraction
117 : * of the total tuples.
118 : *
119 : * If fraction is <= 0 or > 1, we interpret it as 1, ie, we select the
120 : * path with the cheaper total_cost.
121 : */
122 : int
123 5736 : compare_fractional_path_costs(Path *path1, Path *path2,
124 : double fraction)
125 : {
126 : Cost cost1,
127 : cost2;
128 :
129 : /* Number of disabled nodes, if different, trumps all else. */
130 5736 : if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
131 : {
132 36 : if (path1->disabled_nodes < path2->disabled_nodes)
133 36 : return -1;
134 : else
135 0 : return +1;
136 : }
137 :
138 5700 : if (fraction <= 0.0 || fraction >= 1.0)
139 1512 : return compare_path_costs(path1, path2, TOTAL_COST);
140 4188 : cost1 = path1->startup_cost +
141 4188 : fraction * (path1->total_cost - path1->startup_cost);
142 4188 : cost2 = path2->startup_cost +
143 4188 : fraction * (path2->total_cost - path2->startup_cost);
144 4188 : if (cost1 < cost2)
145 3514 : return -1;
146 674 : if (cost1 > cost2)
147 674 : return +1;
148 0 : return 0;
149 : }
150 :
151 : /*
152 : * compare_path_costs_fuzzily
153 : * Compare the costs of two paths to see if either can be said to
154 : * dominate the other.
155 : *
156 : * We use fuzzy comparisons so that add_path() can avoid keeping both of
157 : * a pair of paths that really have insignificantly different cost.
158 : *
159 : * The fuzz_factor argument must be 1.0 plus delta, where delta is the
160 : * fraction of the smaller cost that is considered to be a significant
161 : * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
162 : * be 1% of the smaller cost.
163 : *
164 : * The two paths are said to have "equal" costs if both startup and total
165 : * costs are fuzzily the same. Path1 is said to be better than path2 if
166 : * it has fuzzily better startup cost and fuzzily no worse total cost,
167 : * or if it has fuzzily better total cost and fuzzily no worse startup cost.
168 : * Path2 is better than path1 if the reverse holds. Finally, if one path
169 : * is fuzzily better than the other on startup cost and fuzzily worse on
170 : * total cost, we just say that their costs are "different", since neither
171 : * dominates the other across the whole performance spectrum.
172 : *
173 : * This function also enforces a policy rule that paths for which the relevant
174 : * one of parent->consider_startup and parent->consider_param_startup is false
175 : * cannot survive comparisons solely on the grounds of good startup cost, so
176 : * we never return COSTS_DIFFERENT when that is true for the total-cost loser.
177 : * (But if total costs are fuzzily equal, we compare startup costs anyway,
178 : * in hopes of eliminating one path or the other.)
179 : */
180 : static PathCostComparison
181 4800422 : compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
182 : {
183 : #define CONSIDER_PATH_STARTUP_COST(p) \
184 : ((p)->param_info == NULL ? (p)->parent->consider_startup : (p)->parent->consider_param_startup)
185 :
186 : /* Number of disabled nodes, if different, trumps all else. */
187 4800422 : if (unlikely(path1->disabled_nodes != path2->disabled_nodes))
188 : {
189 37190 : if (path1->disabled_nodes < path2->disabled_nodes)
190 23192 : return COSTS_BETTER1;
191 : else
192 13998 : return COSTS_BETTER2;
193 : }
194 :
195 : /*
196 : * Check total cost first since it's more likely to be different; many
197 : * paths have zero startup cost.
198 : */
199 4763232 : if (path1->total_cost > path2->total_cost * fuzz_factor)
200 : {
201 : /* path1 fuzzily worse on total cost */
202 2500762 : if (CONSIDER_PATH_STARTUP_COST(path1) &&
203 137236 : path2->startup_cost > path1->startup_cost * fuzz_factor)
204 : {
205 : /* ... but path2 fuzzily worse on startup, so DIFFERENT */
206 91428 : return COSTS_DIFFERENT;
207 : }
208 : /* else path2 dominates */
209 2409334 : return COSTS_BETTER2;
210 : }
211 2262470 : if (path2->total_cost > path1->total_cost * fuzz_factor)
212 : {
213 : /* path2 fuzzily worse on total cost */
214 1164516 : if (CONSIDER_PATH_STARTUP_COST(path2) &&
215 60376 : path1->startup_cost > path2->startup_cost * fuzz_factor)
216 : {
217 : /* ... but path1 fuzzily worse on startup, so DIFFERENT */
218 39594 : return COSTS_DIFFERENT;
219 : }
220 : /* else path1 dominates */
221 1124922 : return COSTS_BETTER1;
222 : }
223 : /* fuzzily the same on total cost ... */
224 1097954 : if (path1->startup_cost > path2->startup_cost * fuzz_factor)
225 : {
226 : /* ... but path1 fuzzily worse on startup, so path2 wins */
227 395864 : return COSTS_BETTER2;
228 : }
229 702090 : if (path2->startup_cost > path1->startup_cost * fuzz_factor)
230 : {
231 : /* ... but path2 fuzzily worse on startup, so path1 wins */
232 69326 : return COSTS_BETTER1;
233 : }
234 : /* fuzzily the same on both costs */
235 632764 : return COSTS_EQUAL;
236 :
237 : #undef CONSIDER_PATH_STARTUP_COST
238 : }
239 :
240 : /*
241 : * set_cheapest
242 : * Find the minimum-cost paths from among a relation's paths,
243 : * and save them in the rel's cheapest-path fields.
244 : *
245 : * cheapest_total_path is normally the cheapest-total-cost unparameterized
246 : * path; but if there are no unparameterized paths, we assign it to be the
247 : * best (cheapest least-parameterized) parameterized path. However, only
248 : * unparameterized paths are considered candidates for cheapest_startup_path,
249 : * so that will be NULL if there are no unparameterized paths.
250 : *
251 : * The cheapest_parameterized_paths list collects all parameterized paths
252 : * that have survived the add_path() tournament for this relation. (Since
253 : * add_path ignores pathkeys for a parameterized path, these will be paths
254 : * that have best cost or best row count for their parameterization. We
255 : * may also have both a parallel-safe and a non-parallel-safe path in some
256 : * cases for the same parameterization in some cases, but this should be
257 : * relatively rare since, most typically, all paths for the same relation
258 : * will be parallel-safe or none of them will.)
259 : *
260 : * cheapest_parameterized_paths always includes the cheapest-total
261 : * unparameterized path, too, if there is one; the users of that list find
262 : * it more convenient if that's included.
263 : *
264 : * This is normally called only after we've finished constructing the path
265 : * list for the rel node.
266 : */
267 : void
268 2186428 : set_cheapest(RelOptInfo *parent_rel)
269 : {
270 : Path *cheapest_startup_path;
271 : Path *cheapest_total_path;
272 : Path *best_param_path;
273 : List *parameterized_paths;
274 : ListCell *p;
275 :
276 : Assert(IsA(parent_rel, RelOptInfo));
277 :
278 2186428 : if (parent_rel->pathlist == NIL)
279 0 : elog(ERROR, "could not devise a query plan for the given query");
280 :
281 2186428 : cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
282 2186428 : parameterized_paths = NIL;
283 :
284 4963790 : foreach(p, parent_rel->pathlist)
285 : {
286 2777362 : Path *path = (Path *) lfirst(p);
287 : int cmp;
288 :
289 2777362 : if (path->param_info)
290 : {
291 : /* Parameterized path, so add it to parameterized_paths */
292 146876 : parameterized_paths = lappend(parameterized_paths, path);
293 :
294 : /*
295 : * If we have an unparameterized cheapest-total, we no longer care
296 : * about finding the best parameterized path, so move on.
297 : */
298 146876 : if (cheapest_total_path)
299 30550 : continue;
300 :
301 : /*
302 : * Otherwise, track the best parameterized path, which is the one
303 : * with least total cost among those of the minimum
304 : * parameterization.
305 : */
306 116326 : if (best_param_path == NULL)
307 106022 : best_param_path = path;
308 : else
309 : {
310 10304 : switch (bms_subset_compare(PATH_REQ_OUTER(path),
311 10304 : PATH_REQ_OUTER(best_param_path)))
312 : {
313 60 : case BMS_EQUAL:
314 : /* keep the cheaper one */
315 60 : if (compare_path_costs(path, best_param_path,
316 : TOTAL_COST) < 0)
317 0 : best_param_path = path;
318 60 : break;
319 764 : case BMS_SUBSET1:
320 : /* new path is less-parameterized */
321 764 : best_param_path = path;
322 764 : break;
323 6 : case BMS_SUBSET2:
324 : /* old path is less-parameterized, keep it */
325 6 : break;
326 9474 : case BMS_DIFFERENT:
327 :
328 : /*
329 : * This means that neither path has the least possible
330 : * parameterization for the rel. We'll sit on the old
331 : * path until something better comes along.
332 : */
333 9474 : break;
334 : }
335 : }
336 : }
337 : else
338 : {
339 : /* Unparameterized path, so consider it for cheapest slots */
340 2630486 : if (cheapest_total_path == NULL)
341 : {
342 2173094 : cheapest_startup_path = cheapest_total_path = path;
343 2173094 : continue;
344 : }
345 :
346 : /*
347 : * If we find two paths of identical costs, try to keep the
348 : * better-sorted one. The paths might have unrelated sort
349 : * orderings, in which case we can only guess which might be
350 : * better to keep, but if one is superior then we definitely
351 : * should keep that one.
352 : */
353 457392 : cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
354 457392 : if (cmp > 0 ||
355 400 : (cmp == 0 &&
356 400 : compare_pathkeys(cheapest_startup_path->pathkeys,
357 : path->pathkeys) == PATHKEYS_BETTER2))
358 81860 : cheapest_startup_path = path;
359 :
360 457392 : cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
361 457392 : if (cmp > 0 ||
362 48 : (cmp == 0 &&
363 48 : compare_pathkeys(cheapest_total_path->pathkeys,
364 : path->pathkeys) == PATHKEYS_BETTER2))
365 0 : cheapest_total_path = path;
366 : }
367 : }
368 :
369 : /* Add cheapest unparameterized path, if any, to parameterized_paths */
370 2186428 : if (cheapest_total_path)
371 2173094 : parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
372 :
373 : /*
374 : * If there is no unparameterized path, use the best parameterized path as
375 : * cheapest_total_path (but not as cheapest_startup_path).
376 : */
377 2186428 : if (cheapest_total_path == NULL)
378 13334 : cheapest_total_path = best_param_path;
379 : Assert(cheapest_total_path != NULL);
380 :
381 2186428 : parent_rel->cheapest_startup_path = cheapest_startup_path;
382 2186428 : parent_rel->cheapest_total_path = cheapest_total_path;
383 2186428 : parent_rel->cheapest_parameterized_paths = parameterized_paths;
384 2186428 : }
385 :
386 : /*
387 : * add_path
388 : * Consider a potential implementation path for the specified parent rel,
389 : * and add it to the rel's pathlist if it is worthy of consideration.
390 : *
391 : * A path is worthy if it has a better sort order (better pathkeys) or
392 : * cheaper cost (as defined below), or generates fewer rows, than any
393 : * existing path that has the same or superset parameterization rels. We
394 : * also consider parallel-safe paths more worthy than others.
395 : *
396 : * Cheaper cost can mean either a cheaper total cost or a cheaper startup
397 : * cost; if one path is cheaper in one of these aspects and another is
398 : * cheaper in the other, we keep both. However, when some path type is
399 : * disabled (e.g. due to enable_seqscan=false), the number of times that
400 : * a disabled path type is used is considered to be a higher-order
401 : * component of the cost. Hence, if path A uses no disabled path type,
402 : * and path B uses 1 or more disabled path types, A is cheaper, no matter
403 : * what we estimate for the startup and total costs. The startup and total
404 : * cost essentially act as a tiebreak when comparing paths that use equal
405 : * numbers of disabled path nodes; but in practice this tiebreak is almost
406 : * always used, since normally no path types are disabled.
407 : *
408 : * In addition to possibly adding new_path, we also remove from the rel's
409 : * pathlist any old paths that are dominated by new_path --- that is,
410 : * new_path is cheaper, at least as well ordered, generates no more rows,
411 : * requires no outer rels not required by the old path, and is no less
412 : * parallel-safe.
413 : *
414 : * In most cases, a path with a superset parameterization will generate
415 : * fewer rows (since it has more join clauses to apply), so that those two
416 : * figures of merit move in opposite directions; this means that a path of
417 : * one parameterization can seldom dominate a path of another. But such
418 : * cases do arise, so we make the full set of checks anyway.
419 : *
420 : * There are two policy decisions embedded in this function, along with
421 : * its sibling add_path_precheck. First, we treat all parameterized paths
422 : * as having NIL pathkeys, so that they cannot win comparisons on the
423 : * basis of sort order. This is to reduce the number of parameterized
424 : * paths that are kept; see discussion in src/backend/optimizer/README.
425 : *
426 : * Second, we only consider cheap startup cost to be interesting if
427 : * parent_rel->consider_startup is true for an unparameterized path, or
428 : * parent_rel->consider_param_startup is true for a parameterized one.
429 : * Again, this allows discarding useless paths sooner.
430 : *
431 : * The pathlist is kept sorted by disabled_nodes and then by total_cost,
432 : * with cheaper paths at the front. Within this routine, that's simply a
433 : * speed hack: doing it that way makes it more likely that we will reject
434 : * an inferior path after a few comparisons, rather than many comparisons.
435 : * However, add_path_precheck relies on this ordering to exit early
436 : * when possible.
437 : *
438 : * NOTE: discarded Path objects are immediately pfree'd to reduce planner
439 : * memory consumption. We dare not try to free the substructure of a Path,
440 : * since much of it may be shared with other Paths or the query tree itself;
441 : * but just recycling discarded Path nodes is a very useful savings in
442 : * a large join tree. We can recycle the List nodes of pathlist, too.
443 : *
444 : * As noted in optimizer/README, deleting a previously-accepted Path is
445 : * safe because we know that Paths of this rel cannot yet be referenced
446 : * from any other rel, such as a higher-level join. However, in some cases
447 : * it is possible that a Path is referenced by another Path for its own
448 : * rel; we must not delete such a Path, even if it is dominated by the new
449 : * Path. Currently this occurs only for IndexPath objects, which may be
450 : * referenced as children of BitmapHeapPaths as well as being paths in
451 : * their own right. Hence, we don't pfree IndexPaths when rejecting them.
452 : *
453 : * 'parent_rel' is the relation entry to which the path corresponds.
454 : * 'new_path' is a potential path for parent_rel.
455 : *
456 : * Returns nothing, but modifies parent_rel->pathlist.
457 : */
458 : void
459 4794808 : add_path(RelOptInfo *parent_rel, Path *new_path)
460 : {
461 4794808 : bool accept_new = true; /* unless we find a superior old path */
462 4794808 : int insert_at = 0; /* where to insert new item */
463 : List *new_path_pathkeys;
464 : ListCell *p1;
465 :
466 : /*
467 : * This is a convenient place to check for query cancel --- no part of the
468 : * planner goes very long without calling add_path().
469 : */
470 4794808 : CHECK_FOR_INTERRUPTS();
471 :
472 : /* Pretend parameterized paths have no pathkeys, per comment above */
473 4794808 : new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
474 :
475 : /*
476 : * Loop to check proposed new path against old paths. Note it is possible
477 : * for more than one old path to be tossed out because new_path dominates
478 : * it.
479 : */
480 7392440 : foreach(p1, parent_rel->pathlist)
481 : {
482 4408248 : Path *old_path = (Path *) lfirst(p1);
483 4408248 : bool remove_old = false; /* unless new proves superior */
484 : PathCostComparison costcmp;
485 : PathKeysComparison keyscmp;
486 : BMS_Comparison outercmp;
487 :
488 : /*
489 : * Do a fuzzy cost comparison with standard fuzziness limit.
490 : */
491 4408248 : costcmp = compare_path_costs_fuzzily(new_path, old_path,
492 : STD_FUZZ_FACTOR);
493 :
494 : /*
495 : * If the two paths compare differently for startup and total cost,
496 : * then we want to keep both, and we can skip comparing pathkeys and
497 : * required_outer rels. If they compare the same, proceed with the
498 : * other comparisons. Row count is checked last. (We make the tests
499 : * in this order because the cost comparison is most likely to turn
500 : * out "different", and the pathkeys comparison next most likely. As
501 : * explained above, row count very seldom makes a difference, so even
502 : * though it's cheap to compare there's not much point in checking it
503 : * earlier.)
504 : */
505 4408248 : if (costcmp != COSTS_DIFFERENT)
506 : {
507 : /* Similarly check to see if either dominates on pathkeys */
508 : List *old_path_pathkeys;
509 :
510 4277292 : old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
511 4277292 : keyscmp = compare_pathkeys(new_path_pathkeys,
512 : old_path_pathkeys);
513 4277292 : if (keyscmp != PATHKEYS_DIFFERENT)
514 : {
515 4082350 : switch (costcmp)
516 : {
517 428310 : case COSTS_EQUAL:
518 428310 : outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
519 428310 : PATH_REQ_OUTER(old_path));
520 428310 : if (keyscmp == PATHKEYS_BETTER1)
521 : {
522 9188 : if ((outercmp == BMS_EQUAL ||
523 9188 : outercmp == BMS_SUBSET1) &&
524 9188 : new_path->rows <= old_path->rows &&
525 9180 : new_path->parallel_safe >= old_path->parallel_safe)
526 9180 : remove_old = true; /* new dominates old */
527 : }
528 419122 : else if (keyscmp == PATHKEYS_BETTER2)
529 : {
530 20966 : if ((outercmp == BMS_EQUAL ||
531 20966 : outercmp == BMS_SUBSET2) &&
532 20966 : new_path->rows >= old_path->rows &&
533 20964 : new_path->parallel_safe <= old_path->parallel_safe)
534 20964 : accept_new = false; /* old dominates new */
535 : }
536 : else /* keyscmp == PATHKEYS_EQUAL */
537 : {
538 398156 : if (outercmp == BMS_EQUAL)
539 : {
540 : /*
541 : * Same pathkeys and outer rels, and fuzzily
542 : * the same cost, so keep just one; to decide
543 : * which, first check parallel-safety, then
544 : * rows, then do a fuzzy cost comparison with
545 : * very small fuzz limit. (We used to do an
546 : * exact cost comparison, but that results in
547 : * annoying platform-specific plan variations
548 : * due to roundoff in the cost estimates.) If
549 : * things are still tied, arbitrarily keep
550 : * only the old path. Notice that we will
551 : * keep only the old path even if the
552 : * less-fuzzy comparison decides the startup
553 : * and total costs compare differently.
554 : */
555 392524 : if (new_path->parallel_safe >
556 392524 : old_path->parallel_safe)
557 42 : remove_old = true; /* new dominates old */
558 392482 : else if (new_path->parallel_safe <
559 392482 : old_path->parallel_safe)
560 54 : accept_new = false; /* old dominates new */
561 392428 : else if (new_path->rows < old_path->rows)
562 36 : remove_old = true; /* new dominates old */
563 392392 : else if (new_path->rows > old_path->rows)
564 218 : accept_new = false; /* old dominates new */
565 392174 : else if (compare_path_costs_fuzzily(new_path,
566 : old_path,
567 : 1.0000000001) == COSTS_BETTER1)
568 17618 : remove_old = true; /* new dominates old */
569 : else
570 374556 : accept_new = false; /* old equals or
571 : * dominates new */
572 : }
573 5632 : else if (outercmp == BMS_SUBSET1 &&
574 1192 : new_path->rows <= old_path->rows &&
575 1172 : new_path->parallel_safe >= old_path->parallel_safe)
576 1172 : remove_old = true; /* new dominates old */
577 4460 : else if (outercmp == BMS_SUBSET2 &&
578 3718 : new_path->rows >= old_path->rows &&
579 3432 : new_path->parallel_safe <= old_path->parallel_safe)
580 3432 : accept_new = false; /* old dominates new */
581 : /* else different parameterizations, keep both */
582 : }
583 428310 : break;
584 1172192 : case COSTS_BETTER1:
585 1172192 : if (keyscmp != PATHKEYS_BETTER2)
586 : {
587 796496 : outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
588 796496 : PATH_REQ_OUTER(old_path));
589 796496 : if ((outercmp == BMS_EQUAL ||
590 683764 : outercmp == BMS_SUBSET1) &&
591 683764 : new_path->rows <= old_path->rows &&
592 678968 : new_path->parallel_safe >= old_path->parallel_safe)
593 676252 : remove_old = true; /* new dominates old */
594 : }
595 1172192 : break;
596 2481848 : case COSTS_BETTER2:
597 2481848 : if (keyscmp != PATHKEYS_BETTER1)
598 : {
599 1586146 : outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
600 1586146 : PATH_REQ_OUTER(old_path));
601 1586146 : if ((outercmp == BMS_EQUAL ||
602 1496060 : outercmp == BMS_SUBSET2) &&
603 1496060 : new_path->rows >= old_path->rows &&
604 1413230 : new_path->parallel_safe <= old_path->parallel_safe)
605 1411392 : accept_new = false; /* old dominates new */
606 : }
607 2481848 : break;
608 0 : case COSTS_DIFFERENT:
609 :
610 : /*
611 : * can't get here, but keep this case to keep compiler
612 : * quiet
613 : */
614 0 : break;
615 : }
616 : }
617 : }
618 :
619 : /*
620 : * Remove current element from pathlist if dominated by new.
621 : */
622 4408248 : if (remove_old)
623 : {
624 704300 : parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
625 : p1);
626 :
627 : /*
628 : * Delete the data pointed-to by the deleted cell, if possible
629 : */
630 704300 : if (!IsA(old_path, IndexPath))
631 678852 : pfree(old_path);
632 : }
633 : else
634 : {
635 : /*
636 : * new belongs after this old path if it has more disabled nodes
637 : * or if it has the same number of nodes but a greater total cost
638 : */
639 3703948 : if (new_path->disabled_nodes > old_path->disabled_nodes ||
640 3689950 : (new_path->disabled_nodes == old_path->disabled_nodes &&
641 3688976 : new_path->total_cost >= old_path->total_cost))
642 3084806 : insert_at = foreach_current_index(p1) + 1;
643 : }
644 :
645 : /*
646 : * If we found an old path that dominates new_path, we can quit
647 : * scanning the pathlist; we will not add new_path, and we assume
648 : * new_path cannot dominate any other elements of the pathlist.
649 : */
650 4408248 : if (!accept_new)
651 1810616 : break;
652 : }
653 :
654 4794808 : if (accept_new)
655 : {
656 : /* Accept the new path: insert it at proper place in pathlist */
657 2984192 : parent_rel->pathlist =
658 2984192 : list_insert_nth(parent_rel->pathlist, insert_at, new_path);
659 : }
660 : else
661 : {
662 : /* Reject and recycle the new path */
663 1810616 : if (!IsA(new_path, IndexPath))
664 1704782 : pfree(new_path);
665 : }
666 4794808 : }
667 :
668 : /*
669 : * add_path_precheck
670 : * Check whether a proposed new path could possibly get accepted.
671 : * We assume we know the path's pathkeys and parameterization accurately,
672 : * and have lower bounds for its costs.
673 : *
674 : * Note that we do not know the path's rowcount, since getting an estimate for
675 : * that is too expensive to do before prechecking. We assume here that paths
676 : * of a superset parameterization will generate fewer rows; if that holds,
677 : * then paths with different parameterizations cannot dominate each other
678 : * and so we can simply ignore existing paths of another parameterization.
679 : * (In the infrequent cases where that rule of thumb fails, add_path will
680 : * get rid of the inferior path.)
681 : *
682 : * At the time this is called, we haven't actually built a Path structure,
683 : * so the required information has to be passed piecemeal.
684 : */
685 : bool
686 5483918 : add_path_precheck(RelOptInfo *parent_rel, int disabled_nodes,
687 : Cost startup_cost, Cost total_cost,
688 : List *pathkeys, Relids required_outer)
689 : {
690 : List *new_path_pathkeys;
691 : bool consider_startup;
692 : ListCell *p1;
693 :
694 : /* Pretend parameterized paths have no pathkeys, per add_path policy */
695 5483918 : new_path_pathkeys = required_outer ? NIL : pathkeys;
696 :
697 : /* Decide whether new path's startup cost is interesting */
698 5483918 : consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
699 :
700 6993152 : foreach(p1, parent_rel->pathlist)
701 : {
702 6632986 : Path *old_path = (Path *) lfirst(p1);
703 : PathKeysComparison keyscmp;
704 :
705 : /*
706 : * Since the pathlist is sorted by disabled_nodes and then by
707 : * total_cost, we can stop looking once we reach a path with more
708 : * disabled nodes, or the same number of disabled nodes plus a
709 : * total_cost larger than the new path's.
710 : */
711 6632986 : if (unlikely(old_path->disabled_nodes != disabled_nodes))
712 : {
713 8604 : if (disabled_nodes < old_path->disabled_nodes)
714 328 : break;
715 : }
716 6624382 : else if (total_cost <= old_path->total_cost * STD_FUZZ_FACTOR)
717 1988648 : break;
718 :
719 : /*
720 : * We are looking for an old_path with the same parameterization (and
721 : * by assumption the same rowcount) that dominates the new path on
722 : * pathkeys as well as both cost metrics. If we find one, we can
723 : * reject the new path.
724 : *
725 : * Cost comparisons here should match compare_path_costs_fuzzily.
726 : */
727 : /* new path can win on startup cost only if consider_startup */
728 4644010 : if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
729 2192250 : !consider_startup)
730 : {
731 : /* new path loses on cost, so check pathkeys... */
732 : List *old_path_pathkeys;
733 :
734 4541550 : old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
735 4541550 : keyscmp = compare_pathkeys(new_path_pathkeys,
736 : old_path_pathkeys);
737 4541550 : if (keyscmp == PATHKEYS_EQUAL ||
738 : keyscmp == PATHKEYS_BETTER2)
739 : {
740 : /* new path does not win on pathkeys... */
741 3204778 : if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
742 : {
743 : /* Found an old path that dominates the new one */
744 3134776 : return false;
745 : }
746 : }
747 : }
748 : }
749 :
750 2349142 : return true;
751 : }
752 :
753 : /*
754 : * add_partial_path
755 : * Like add_path, our goal here is to consider whether a path is worthy
756 : * of being kept around, but the considerations here are a bit different.
757 : * A partial path is one which can be executed in any number of workers in
758 : * parallel such that each worker will generate a subset of the path's
759 : * overall result.
760 : *
761 : * As in add_path, the partial_pathlist is kept sorted with the cheapest
762 : * total path in front. This is depended on by multiple places, which
763 : * just take the front entry as the cheapest path without searching.
764 : *
765 : * We don't generate parameterized partial paths for several reasons. Most
766 : * importantly, they're not safe to execute, because there's nothing to
767 : * make sure that a parallel scan within the parameterized portion of the
768 : * plan is running with the same value in every worker at the same time.
769 : * Fortunately, it seems unlikely to be worthwhile anyway, because having
770 : * each worker scan the entire outer relation and a subset of the inner
771 : * relation will generally be a terrible plan. The inner (parameterized)
772 : * side of the plan will be small anyway. There could be rare cases where
773 : * this wins big - e.g. if join order constraints put a 1-row relation on
774 : * the outer side of the topmost join with a parameterized plan on the inner
775 : * side - but we'll have to be content not to handle such cases until
776 : * somebody builds an executor infrastructure that can cope with them.
777 : *
778 : * Because we don't consider parameterized paths here, we also don't
779 : * need to consider the row counts as a measure of quality: every path will
780 : * produce the same number of rows. Neither do we need to consider startup
781 : * costs: parallelism is only used for plans that will be run to completion.
782 : * Therefore, this routine is much simpler than add_path: it needs to
783 : * consider only disabled nodes, pathkeys and total cost.
784 : *
785 : * As with add_path, we pfree paths that are found to be dominated by
786 : * another partial path; this requires that there be no other references to
787 : * such paths yet. Hence, GatherPaths must not be created for a rel until
788 : * we're done creating all partial paths for it. Unlike add_path, we don't
789 : * take an exception for IndexPaths as partial index paths won't be
790 : * referenced by partial BitmapHeapPaths.
791 : */
792 : void
793 311576 : add_partial_path(RelOptInfo *parent_rel, Path *new_path)
794 : {
795 311576 : bool accept_new = true; /* unless we find a superior old path */
796 311576 : int insert_at = 0; /* where to insert new item */
797 : ListCell *p1;
798 :
799 : /* Check for query cancel. */
800 311576 : CHECK_FOR_INTERRUPTS();
801 :
802 : /* Path to be added must be parallel safe. */
803 : Assert(new_path->parallel_safe);
804 :
805 : /* Relation should be OK for parallelism, too. */
806 : Assert(parent_rel->consider_parallel);
807 :
808 : /*
809 : * As in add_path, throw out any paths which are dominated by the new
810 : * path, but throw out the new path if some existing path dominates it.
811 : */
812 433174 : foreach(p1, parent_rel->partial_pathlist)
813 : {
814 254656 : Path *old_path = (Path *) lfirst(p1);
815 254656 : bool remove_old = false; /* unless new proves superior */
816 : PathKeysComparison keyscmp;
817 :
818 : /* Compare pathkeys. */
819 254656 : keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
820 :
821 : /* Unless pathkeys are incompatible, keep just one of the two paths. */
822 254656 : if (keyscmp != PATHKEYS_DIFFERENT)
823 : {
824 254410 : if (unlikely(new_path->disabled_nodes != old_path->disabled_nodes))
825 : {
826 2192 : if (new_path->disabled_nodes > old_path->disabled_nodes)
827 902 : accept_new = false;
828 : else
829 1290 : remove_old = true;
830 : }
831 252218 : else if (new_path->total_cost > old_path->total_cost
832 252218 : * STD_FUZZ_FACTOR)
833 : {
834 : /* New path costs more; keep it only if pathkeys are better. */
835 124204 : if (keyscmp != PATHKEYS_BETTER1)
836 90026 : accept_new = false;
837 : }
838 128014 : else if (old_path->total_cost > new_path->total_cost
839 128014 : * STD_FUZZ_FACTOR)
840 : {
841 : /* Old path costs more; keep it only if pathkeys are better. */
842 85188 : if (keyscmp != PATHKEYS_BETTER2)
843 30014 : remove_old = true;
844 : }
845 42826 : else if (keyscmp == PATHKEYS_BETTER1)
846 : {
847 : /* Costs are about the same, new path has better pathkeys. */
848 24 : remove_old = true;
849 : }
850 42802 : else if (keyscmp == PATHKEYS_BETTER2)
851 : {
852 : /* Costs are about the same, old path has better pathkeys. */
853 2064 : accept_new = false;
854 : }
855 40738 : else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
856 : {
857 : /* Pathkeys are the same, and the old path costs more. */
858 672 : remove_old = true;
859 : }
860 : else
861 : {
862 : /*
863 : * Pathkeys are the same, and new path isn't materially
864 : * cheaper.
865 : */
866 40066 : accept_new = false;
867 : }
868 : }
869 :
870 : /*
871 : * Remove current element from partial_pathlist if dominated by new.
872 : */
873 254656 : if (remove_old)
874 : {
875 32000 : parent_rel->partial_pathlist =
876 32000 : foreach_delete_current(parent_rel->partial_pathlist, p1);
877 32000 : pfree(old_path);
878 : }
879 : else
880 : {
881 : /* new belongs after this old path if it has cost >= old's */
882 222656 : if (new_path->total_cost >= old_path->total_cost)
883 166286 : insert_at = foreach_current_index(p1) + 1;
884 : }
885 :
886 : /*
887 : * If we found an old path that dominates new_path, we can quit
888 : * scanning the partial_pathlist; we will not add new_path, and we
889 : * assume new_path cannot dominate any later path.
890 : */
891 254656 : if (!accept_new)
892 133058 : break;
893 : }
894 :
895 311576 : if (accept_new)
896 : {
897 : /* Accept the new path: insert it at proper place */
898 178518 : parent_rel->partial_pathlist =
899 178518 : list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
900 : }
901 : else
902 : {
903 : /* Reject and recycle the new path */
904 133058 : pfree(new_path);
905 : }
906 311576 : }
907 :
908 : /*
909 : * add_partial_path_precheck
910 : * Check whether a proposed new partial path could possibly get accepted.
911 : *
912 : * Unlike add_path_precheck, we can ignore startup cost and parameterization,
913 : * since they don't matter for partial paths (see add_partial_path). But
914 : * we do want to make sure we don't add a partial path if there's already
915 : * a complete path that dominates it, since in that case the proposed path
916 : * is surely a loser.
917 : */
918 : bool
919 447966 : add_partial_path_precheck(RelOptInfo *parent_rel, int disabled_nodes,
920 : Cost total_cost, List *pathkeys)
921 : {
922 : ListCell *p1;
923 :
924 : /*
925 : * Our goal here is twofold. First, we want to find out whether this path
926 : * is clearly inferior to some existing partial path. If so, we want to
927 : * reject it immediately. Second, we want to find out whether this path
928 : * is clearly superior to some existing partial path -- at least, modulo
929 : * final cost computations. If so, we definitely want to consider it.
930 : *
931 : * Unlike add_path(), we always compare pathkeys here. This is because we
932 : * expect partial_pathlist to be very short, and getting a definitive
933 : * answer at this stage avoids the need to call add_path_precheck.
934 : */
935 571746 : foreach(p1, parent_rel->partial_pathlist)
936 : {
937 465382 : Path *old_path = (Path *) lfirst(p1);
938 : PathKeysComparison keyscmp;
939 :
940 465382 : keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
941 465382 : if (keyscmp != PATHKEYS_DIFFERENT)
942 : {
943 465154 : if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
944 : keyscmp != PATHKEYS_BETTER1)
945 341602 : return false;
946 236814 : if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
947 : keyscmp != PATHKEYS_BETTER2)
948 113262 : return true;
949 : }
950 : }
951 :
952 : /*
953 : * This path is neither clearly inferior to an existing partial path nor
954 : * clearly good enough that it might replace one. Compare it to
955 : * non-parallel plans. If it loses even before accounting for the cost of
956 : * the Gather node, we should definitely reject it.
957 : *
958 : * Note that we pass the total_cost to add_path_precheck twice. This is
959 : * because it's never advantageous to consider the startup cost of a
960 : * partial path; the resulting plans, if run in parallel, will be run to
961 : * completion.
962 : */
963 106364 : if (!add_path_precheck(parent_rel, disabled_nodes, total_cost, total_cost,
964 : pathkeys, NULL))
965 2632 : return false;
966 :
967 103732 : return true;
968 : }
969 :
970 :
971 : /*****************************************************************************
972 : * PATH NODE CREATION ROUTINES
973 : *****************************************************************************/
974 :
975 : /*
976 : * create_seqscan_path
977 : * Creates a path corresponding to a sequential scan, returning the
978 : * pathnode.
979 : */
980 : Path *
981 449434 : create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
982 : Relids required_outer, int parallel_workers)
983 : {
984 449434 : Path *pathnode = makeNode(Path);
985 :
986 449434 : pathnode->pathtype = T_SeqScan;
987 449434 : pathnode->parent = rel;
988 449434 : pathnode->pathtarget = rel->reltarget;
989 449434 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
990 : required_outer);
991 449434 : pathnode->parallel_aware = (parallel_workers > 0);
992 449434 : pathnode->parallel_safe = rel->consider_parallel;
993 449434 : pathnode->parallel_workers = parallel_workers;
994 449434 : pathnode->pathkeys = NIL; /* seqscan has unordered result */
995 :
996 449434 : cost_seqscan(pathnode, root, rel, pathnode->param_info);
997 :
998 449434 : return pathnode;
999 : }
1000 :
1001 : /*
1002 : * create_samplescan_path
1003 : * Creates a path node for a sampled table scan.
1004 : */
1005 : Path *
1006 306 : create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
1007 : {
1008 306 : Path *pathnode = makeNode(Path);
1009 :
1010 306 : pathnode->pathtype = T_SampleScan;
1011 306 : pathnode->parent = rel;
1012 306 : pathnode->pathtarget = rel->reltarget;
1013 306 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
1014 : required_outer);
1015 306 : pathnode->parallel_aware = false;
1016 306 : pathnode->parallel_safe = rel->consider_parallel;
1017 306 : pathnode->parallel_workers = 0;
1018 306 : pathnode->pathkeys = NIL; /* samplescan has unordered result */
1019 :
1020 306 : cost_samplescan(pathnode, root, rel, pathnode->param_info);
1021 :
1022 306 : return pathnode;
1023 : }
1024 :
1025 : /*
1026 : * create_index_path
1027 : * Creates a path node for an index scan.
1028 : *
1029 : * 'index' is a usable index.
1030 : * 'indexclauses' is a list of IndexClause nodes representing clauses
1031 : * to be enforced as qual conditions in the scan.
1032 : * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
1033 : * to be used as index ordering operators in the scan.
1034 : * 'indexorderbycols' is an integer list of index column numbers (zero based)
1035 : * the ordering operators can be used with.
1036 : * 'pathkeys' describes the ordering of the path.
1037 : * 'indexscandir' is either ForwardScanDirection or BackwardScanDirection.
1038 : * 'indexonly' is true if an index-only scan is wanted.
1039 : * 'required_outer' is the set of outer relids for a parameterized path.
1040 : * 'loop_count' is the number of repetitions of the indexscan to factor into
1041 : * estimates of caching behavior.
1042 : * 'partial_path' is true if constructing a parallel index scan path.
1043 : *
1044 : * Returns the new path node.
1045 : */
1046 : IndexPath *
1047 832142 : create_index_path(PlannerInfo *root,
1048 : IndexOptInfo *index,
1049 : List *indexclauses,
1050 : List *indexorderbys,
1051 : List *indexorderbycols,
1052 : List *pathkeys,
1053 : ScanDirection indexscandir,
1054 : bool indexonly,
1055 : Relids required_outer,
1056 : double loop_count,
1057 : bool partial_path)
1058 : {
1059 832142 : IndexPath *pathnode = makeNode(IndexPath);
1060 832142 : RelOptInfo *rel = index->rel;
1061 :
1062 832142 : pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1063 832142 : pathnode->path.parent = rel;
1064 832142 : pathnode->path.pathtarget = rel->reltarget;
1065 832142 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1066 : required_outer);
1067 832142 : pathnode->path.parallel_aware = false;
1068 832142 : pathnode->path.parallel_safe = rel->consider_parallel;
1069 832142 : pathnode->path.parallel_workers = 0;
1070 832142 : pathnode->path.pathkeys = pathkeys;
1071 :
1072 832142 : pathnode->indexinfo = index;
1073 832142 : pathnode->indexclauses = indexclauses;
1074 832142 : pathnode->indexorderbys = indexorderbys;
1075 832142 : pathnode->indexorderbycols = indexorderbycols;
1076 832142 : pathnode->indexscandir = indexscandir;
1077 :
1078 832142 : cost_index(pathnode, root, loop_count, partial_path);
1079 :
1080 832142 : return pathnode;
1081 : }
1082 :
1083 : /*
1084 : * create_bitmap_heap_path
1085 : * Creates a path node for a bitmap scan.
1086 : *
1087 : * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
1088 : * 'required_outer' is the set of outer relids for a parameterized path.
1089 : * 'loop_count' is the number of repetitions of the indexscan to factor into
1090 : * estimates of caching behavior.
1091 : *
1092 : * loop_count should match the value used when creating the component
1093 : * IndexPaths.
1094 : */
1095 : BitmapHeapPath *
1096 367550 : create_bitmap_heap_path(PlannerInfo *root,
1097 : RelOptInfo *rel,
1098 : Path *bitmapqual,
1099 : Relids required_outer,
1100 : double loop_count,
1101 : int parallel_degree)
1102 : {
1103 367550 : BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1104 :
1105 367550 : pathnode->path.pathtype = T_BitmapHeapScan;
1106 367550 : pathnode->path.parent = rel;
1107 367550 : pathnode->path.pathtarget = rel->reltarget;
1108 367550 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1109 : required_outer);
1110 367550 : pathnode->path.parallel_aware = (parallel_degree > 0);
1111 367550 : pathnode->path.parallel_safe = rel->consider_parallel;
1112 367550 : pathnode->path.parallel_workers = parallel_degree;
1113 367550 : pathnode->path.pathkeys = NIL; /* always unordered */
1114 :
1115 367550 : pathnode->bitmapqual = bitmapqual;
1116 :
1117 367550 : cost_bitmap_heap_scan(&pathnode->path, root, rel,
1118 : pathnode->path.param_info,
1119 : bitmapqual, loop_count);
1120 :
1121 367550 : return pathnode;
1122 : }
1123 :
1124 : /*
1125 : * create_bitmap_and_path
1126 : * Creates a path node representing a BitmapAnd.
1127 : */
1128 : BitmapAndPath *
1129 51790 : create_bitmap_and_path(PlannerInfo *root,
1130 : RelOptInfo *rel,
1131 : List *bitmapquals)
1132 : {
1133 51790 : BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1134 51790 : Relids required_outer = NULL;
1135 : ListCell *lc;
1136 :
1137 51790 : pathnode->path.pathtype = T_BitmapAnd;
1138 51790 : pathnode->path.parent = rel;
1139 51790 : pathnode->path.pathtarget = rel->reltarget;
1140 :
1141 : /*
1142 : * Identify the required outer rels as the union of what the child paths
1143 : * depend on. (Alternatively, we could insist that the caller pass this
1144 : * in, but it's more convenient and reliable to compute it here.)
1145 : */
1146 155370 : foreach(lc, bitmapquals)
1147 : {
1148 103580 : Path *bitmapqual = (Path *) lfirst(lc);
1149 :
1150 103580 : required_outer = bms_add_members(required_outer,
1151 103580 : PATH_REQ_OUTER(bitmapqual));
1152 : }
1153 51790 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1154 : required_outer);
1155 :
1156 : /*
1157 : * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1158 : * parallel-safe if and only if rel->consider_parallel is set. So, we can
1159 : * set the flag for this path based only on the relation-level flag,
1160 : * without actually iterating over the list of children.
1161 : */
1162 51790 : pathnode->path.parallel_aware = false;
1163 51790 : pathnode->path.parallel_safe = rel->consider_parallel;
1164 51790 : pathnode->path.parallel_workers = 0;
1165 :
1166 51790 : pathnode->path.pathkeys = NIL; /* always unordered */
1167 :
1168 51790 : pathnode->bitmapquals = bitmapquals;
1169 :
1170 : /* this sets bitmapselectivity as well as the regular cost fields: */
1171 51790 : cost_bitmap_and_node(pathnode, root);
1172 :
1173 51790 : return pathnode;
1174 : }
1175 :
1176 : /*
1177 : * create_bitmap_or_path
1178 : * Creates a path node representing a BitmapOr.
1179 : */
1180 : BitmapOrPath *
1181 1040 : create_bitmap_or_path(PlannerInfo *root,
1182 : RelOptInfo *rel,
1183 : List *bitmapquals)
1184 : {
1185 1040 : BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1186 1040 : Relids required_outer = NULL;
1187 : ListCell *lc;
1188 :
1189 1040 : pathnode->path.pathtype = T_BitmapOr;
1190 1040 : pathnode->path.parent = rel;
1191 1040 : pathnode->path.pathtarget = rel->reltarget;
1192 :
1193 : /*
1194 : * Identify the required outer rels as the union of what the child paths
1195 : * depend on. (Alternatively, we could insist that the caller pass this
1196 : * in, but it's more convenient and reliable to compute it here.)
1197 : */
1198 2922 : foreach(lc, bitmapquals)
1199 : {
1200 1882 : Path *bitmapqual = (Path *) lfirst(lc);
1201 :
1202 1882 : required_outer = bms_add_members(required_outer,
1203 1882 : PATH_REQ_OUTER(bitmapqual));
1204 : }
1205 1040 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1206 : required_outer);
1207 :
1208 : /*
1209 : * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1210 : * parallel-safe if and only if rel->consider_parallel is set. So, we can
1211 : * set the flag for this path based only on the relation-level flag,
1212 : * without actually iterating over the list of children.
1213 : */
1214 1040 : pathnode->path.parallel_aware = false;
1215 1040 : pathnode->path.parallel_safe = rel->consider_parallel;
1216 1040 : pathnode->path.parallel_workers = 0;
1217 :
1218 1040 : pathnode->path.pathkeys = NIL; /* always unordered */
1219 :
1220 1040 : pathnode->bitmapquals = bitmapquals;
1221 :
1222 : /* this sets bitmapselectivity as well as the regular cost fields: */
1223 1040 : cost_bitmap_or_node(pathnode, root);
1224 :
1225 1040 : return pathnode;
1226 : }
1227 :
1228 : /*
1229 : * create_tidscan_path
1230 : * Creates a path corresponding to a scan by TID, returning the pathnode.
1231 : */
1232 : TidPath *
1233 872 : create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
1234 : Relids required_outer)
1235 : {
1236 872 : TidPath *pathnode = makeNode(TidPath);
1237 :
1238 872 : pathnode->path.pathtype = T_TidScan;
1239 872 : pathnode->path.parent = rel;
1240 872 : pathnode->path.pathtarget = rel->reltarget;
1241 872 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1242 : required_outer);
1243 872 : pathnode->path.parallel_aware = false;
1244 872 : pathnode->path.parallel_safe = rel->consider_parallel;
1245 872 : pathnode->path.parallel_workers = 0;
1246 872 : pathnode->path.pathkeys = NIL; /* always unordered */
1247 :
1248 872 : pathnode->tidquals = tidquals;
1249 :
1250 872 : cost_tidscan(&pathnode->path, root, rel, tidquals,
1251 : pathnode->path.param_info);
1252 :
1253 872 : return pathnode;
1254 : }
1255 :
1256 : /*
1257 : * create_tidrangescan_path
1258 : * Creates a path corresponding to a scan by a range of TIDs, returning
1259 : * the pathnode.
1260 : */
1261 : TidRangePath *
1262 2052 : create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel,
1263 : List *tidrangequals, Relids required_outer,
1264 : int parallel_workers)
1265 : {
1266 2052 : TidRangePath *pathnode = makeNode(TidRangePath);
1267 :
1268 2052 : pathnode->path.pathtype = T_TidRangeScan;
1269 2052 : pathnode->path.parent = rel;
1270 2052 : pathnode->path.pathtarget = rel->reltarget;
1271 2052 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1272 : required_outer);
1273 2052 : pathnode->path.parallel_aware = (parallel_workers > 0);
1274 2052 : pathnode->path.parallel_safe = rel->consider_parallel;
1275 2052 : pathnode->path.parallel_workers = parallel_workers;
1276 2052 : pathnode->path.pathkeys = NIL; /* always unordered */
1277 :
1278 2052 : pathnode->tidrangequals = tidrangequals;
1279 :
1280 2052 : cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1281 : pathnode->path.param_info);
1282 :
1283 2052 : return pathnode;
1284 : }
1285 :
1286 : /*
1287 : * create_append_path
1288 : * Creates a path corresponding to an Append plan, returning the
1289 : * pathnode.
1290 : *
1291 : * Note that we must handle subpaths = NIL, representing a dummy access path.
1292 : * Also, there are callers that pass root = NULL.
1293 : *
1294 : * 'rows', when passed as a non-negative number, will be used to overwrite the
1295 : * returned path's row estimate. Otherwise, the row estimate is calculated
1296 : * by totalling the row estimates from the 'subpaths' list.
1297 : */
1298 : AppendPath *
1299 93250 : create_append_path(PlannerInfo *root,
1300 : RelOptInfo *rel,
1301 : List *subpaths, List *partial_subpaths,
1302 : List *pathkeys, Relids required_outer,
1303 : int parallel_workers, bool parallel_aware,
1304 : double rows)
1305 : {
1306 93250 : AppendPath *pathnode = makeNode(AppendPath);
1307 : ListCell *l;
1308 :
1309 : Assert(!parallel_aware || parallel_workers > 0);
1310 :
1311 93250 : pathnode->path.pathtype = T_Append;
1312 93250 : pathnode->path.parent = rel;
1313 93250 : pathnode->path.pathtarget = rel->reltarget;
1314 :
1315 : /*
1316 : * If this is for a baserel (not a join or non-leaf partition), we prefer
1317 : * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1318 : * for the path. This supports building a Memoize path atop this path,
1319 : * and if this is a partitioned table the info may be useful for run-time
1320 : * pruning (cf make_partition_pruneinfo()).
1321 : *
1322 : * However, if we don't have "root" then that won't work and we fall back
1323 : * on the simpler get_appendrel_parampathinfo. There's no point in doing
1324 : * the more expensive thing for a dummy path, either.
1325 : */
1326 93250 : if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1327 40886 : pathnode->path.param_info = get_baserel_parampathinfo(root,
1328 : rel,
1329 : required_outer);
1330 : else
1331 52364 : pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1332 : required_outer);
1333 :
1334 93250 : pathnode->path.parallel_aware = parallel_aware;
1335 93250 : pathnode->path.parallel_safe = rel->consider_parallel;
1336 93250 : pathnode->path.parallel_workers = parallel_workers;
1337 93250 : pathnode->path.pathkeys = pathkeys;
1338 :
1339 : /*
1340 : * For parallel append, non-partial paths are sorted by descending total
1341 : * costs. That way, the total time to finish all non-partial paths is
1342 : * minimized. Also, the partial paths are sorted by descending startup
1343 : * costs. There may be some paths that require to do startup work by a
1344 : * single worker. In such case, it's better for workers to choose the
1345 : * expensive ones first, whereas the leader should choose the cheapest
1346 : * startup plan.
1347 : */
1348 93250 : if (pathnode->path.parallel_aware)
1349 : {
1350 : /*
1351 : * We mustn't fiddle with the order of subpaths when the Append has
1352 : * pathkeys. The order they're listed in is critical to keeping the
1353 : * pathkeys valid.
1354 : */
1355 : Assert(pathkeys == NIL);
1356 :
1357 33574 : list_sort(subpaths, append_total_cost_compare);
1358 33574 : list_sort(partial_subpaths, append_startup_cost_compare);
1359 : }
1360 93250 : pathnode->first_partial_path = list_length(subpaths);
1361 93250 : pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1362 :
1363 : /*
1364 : * Apply query-wide LIMIT if known and path is for sole base relation.
1365 : * (Handling this at this low level is a bit klugy.)
1366 : */
1367 93250 : if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1368 47262 : pathnode->limit_tuples = root->limit_tuples;
1369 : else
1370 45988 : pathnode->limit_tuples = -1.0;
1371 :
1372 317228 : foreach(l, pathnode->subpaths)
1373 : {
1374 223978 : Path *subpath = (Path *) lfirst(l);
1375 :
1376 407302 : pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1377 183324 : subpath->parallel_safe;
1378 :
1379 : /* All child paths must have same parameterization */
1380 : Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1381 : }
1382 :
1383 : Assert(!parallel_aware || pathnode->path.parallel_safe);
1384 :
1385 : /*
1386 : * If there's exactly one child path then the output of the Append is
1387 : * necessarily ordered the same as the child's, so we can inherit the
1388 : * child's pathkeys if any, overriding whatever the caller might've said.
1389 : * Furthermore, if the child's parallel awareness matches the Append's,
1390 : * then the Append is a no-op and will be discarded later (in setrefs.c).
1391 : * Then we can inherit the child's size and cost too, effectively charging
1392 : * zero for the Append. Otherwise, we must do the normal costsize
1393 : * calculation.
1394 : */
1395 93250 : if (list_length(pathnode->subpaths) == 1)
1396 : {
1397 22842 : Path *child = (Path *) linitial(pathnode->subpaths);
1398 :
1399 22842 : if (child->parallel_aware == parallel_aware)
1400 : {
1401 22386 : pathnode->path.rows = child->rows;
1402 22386 : pathnode->path.startup_cost = child->startup_cost;
1403 22386 : pathnode->path.total_cost = child->total_cost;
1404 : }
1405 : else
1406 456 : cost_append(pathnode, root);
1407 : /* Must do this last, else cost_append complains */
1408 22842 : pathnode->path.pathkeys = child->pathkeys;
1409 : }
1410 : else
1411 70408 : cost_append(pathnode, root);
1412 :
1413 : /* If the caller provided a row estimate, override the computed value. */
1414 93250 : if (rows >= 0)
1415 576 : pathnode->path.rows = rows;
1416 :
1417 93250 : return pathnode;
1418 : }
1419 :
1420 : /*
1421 : * append_total_cost_compare
1422 : * list_sort comparator for sorting append child paths
1423 : * by total_cost descending
1424 : *
1425 : * For equal total costs, we fall back to comparing startup costs; if those
1426 : * are equal too, break ties using bms_compare on the paths' relids.
1427 : * (This is to avoid getting unpredictable results from list_sort.)
1428 : */
1429 : static int
1430 22098 : append_total_cost_compare(const ListCell *a, const ListCell *b)
1431 : {
1432 22098 : Path *path1 = (Path *) lfirst(a);
1433 22098 : Path *path2 = (Path *) lfirst(b);
1434 : int cmp;
1435 :
1436 22098 : cmp = compare_path_costs(path1, path2, TOTAL_COST);
1437 22098 : if (cmp != 0)
1438 19694 : return -cmp;
1439 2404 : return bms_compare(path1->parent->relids, path2->parent->relids);
1440 : }
1441 :
1442 : /*
1443 : * append_startup_cost_compare
1444 : * list_sort comparator for sorting append child paths
1445 : * by startup_cost descending
1446 : *
1447 : * For equal startup costs, we fall back to comparing total costs; if those
1448 : * are equal too, break ties using bms_compare on the paths' relids.
1449 : * (This is to avoid getting unpredictable results from list_sort.)
1450 : */
1451 : static int
1452 45576 : append_startup_cost_compare(const ListCell *a, const ListCell *b)
1453 : {
1454 45576 : Path *path1 = (Path *) lfirst(a);
1455 45576 : Path *path2 = (Path *) lfirst(b);
1456 : int cmp;
1457 :
1458 45576 : cmp = compare_path_costs(path1, path2, STARTUP_COST);
1459 45576 : if (cmp != 0)
1460 21906 : return -cmp;
1461 23670 : return bms_compare(path1->parent->relids, path2->parent->relids);
1462 : }
1463 :
1464 : /*
1465 : * create_merge_append_path
1466 : * Creates a path corresponding to a MergeAppend plan, returning the
1467 : * pathnode.
1468 : */
1469 : MergeAppendPath *
1470 10028 : create_merge_append_path(PlannerInfo *root,
1471 : RelOptInfo *rel,
1472 : List *subpaths,
1473 : List *pathkeys,
1474 : Relids required_outer)
1475 : {
1476 10028 : MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1477 : int input_disabled_nodes;
1478 : Cost input_startup_cost;
1479 : Cost input_total_cost;
1480 : ListCell *l;
1481 :
1482 : /*
1483 : * We don't currently support parameterized MergeAppend paths, as
1484 : * explained in the comments for generate_orderedappend_paths.
1485 : */
1486 : Assert(bms_is_empty(rel->lateral_relids) && bms_is_empty(required_outer));
1487 :
1488 10028 : pathnode->path.pathtype = T_MergeAppend;
1489 10028 : pathnode->path.parent = rel;
1490 10028 : pathnode->path.pathtarget = rel->reltarget;
1491 10028 : pathnode->path.param_info = NULL;
1492 10028 : pathnode->path.parallel_aware = false;
1493 10028 : pathnode->path.parallel_safe = rel->consider_parallel;
1494 10028 : pathnode->path.parallel_workers = 0;
1495 10028 : pathnode->path.pathkeys = pathkeys;
1496 10028 : pathnode->subpaths = subpaths;
1497 :
1498 : /*
1499 : * Apply query-wide LIMIT if known and path is for sole base relation.
1500 : * (Handling this at this low level is a bit klugy.)
1501 : */
1502 10028 : if (bms_equal(rel->relids, root->all_query_rels))
1503 4614 : pathnode->limit_tuples = root->limit_tuples;
1504 : else
1505 5414 : pathnode->limit_tuples = -1.0;
1506 :
1507 : /*
1508 : * Add up the sizes and costs of the input paths.
1509 : */
1510 10028 : pathnode->path.rows = 0;
1511 10028 : input_disabled_nodes = 0;
1512 10028 : input_startup_cost = 0;
1513 10028 : input_total_cost = 0;
1514 36156 : foreach(l, subpaths)
1515 : {
1516 26128 : Path *subpath = (Path *) lfirst(l);
1517 : int presorted_keys;
1518 : Path sort_path; /* dummy for result of
1519 : * cost_sort/cost_incremental_sort */
1520 :
1521 : /* All child paths should be unparameterized */
1522 : Assert(bms_is_empty(PATH_REQ_OUTER(subpath)));
1523 :
1524 26128 : pathnode->path.rows += subpath->rows;
1525 49600 : pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1526 23472 : subpath->parallel_safe;
1527 :
1528 26128 : if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
1529 : &presorted_keys))
1530 : {
1531 : /*
1532 : * We'll need to insert a Sort node, so include costs for that. We
1533 : * choose to use incremental sort if it is enabled and there are
1534 : * presorted keys; otherwise we use full sort.
1535 : *
1536 : * We can use the parent's LIMIT if any, since we certainly won't
1537 : * pull more than that many tuples from any child.
1538 : */
1539 314 : if (enable_incremental_sort && presorted_keys > 0)
1540 : {
1541 18 : cost_incremental_sort(&sort_path,
1542 : root,
1543 : pathkeys,
1544 : presorted_keys,
1545 : subpath->disabled_nodes,
1546 : subpath->startup_cost,
1547 : subpath->total_cost,
1548 : subpath->rows,
1549 18 : subpath->pathtarget->width,
1550 : 0.0,
1551 : work_mem,
1552 : pathnode->limit_tuples);
1553 : }
1554 : else
1555 : {
1556 296 : cost_sort(&sort_path,
1557 : root,
1558 : pathkeys,
1559 : subpath->disabled_nodes,
1560 : subpath->total_cost,
1561 : subpath->rows,
1562 296 : subpath->pathtarget->width,
1563 : 0.0,
1564 : work_mem,
1565 : pathnode->limit_tuples);
1566 : }
1567 :
1568 314 : subpath = &sort_path;
1569 : }
1570 :
1571 26128 : input_disabled_nodes += subpath->disabled_nodes;
1572 26128 : input_startup_cost += subpath->startup_cost;
1573 26128 : input_total_cost += subpath->total_cost;
1574 : }
1575 :
1576 : /*
1577 : * Now we can compute total costs of the MergeAppend. If there's exactly
1578 : * one child path and its parallel awareness matches that of the
1579 : * MergeAppend, then the MergeAppend is a no-op and will be discarded
1580 : * later (in setrefs.c); otherwise we do the normal cost calculation.
1581 : */
1582 10028 : if (list_length(subpaths) == 1 &&
1583 128 : ((Path *) linitial(subpaths))->parallel_aware ==
1584 128 : pathnode->path.parallel_aware)
1585 : {
1586 128 : pathnode->path.disabled_nodes = input_disabled_nodes;
1587 128 : pathnode->path.startup_cost = input_startup_cost;
1588 128 : pathnode->path.total_cost = input_total_cost;
1589 : }
1590 : else
1591 9900 : cost_merge_append(&pathnode->path, root,
1592 : pathkeys, list_length(subpaths),
1593 : input_disabled_nodes,
1594 : input_startup_cost, input_total_cost,
1595 : pathnode->path.rows);
1596 :
1597 10028 : return pathnode;
1598 : }
1599 :
1600 : /*
1601 : * create_group_result_path
1602 : * Creates a path representing a Result-and-nothing-else plan.
1603 : *
1604 : * This is only used for degenerate grouping cases, in which we know we
1605 : * need to produce one result row, possibly filtered by a HAVING qual.
1606 : */
1607 : GroupResultPath *
1608 194758 : create_group_result_path(PlannerInfo *root, RelOptInfo *rel,
1609 : PathTarget *target, List *havingqual)
1610 : {
1611 194758 : GroupResultPath *pathnode = makeNode(GroupResultPath);
1612 :
1613 194758 : pathnode->path.pathtype = T_Result;
1614 194758 : pathnode->path.parent = rel;
1615 194758 : pathnode->path.pathtarget = target;
1616 194758 : pathnode->path.param_info = NULL; /* there are no other rels... */
1617 194758 : pathnode->path.parallel_aware = false;
1618 194758 : pathnode->path.parallel_safe = rel->consider_parallel;
1619 194758 : pathnode->path.parallel_workers = 0;
1620 194758 : pathnode->path.pathkeys = NIL;
1621 194758 : pathnode->quals = havingqual;
1622 :
1623 : /*
1624 : * We can't quite use cost_resultscan() because the quals we want to
1625 : * account for are not baserestrict quals of the rel. Might as well just
1626 : * hack it here.
1627 : */
1628 194758 : pathnode->path.rows = 1;
1629 194758 : pathnode->path.startup_cost = target->cost.startup;
1630 194758 : pathnode->path.total_cost = target->cost.startup +
1631 194758 : cpu_tuple_cost + target->cost.per_tuple;
1632 :
1633 : /*
1634 : * Add cost of qual, if any --- but we ignore its selectivity, since our
1635 : * rowcount estimate should be 1 no matter what the qual is.
1636 : */
1637 194758 : if (havingqual)
1638 : {
1639 : QualCost qual_cost;
1640 :
1641 632 : cost_qual_eval(&qual_cost, havingqual, root);
1642 : /* havingqual is evaluated once at startup */
1643 632 : pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1644 632 : pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1645 : }
1646 :
1647 194758 : return pathnode;
1648 : }
1649 :
1650 : /*
1651 : * create_material_path
1652 : * Creates a path corresponding to a Material plan, returning the
1653 : * pathnode.
1654 : */
1655 : MaterialPath *
1656 695526 : create_material_path(RelOptInfo *rel, Path *subpath, bool enabled)
1657 : {
1658 695526 : MaterialPath *pathnode = makeNode(MaterialPath);
1659 :
1660 : Assert(subpath->parent == rel);
1661 :
1662 695526 : pathnode->path.pathtype = T_Material;
1663 695526 : pathnode->path.parent = rel;
1664 695526 : pathnode->path.pathtarget = rel->reltarget;
1665 695526 : pathnode->path.param_info = subpath->param_info;
1666 695526 : pathnode->path.parallel_aware = false;
1667 1333262 : pathnode->path.parallel_safe = rel->consider_parallel &&
1668 637736 : subpath->parallel_safe;
1669 695526 : pathnode->path.parallel_workers = subpath->parallel_workers;
1670 695526 : pathnode->path.pathkeys = subpath->pathkeys;
1671 :
1672 695526 : pathnode->subpath = subpath;
1673 :
1674 695526 : cost_material(&pathnode->path,
1675 : enabled,
1676 : subpath->disabled_nodes,
1677 : subpath->startup_cost,
1678 : subpath->total_cost,
1679 : subpath->rows,
1680 695526 : subpath->pathtarget->width);
1681 :
1682 695526 : return pathnode;
1683 : }
1684 :
1685 : /*
1686 : * create_memoize_path
1687 : * Creates a path corresponding to a Memoize plan, returning the pathnode.
1688 : */
1689 : MemoizePath *
1690 320676 : create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1691 : List *param_exprs, List *hash_operators,
1692 : bool singlerow, bool binary_mode, Cardinality est_calls)
1693 : {
1694 320676 : MemoizePath *pathnode = makeNode(MemoizePath);
1695 :
1696 : Assert(subpath->parent == rel);
1697 :
1698 320676 : pathnode->path.pathtype = T_Memoize;
1699 320676 : pathnode->path.parent = rel;
1700 320676 : pathnode->path.pathtarget = rel->reltarget;
1701 320676 : pathnode->path.param_info = subpath->param_info;
1702 320676 : pathnode->path.parallel_aware = false;
1703 627066 : pathnode->path.parallel_safe = rel->consider_parallel &&
1704 306390 : subpath->parallel_safe;
1705 320676 : pathnode->path.parallel_workers = subpath->parallel_workers;
1706 320676 : pathnode->path.pathkeys = subpath->pathkeys;
1707 :
1708 320676 : pathnode->subpath = subpath;
1709 320676 : pathnode->hash_operators = hash_operators;
1710 320676 : pathnode->param_exprs = param_exprs;
1711 320676 : pathnode->singlerow = singlerow;
1712 320676 : pathnode->binary_mode = binary_mode;
1713 :
1714 : /*
1715 : * For now we set est_entries to 0. cost_memoize_rescan() does all the
1716 : * hard work to determine how many cache entries there are likely to be,
1717 : * so it seems best to leave it up to that function to fill this field in.
1718 : * If left at 0, the executor will make a guess at a good value.
1719 : */
1720 320676 : pathnode->est_entries = 0;
1721 :
1722 320676 : pathnode->est_calls = clamp_row_est(est_calls);
1723 :
1724 : /* These will also be set later in cost_memoize_rescan() */
1725 320676 : pathnode->est_unique_keys = 0.0;
1726 320676 : pathnode->est_hit_ratio = 0.0;
1727 :
1728 : /*
1729 : * We should not be asked to generate this path type when memoization is
1730 : * disabled, so set our count of disabled nodes equal to the subpath's
1731 : * count.
1732 : *
1733 : * It would be nice to also Assert that memoization is enabled, but the
1734 : * value of enable_memoize is not controlling: what we would need to check
1735 : * is that the JoinPathExtraData's pgs_mask included PGS_NESTLOOP_MEMOIZE.
1736 : */
1737 320676 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
1738 :
1739 : /*
1740 : * Add a small additional charge for caching the first entry. All the
1741 : * harder calculations for rescans are performed in cost_memoize_rescan().
1742 : */
1743 320676 : pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1744 320676 : pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1745 320676 : pathnode->path.rows = subpath->rows;
1746 :
1747 320676 : return pathnode;
1748 : }
1749 :
1750 : /*
1751 : * create_gather_merge_path
1752 : *
1753 : * Creates a path corresponding to a gather merge scan, returning
1754 : * the pathnode.
1755 : */
1756 : GatherMergePath *
1757 18928 : create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1758 : PathTarget *target, List *pathkeys,
1759 : Relids required_outer, double *rows)
1760 : {
1761 18928 : GatherMergePath *pathnode = makeNode(GatherMergePath);
1762 18928 : int input_disabled_nodes = 0;
1763 18928 : Cost input_startup_cost = 0;
1764 18928 : Cost input_total_cost = 0;
1765 :
1766 : Assert(subpath->parallel_safe);
1767 : Assert(pathkeys);
1768 :
1769 : /*
1770 : * The subpath should guarantee that it is adequately ordered either by
1771 : * adding an explicit sort node or by using presorted input. We cannot
1772 : * add an explicit Sort node for the subpath in createplan.c on additional
1773 : * pathkeys, because we can't guarantee the sort would be safe. For
1774 : * example, expressions may be volatile or otherwise parallel unsafe.
1775 : */
1776 18928 : if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
1777 0 : elog(ERROR, "gather merge input not sufficiently sorted");
1778 :
1779 18928 : pathnode->path.pathtype = T_GatherMerge;
1780 18928 : pathnode->path.parent = rel;
1781 18928 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1782 : required_outer);
1783 18928 : pathnode->path.parallel_aware = false;
1784 :
1785 18928 : pathnode->subpath = subpath;
1786 18928 : pathnode->num_workers = subpath->parallel_workers;
1787 18928 : pathnode->path.pathkeys = pathkeys;
1788 18928 : pathnode->path.pathtarget = target ? target : rel->reltarget;
1789 :
1790 18928 : input_disabled_nodes += subpath->disabled_nodes;
1791 18928 : input_startup_cost += subpath->startup_cost;
1792 18928 : input_total_cost += subpath->total_cost;
1793 :
1794 18928 : cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
1795 : input_disabled_nodes, input_startup_cost,
1796 : input_total_cost, rows);
1797 :
1798 18928 : return pathnode;
1799 : }
1800 :
1801 : /*
1802 : * create_gather_path
1803 : * Creates a path corresponding to a gather scan, returning the
1804 : * pathnode.
1805 : *
1806 : * 'rows' may optionally be set to override row estimates from other sources.
1807 : */
1808 : GatherPath *
1809 26788 : create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1810 : PathTarget *target, Relids required_outer, double *rows)
1811 : {
1812 26788 : GatherPath *pathnode = makeNode(GatherPath);
1813 :
1814 : Assert(subpath->parallel_safe);
1815 :
1816 26788 : pathnode->path.pathtype = T_Gather;
1817 26788 : pathnode->path.parent = rel;
1818 26788 : pathnode->path.pathtarget = target;
1819 26788 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1820 : required_outer);
1821 26788 : pathnode->path.parallel_aware = false;
1822 26788 : pathnode->path.parallel_safe = false;
1823 26788 : pathnode->path.parallel_workers = 0;
1824 26788 : pathnode->path.pathkeys = NIL; /* Gather has unordered result */
1825 :
1826 26788 : pathnode->subpath = subpath;
1827 26788 : pathnode->num_workers = subpath->parallel_workers;
1828 26788 : pathnode->single_copy = false;
1829 :
1830 26788 : if (pathnode->num_workers == 0)
1831 : {
1832 0 : pathnode->path.pathkeys = subpath->pathkeys;
1833 0 : pathnode->num_workers = 1;
1834 0 : pathnode->single_copy = true;
1835 : }
1836 :
1837 26788 : cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
1838 :
1839 26788 : return pathnode;
1840 : }
1841 :
1842 : /*
1843 : * create_subqueryscan_path
1844 : * Creates a path corresponding to a scan of a subquery,
1845 : * returning the pathnode.
1846 : *
1847 : * Caller must pass trivial_pathtarget = true if it believes rel->reltarget to
1848 : * be trivial, ie just a fetch of all the subquery output columns in order.
1849 : * While we could determine that here, the caller can usually do it more
1850 : * efficiently (or at least amortize it over multiple calls).
1851 : */
1852 : SubqueryScanPath *
1853 63192 : create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1854 : bool trivial_pathtarget,
1855 : List *pathkeys, Relids required_outer)
1856 : {
1857 63192 : SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
1858 :
1859 63192 : pathnode->path.pathtype = T_SubqueryScan;
1860 63192 : pathnode->path.parent = rel;
1861 63192 : pathnode->path.pathtarget = rel->reltarget;
1862 63192 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1863 : required_outer);
1864 63192 : pathnode->path.parallel_aware = false;
1865 105182 : pathnode->path.parallel_safe = rel->consider_parallel &&
1866 41990 : subpath->parallel_safe;
1867 63192 : pathnode->path.parallel_workers = subpath->parallel_workers;
1868 63192 : pathnode->path.pathkeys = pathkeys;
1869 63192 : pathnode->subpath = subpath;
1870 :
1871 63192 : cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
1872 : trivial_pathtarget);
1873 :
1874 63192 : return pathnode;
1875 : }
1876 :
1877 : /*
1878 : * create_functionscan_path
1879 : * Creates a path corresponding to a sequential scan of a function,
1880 : * returning the pathnode.
1881 : */
1882 : Path *
1883 53512 : create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
1884 : List *pathkeys, Relids required_outer)
1885 : {
1886 53512 : Path *pathnode = makeNode(Path);
1887 :
1888 53512 : pathnode->pathtype = T_FunctionScan;
1889 53512 : pathnode->parent = rel;
1890 53512 : pathnode->pathtarget = rel->reltarget;
1891 53512 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
1892 : required_outer);
1893 53512 : pathnode->parallel_aware = false;
1894 53512 : pathnode->parallel_safe = rel->consider_parallel;
1895 53512 : pathnode->parallel_workers = 0;
1896 53512 : pathnode->pathkeys = pathkeys;
1897 :
1898 53512 : cost_functionscan(pathnode, root, rel, pathnode->param_info);
1899 :
1900 53512 : return pathnode;
1901 : }
1902 :
1903 : /*
1904 : * create_tablefuncscan_path
1905 : * Creates a path corresponding to a sequential scan of a table function,
1906 : * returning the pathnode.
1907 : */
1908 : Path *
1909 626 : create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel,
1910 : Relids required_outer)
1911 : {
1912 626 : Path *pathnode = makeNode(Path);
1913 :
1914 626 : pathnode->pathtype = T_TableFuncScan;
1915 626 : pathnode->parent = rel;
1916 626 : pathnode->pathtarget = rel->reltarget;
1917 626 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
1918 : required_outer);
1919 626 : pathnode->parallel_aware = false;
1920 626 : pathnode->parallel_safe = rel->consider_parallel;
1921 626 : pathnode->parallel_workers = 0;
1922 626 : pathnode->pathkeys = NIL; /* result is always unordered */
1923 :
1924 626 : cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
1925 :
1926 626 : return pathnode;
1927 : }
1928 :
1929 : /*
1930 : * create_valuesscan_path
1931 : * Creates a path corresponding to a scan of a VALUES list,
1932 : * returning the pathnode.
1933 : */
1934 : Path *
1935 8516 : create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
1936 : Relids required_outer)
1937 : {
1938 8516 : Path *pathnode = makeNode(Path);
1939 :
1940 8516 : pathnode->pathtype = T_ValuesScan;
1941 8516 : pathnode->parent = rel;
1942 8516 : pathnode->pathtarget = rel->reltarget;
1943 8516 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
1944 : required_outer);
1945 8516 : pathnode->parallel_aware = false;
1946 8516 : pathnode->parallel_safe = rel->consider_parallel;
1947 8516 : pathnode->parallel_workers = 0;
1948 8516 : pathnode->pathkeys = NIL; /* result is always unordered */
1949 :
1950 8516 : cost_valuesscan(pathnode, root, rel, pathnode->param_info);
1951 :
1952 8516 : return pathnode;
1953 : }
1954 :
1955 : /*
1956 : * create_ctescan_path
1957 : * Creates a path corresponding to a scan of a non-self-reference CTE,
1958 : * returning the pathnode.
1959 : */
1960 : Path *
1961 4464 : create_ctescan_path(PlannerInfo *root, RelOptInfo *rel,
1962 : List *pathkeys, Relids required_outer)
1963 : {
1964 4464 : Path *pathnode = makeNode(Path);
1965 :
1966 4464 : pathnode->pathtype = T_CteScan;
1967 4464 : pathnode->parent = rel;
1968 4464 : pathnode->pathtarget = rel->reltarget;
1969 4464 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
1970 : required_outer);
1971 4464 : pathnode->parallel_aware = false;
1972 4464 : pathnode->parallel_safe = rel->consider_parallel;
1973 4464 : pathnode->parallel_workers = 0;
1974 4464 : pathnode->pathkeys = pathkeys;
1975 :
1976 4464 : cost_ctescan(pathnode, root, rel, pathnode->param_info);
1977 :
1978 4464 : return pathnode;
1979 : }
1980 :
1981 : /*
1982 : * create_namedtuplestorescan_path
1983 : * Creates a path corresponding to a scan of a named tuplestore, returning
1984 : * the pathnode.
1985 : */
1986 : Path *
1987 482 : create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
1988 : Relids required_outer)
1989 : {
1990 482 : Path *pathnode = makeNode(Path);
1991 :
1992 482 : pathnode->pathtype = T_NamedTuplestoreScan;
1993 482 : pathnode->parent = rel;
1994 482 : pathnode->pathtarget = rel->reltarget;
1995 482 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
1996 : required_outer);
1997 482 : pathnode->parallel_aware = false;
1998 482 : pathnode->parallel_safe = rel->consider_parallel;
1999 482 : pathnode->parallel_workers = 0;
2000 482 : pathnode->pathkeys = NIL; /* result is always unordered */
2001 :
2002 482 : cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2003 :
2004 482 : return pathnode;
2005 : }
2006 :
2007 : /*
2008 : * create_resultscan_path
2009 : * Creates a path corresponding to a scan of an RTE_RESULT relation,
2010 : * returning the pathnode.
2011 : */
2012 : Path *
2013 4352 : create_resultscan_path(PlannerInfo *root, RelOptInfo *rel,
2014 : Relids required_outer)
2015 : {
2016 4352 : Path *pathnode = makeNode(Path);
2017 :
2018 4352 : pathnode->pathtype = T_Result;
2019 4352 : pathnode->parent = rel;
2020 4352 : pathnode->pathtarget = rel->reltarget;
2021 4352 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2022 : required_outer);
2023 4352 : pathnode->parallel_aware = false;
2024 4352 : pathnode->parallel_safe = rel->consider_parallel;
2025 4352 : pathnode->parallel_workers = 0;
2026 4352 : pathnode->pathkeys = NIL; /* result is always unordered */
2027 :
2028 4352 : cost_resultscan(pathnode, root, rel, pathnode->param_info);
2029 :
2030 4352 : return pathnode;
2031 : }
2032 :
2033 : /*
2034 : * create_worktablescan_path
2035 : * Creates a path corresponding to a scan of a self-reference CTE,
2036 : * returning the pathnode.
2037 : */
2038 : Path *
2039 942 : create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
2040 : Relids required_outer)
2041 : {
2042 942 : Path *pathnode = makeNode(Path);
2043 :
2044 942 : pathnode->pathtype = T_WorkTableScan;
2045 942 : pathnode->parent = rel;
2046 942 : pathnode->pathtarget = rel->reltarget;
2047 942 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2048 : required_outer);
2049 942 : pathnode->parallel_aware = false;
2050 942 : pathnode->parallel_safe = rel->consider_parallel;
2051 942 : pathnode->parallel_workers = 0;
2052 942 : pathnode->pathkeys = NIL; /* result is always unordered */
2053 :
2054 : /* Cost is the same as for a regular CTE scan */
2055 942 : cost_ctescan(pathnode, root, rel, pathnode->param_info);
2056 :
2057 942 : return pathnode;
2058 : }
2059 :
2060 : /*
2061 : * create_foreignscan_path
2062 : * Creates a path corresponding to a scan of a foreign base table,
2063 : * returning the pathnode.
2064 : *
2065 : * This function is never called from core Postgres; rather, it's expected
2066 : * to be called by the GetForeignPaths function of a foreign data wrapper.
2067 : * We make the FDW supply all fields of the path, since we do not have any way
2068 : * to calculate them in core. However, there is a usually-sane default for
2069 : * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2070 : */
2071 : ForeignPath *
2072 3726 : create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
2073 : PathTarget *target,
2074 : double rows, int disabled_nodes,
2075 : Cost startup_cost, Cost total_cost,
2076 : List *pathkeys,
2077 : Relids required_outer,
2078 : Path *fdw_outerpath,
2079 : List *fdw_restrictinfo,
2080 : List *fdw_private)
2081 : {
2082 3726 : ForeignPath *pathnode = makeNode(ForeignPath);
2083 :
2084 : /* Historically some FDWs were confused about when to use this */
2085 : Assert(IS_SIMPLE_REL(rel));
2086 :
2087 3726 : pathnode->path.pathtype = T_ForeignScan;
2088 3726 : pathnode->path.parent = rel;
2089 3726 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2090 3726 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2091 : required_outer);
2092 3726 : pathnode->path.parallel_aware = false;
2093 3726 : pathnode->path.parallel_safe = rel->consider_parallel;
2094 3726 : pathnode->path.parallel_workers = 0;
2095 3726 : pathnode->path.rows = rows;
2096 3726 : pathnode->path.disabled_nodes = disabled_nodes;
2097 3726 : pathnode->path.startup_cost = startup_cost;
2098 3726 : pathnode->path.total_cost = total_cost;
2099 3726 : pathnode->path.pathkeys = pathkeys;
2100 :
2101 3726 : pathnode->fdw_outerpath = fdw_outerpath;
2102 3726 : pathnode->fdw_restrictinfo = fdw_restrictinfo;
2103 3726 : pathnode->fdw_private = fdw_private;
2104 :
2105 3726 : return pathnode;
2106 : }
2107 :
2108 : /*
2109 : * create_foreign_join_path
2110 : * Creates a path corresponding to a scan of a foreign join,
2111 : * returning the pathnode.
2112 : *
2113 : * This function is never called from core Postgres; rather, it's expected
2114 : * to be called by the GetForeignJoinPaths function of a foreign data wrapper.
2115 : * We make the FDW supply all fields of the path, since we do not have any way
2116 : * to calculate them in core. However, there is a usually-sane default for
2117 : * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2118 : */
2119 : ForeignPath *
2120 1212 : create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
2121 : PathTarget *target,
2122 : double rows, int disabled_nodes,
2123 : Cost startup_cost, Cost total_cost,
2124 : List *pathkeys,
2125 : Relids required_outer,
2126 : Path *fdw_outerpath,
2127 : List *fdw_restrictinfo,
2128 : List *fdw_private)
2129 : {
2130 1212 : ForeignPath *pathnode = makeNode(ForeignPath);
2131 :
2132 : /*
2133 : * We should use get_joinrel_parampathinfo to handle parameterized paths,
2134 : * but the API of this function doesn't support it, and existing
2135 : * extensions aren't yet trying to build such paths anyway. For the
2136 : * moment just throw an error if someone tries it; eventually we should
2137 : * revisit this.
2138 : */
2139 1212 : if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2140 0 : elog(ERROR, "parameterized foreign joins are not supported yet");
2141 :
2142 1212 : pathnode->path.pathtype = T_ForeignScan;
2143 1212 : pathnode->path.parent = rel;
2144 1212 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2145 1212 : pathnode->path.param_info = NULL; /* XXX see above */
2146 1212 : pathnode->path.parallel_aware = false;
2147 1212 : pathnode->path.parallel_safe = rel->consider_parallel;
2148 1212 : pathnode->path.parallel_workers = 0;
2149 1212 : pathnode->path.rows = rows;
2150 1212 : pathnode->path.disabled_nodes = disabled_nodes;
2151 1212 : pathnode->path.startup_cost = startup_cost;
2152 1212 : pathnode->path.total_cost = total_cost;
2153 1212 : pathnode->path.pathkeys = pathkeys;
2154 :
2155 1212 : pathnode->fdw_outerpath = fdw_outerpath;
2156 1212 : pathnode->fdw_restrictinfo = fdw_restrictinfo;
2157 1212 : pathnode->fdw_private = fdw_private;
2158 :
2159 1212 : return pathnode;
2160 : }
2161 :
2162 : /*
2163 : * create_foreign_upper_path
2164 : * Creates a path corresponding to an upper relation that's computed
2165 : * directly by an FDW, returning the pathnode.
2166 : *
2167 : * This function is never called from core Postgres; rather, it's expected to
2168 : * be called by the GetForeignUpperPaths function of a foreign data wrapper.
2169 : * We make the FDW supply all fields of the path, since we do not have any way
2170 : * to calculate them in core. However, there is a usually-sane default for
2171 : * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2172 : */
2173 : ForeignPath *
2174 588 : create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel,
2175 : PathTarget *target,
2176 : double rows, int disabled_nodes,
2177 : Cost startup_cost, Cost total_cost,
2178 : List *pathkeys,
2179 : Path *fdw_outerpath,
2180 : List *fdw_restrictinfo,
2181 : List *fdw_private)
2182 : {
2183 588 : ForeignPath *pathnode = makeNode(ForeignPath);
2184 :
2185 : /*
2186 : * Upper relations should never have any lateral references, since joining
2187 : * is complete.
2188 : */
2189 : Assert(bms_is_empty(rel->lateral_relids));
2190 :
2191 588 : pathnode->path.pathtype = T_ForeignScan;
2192 588 : pathnode->path.parent = rel;
2193 588 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2194 588 : pathnode->path.param_info = NULL;
2195 588 : pathnode->path.parallel_aware = false;
2196 588 : pathnode->path.parallel_safe = rel->consider_parallel;
2197 588 : pathnode->path.parallel_workers = 0;
2198 588 : pathnode->path.rows = rows;
2199 588 : pathnode->path.disabled_nodes = disabled_nodes;
2200 588 : pathnode->path.startup_cost = startup_cost;
2201 588 : pathnode->path.total_cost = total_cost;
2202 588 : pathnode->path.pathkeys = pathkeys;
2203 :
2204 588 : pathnode->fdw_outerpath = fdw_outerpath;
2205 588 : pathnode->fdw_restrictinfo = fdw_restrictinfo;
2206 588 : pathnode->fdw_private = fdw_private;
2207 :
2208 588 : return pathnode;
2209 : }
2210 :
2211 : /*
2212 : * calc_nestloop_required_outer
2213 : * Compute the required_outer set for a nestloop join path
2214 : *
2215 : * Note: when considering a child join, the inputs nonetheless use top-level
2216 : * parent relids
2217 : *
2218 : * Note: result must not share storage with either input
2219 : */
2220 : Relids
2221 3511242 : calc_nestloop_required_outer(Relids outerrelids,
2222 : Relids outer_paramrels,
2223 : Relids innerrelids,
2224 : Relids inner_paramrels)
2225 : {
2226 : Relids required_outer;
2227 :
2228 : /* inner_path can require rels from outer path, but not vice versa */
2229 : Assert(!bms_overlap(outer_paramrels, innerrelids));
2230 : /* easy case if inner path is not parameterized */
2231 3511242 : if (!inner_paramrels)
2232 2442538 : return bms_copy(outer_paramrels);
2233 : /* else, form the union ... */
2234 1068704 : required_outer = bms_union(outer_paramrels, inner_paramrels);
2235 : /* ... and remove any mention of now-satisfied outer rels */
2236 1068704 : required_outer = bms_del_members(required_outer,
2237 : outerrelids);
2238 1068704 : return required_outer;
2239 : }
2240 :
2241 : /*
2242 : * calc_non_nestloop_required_outer
2243 : * Compute the required_outer set for a merge or hash join path
2244 : *
2245 : * Note: result must not share storage with either input
2246 : */
2247 : Relids
2248 2359740 : calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
2249 : {
2250 2359740 : Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2251 2359740 : Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2252 : Relids innerrelids PG_USED_FOR_ASSERTS_ONLY;
2253 : Relids outerrelids PG_USED_FOR_ASSERTS_ONLY;
2254 : Relids required_outer;
2255 :
2256 : /*
2257 : * Any parameterization of the input paths refers to topmost parents of
2258 : * the relevant relations, because reparameterize_path_by_child() hasn't
2259 : * been called yet. So we must consider topmost parents of the relations
2260 : * being joined, too, while checking for disallowed parameterization
2261 : * cases.
2262 : */
2263 2359740 : if (inner_path->parent->top_parent_relids)
2264 159170 : innerrelids = inner_path->parent->top_parent_relids;
2265 : else
2266 2200570 : innerrelids = inner_path->parent->relids;
2267 :
2268 2359740 : if (outer_path->parent->top_parent_relids)
2269 159170 : outerrelids = outer_path->parent->top_parent_relids;
2270 : else
2271 2200570 : outerrelids = outer_path->parent->relids;
2272 :
2273 : /* neither path can require rels from the other */
2274 : Assert(!bms_overlap(outer_paramrels, innerrelids));
2275 : Assert(!bms_overlap(inner_paramrels, outerrelids));
2276 : /* form the union ... */
2277 2359740 : required_outer = bms_union(outer_paramrels, inner_paramrels);
2278 : /* we do not need an explicit test for empty; bms_union gets it right */
2279 2359740 : return required_outer;
2280 : }
2281 :
2282 : /*
2283 : * create_nestloop_path
2284 : * Creates a pathnode corresponding to a nestloop join between two
2285 : * relations.
2286 : *
2287 : * 'joinrel' is the join relation.
2288 : * 'jointype' is the type of join required
2289 : * 'workspace' is the result from initial_cost_nestloop
2290 : * 'extra' contains various information about the join
2291 : * 'outer_path' is the outer path
2292 : * 'inner_path' is the inner path
2293 : * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2294 : * 'pathkeys' are the path keys of the new join path
2295 : * 'required_outer' is the set of required outer rels
2296 : *
2297 : * Returns the resulting path node.
2298 : */
2299 : NestPath *
2300 1523128 : create_nestloop_path(PlannerInfo *root,
2301 : RelOptInfo *joinrel,
2302 : JoinType jointype,
2303 : JoinCostWorkspace *workspace,
2304 : JoinPathExtraData *extra,
2305 : Path *outer_path,
2306 : Path *inner_path,
2307 : List *restrict_clauses,
2308 : List *pathkeys,
2309 : Relids required_outer)
2310 : {
2311 1523128 : NestPath *pathnode = makeNode(NestPath);
2312 1523128 : Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2313 : Relids outerrelids;
2314 :
2315 : /*
2316 : * Paths are parameterized by top-level parents, so run parameterization
2317 : * tests on the parent relids.
2318 : */
2319 1523128 : if (outer_path->parent->top_parent_relids)
2320 76354 : outerrelids = outer_path->parent->top_parent_relids;
2321 : else
2322 1446774 : outerrelids = outer_path->parent->relids;
2323 :
2324 : /*
2325 : * If the inner path is parameterized by the outer, we must drop any
2326 : * restrict_clauses that are due to be moved into the inner path. We have
2327 : * to do this now, rather than postpone the work till createplan time,
2328 : * because the restrict_clauses list can affect the size and cost
2329 : * estimates for this path. We detect such clauses by checking for serial
2330 : * number match to clauses already enforced in the inner path.
2331 : */
2332 1523128 : if (bms_overlap(inner_req_outer, outerrelids))
2333 : {
2334 411384 : Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2335 411384 : List *jclauses = NIL;
2336 : ListCell *lc;
2337 :
2338 912540 : foreach(lc, restrict_clauses)
2339 : {
2340 501156 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2341 :
2342 501156 : if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2343 65690 : jclauses = lappend(jclauses, rinfo);
2344 : }
2345 411384 : restrict_clauses = jclauses;
2346 : }
2347 :
2348 1523128 : pathnode->jpath.path.pathtype = T_NestLoop;
2349 1523128 : pathnode->jpath.path.parent = joinrel;
2350 1523128 : pathnode->jpath.path.pathtarget = joinrel->reltarget;
2351 1523128 : pathnode->jpath.path.param_info =
2352 1523128 : get_joinrel_parampathinfo(root,
2353 : joinrel,
2354 : outer_path,
2355 : inner_path,
2356 : extra->sjinfo,
2357 : required_outer,
2358 : &restrict_clauses);
2359 1523128 : pathnode->jpath.path.parallel_aware = false;
2360 4436802 : pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2361 1523128 : outer_path->parallel_safe && inner_path->parallel_safe;
2362 : /* This is a foolish way to estimate parallel_workers, but for now... */
2363 1523128 : pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2364 1523128 : pathnode->jpath.path.pathkeys = pathkeys;
2365 1523128 : pathnode->jpath.jointype = jointype;
2366 1523128 : pathnode->jpath.inner_unique = extra->inner_unique;
2367 1523128 : pathnode->jpath.outerjoinpath = outer_path;
2368 1523128 : pathnode->jpath.innerjoinpath = inner_path;
2369 1523128 : pathnode->jpath.joinrestrictinfo = restrict_clauses;
2370 :
2371 1523128 : final_cost_nestloop(root, pathnode, workspace, extra);
2372 :
2373 1523128 : return pathnode;
2374 : }
2375 :
2376 : /*
2377 : * create_mergejoin_path
2378 : * Creates a pathnode corresponding to a mergejoin join between
2379 : * two relations
2380 : *
2381 : * 'joinrel' is the join relation
2382 : * 'jointype' is the type of join required
2383 : * 'workspace' is the result from initial_cost_mergejoin
2384 : * 'extra' contains various information about the join
2385 : * 'outer_path' is the outer path
2386 : * 'inner_path' is the inner path
2387 : * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2388 : * 'pathkeys' are the path keys of the new join path
2389 : * 'required_outer' is the set of required outer rels
2390 : * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
2391 : * (this should be a subset of the restrict_clauses list)
2392 : * 'outersortkeys' are the sort varkeys for the outer relation
2393 : * 'innersortkeys' are the sort varkeys for the inner relation
2394 : * 'outer_presorted_keys' is the number of presorted keys of the outer path
2395 : */
2396 : MergePath *
2397 473058 : create_mergejoin_path(PlannerInfo *root,
2398 : RelOptInfo *joinrel,
2399 : JoinType jointype,
2400 : JoinCostWorkspace *workspace,
2401 : JoinPathExtraData *extra,
2402 : Path *outer_path,
2403 : Path *inner_path,
2404 : List *restrict_clauses,
2405 : List *pathkeys,
2406 : Relids required_outer,
2407 : List *mergeclauses,
2408 : List *outersortkeys,
2409 : List *innersortkeys,
2410 : int outer_presorted_keys)
2411 : {
2412 473058 : MergePath *pathnode = makeNode(MergePath);
2413 :
2414 473058 : pathnode->jpath.path.pathtype = T_MergeJoin;
2415 473058 : pathnode->jpath.path.parent = joinrel;
2416 473058 : pathnode->jpath.path.pathtarget = joinrel->reltarget;
2417 473058 : pathnode->jpath.path.param_info =
2418 473058 : get_joinrel_parampathinfo(root,
2419 : joinrel,
2420 : outer_path,
2421 : inner_path,
2422 : extra->sjinfo,
2423 : required_outer,
2424 : &restrict_clauses);
2425 473058 : pathnode->jpath.path.parallel_aware = false;
2426 1381522 : pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2427 473058 : outer_path->parallel_safe && inner_path->parallel_safe;
2428 : /* This is a foolish way to estimate parallel_workers, but for now... */
2429 473058 : pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2430 473058 : pathnode->jpath.path.pathkeys = pathkeys;
2431 473058 : pathnode->jpath.jointype = jointype;
2432 473058 : pathnode->jpath.inner_unique = extra->inner_unique;
2433 473058 : pathnode->jpath.outerjoinpath = outer_path;
2434 473058 : pathnode->jpath.innerjoinpath = inner_path;
2435 473058 : pathnode->jpath.joinrestrictinfo = restrict_clauses;
2436 473058 : pathnode->path_mergeclauses = mergeclauses;
2437 473058 : pathnode->outersortkeys = outersortkeys;
2438 473058 : pathnode->innersortkeys = innersortkeys;
2439 473058 : pathnode->outer_presorted_keys = outer_presorted_keys;
2440 : /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2441 : /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2442 :
2443 473058 : final_cost_mergejoin(root, pathnode, workspace, extra);
2444 :
2445 473058 : return pathnode;
2446 : }
2447 :
2448 : /*
2449 : * create_hashjoin_path
2450 : * Creates a pathnode corresponding to a hash join between two relations.
2451 : *
2452 : * 'joinrel' is the join relation
2453 : * 'jointype' is the type of join required
2454 : * 'workspace' is the result from initial_cost_hashjoin
2455 : * 'extra' contains various information about the join
2456 : * 'outer_path' is the cheapest outer path
2457 : * 'inner_path' is the cheapest inner path
2458 : * 'parallel_hash' to select Parallel Hash of inner path (shared hash table)
2459 : * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2460 : * 'required_outer' is the set of required outer rels
2461 : * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
2462 : * (this should be a subset of the restrict_clauses list)
2463 : */
2464 : HashPath *
2465 466218 : create_hashjoin_path(PlannerInfo *root,
2466 : RelOptInfo *joinrel,
2467 : JoinType jointype,
2468 : JoinCostWorkspace *workspace,
2469 : JoinPathExtraData *extra,
2470 : Path *outer_path,
2471 : Path *inner_path,
2472 : bool parallel_hash,
2473 : List *restrict_clauses,
2474 : Relids required_outer,
2475 : List *hashclauses)
2476 : {
2477 466218 : HashPath *pathnode = makeNode(HashPath);
2478 :
2479 466218 : pathnode->jpath.path.pathtype = T_HashJoin;
2480 466218 : pathnode->jpath.path.parent = joinrel;
2481 466218 : pathnode->jpath.path.pathtarget = joinrel->reltarget;
2482 466218 : pathnode->jpath.path.param_info =
2483 466218 : get_joinrel_parampathinfo(root,
2484 : joinrel,
2485 : outer_path,
2486 : inner_path,
2487 : extra->sjinfo,
2488 : required_outer,
2489 : &restrict_clauses);
2490 466218 : pathnode->jpath.path.parallel_aware =
2491 466218 : joinrel->consider_parallel && parallel_hash;
2492 1360774 : pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2493 466218 : outer_path->parallel_safe && inner_path->parallel_safe;
2494 : /* This is a foolish way to estimate parallel_workers, but for now... */
2495 466218 : pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2496 :
2497 : /*
2498 : * A hashjoin never has pathkeys, since its output ordering is
2499 : * unpredictable due to possible batching. XXX If the inner relation is
2500 : * small enough, we could instruct the executor that it must not batch,
2501 : * and then we could assume that the output inherits the outer relation's
2502 : * ordering, which might save a sort step. However there is considerable
2503 : * downside if our estimate of the inner relation size is badly off. For
2504 : * the moment we don't risk it. (Note also that if we wanted to take this
2505 : * seriously, joinpath.c would have to consider many more paths for the
2506 : * outer rel than it does now.)
2507 : */
2508 466218 : pathnode->jpath.path.pathkeys = NIL;
2509 466218 : pathnode->jpath.jointype = jointype;
2510 466218 : pathnode->jpath.inner_unique = extra->inner_unique;
2511 466218 : pathnode->jpath.outerjoinpath = outer_path;
2512 466218 : pathnode->jpath.innerjoinpath = inner_path;
2513 466218 : pathnode->jpath.joinrestrictinfo = restrict_clauses;
2514 466218 : pathnode->path_hashclauses = hashclauses;
2515 : /* final_cost_hashjoin will fill in pathnode->num_batches */
2516 :
2517 466218 : final_cost_hashjoin(root, pathnode, workspace, extra);
2518 :
2519 466218 : return pathnode;
2520 : }
2521 :
2522 : /*
2523 : * create_projection_path
2524 : * Creates a pathnode that represents performing a projection.
2525 : *
2526 : * 'rel' is the parent relation associated with the result
2527 : * 'subpath' is the path representing the source of data
2528 : * 'target' is the PathTarget to be computed
2529 : */
2530 : ProjectionPath *
2531 428736 : create_projection_path(PlannerInfo *root,
2532 : RelOptInfo *rel,
2533 : Path *subpath,
2534 : PathTarget *target)
2535 : {
2536 428736 : ProjectionPath *pathnode = makeNode(ProjectionPath);
2537 : PathTarget *oldtarget;
2538 :
2539 : /*
2540 : * We mustn't put a ProjectionPath directly above another; it's useless
2541 : * and will confuse create_projection_plan. Rather than making sure all
2542 : * callers handle that, let's implement it here, by stripping off any
2543 : * ProjectionPath in what we're given. Given this rule, there won't be
2544 : * more than one.
2545 : */
2546 428736 : if (IsA(subpath, ProjectionPath))
2547 : {
2548 24 : ProjectionPath *subpp = (ProjectionPath *) subpath;
2549 :
2550 : Assert(subpp->path.parent == rel);
2551 24 : subpath = subpp->subpath;
2552 : Assert(!IsA(subpath, ProjectionPath));
2553 : }
2554 :
2555 428736 : pathnode->path.pathtype = T_Result;
2556 428736 : pathnode->path.parent = rel;
2557 428736 : pathnode->path.pathtarget = target;
2558 428736 : pathnode->path.param_info = subpath->param_info;
2559 428736 : pathnode->path.parallel_aware = false;
2560 1002544 : pathnode->path.parallel_safe = rel->consider_parallel &&
2561 565194 : subpath->parallel_safe &&
2562 136458 : is_parallel_safe(root, (Node *) target->exprs);
2563 428736 : pathnode->path.parallel_workers = subpath->parallel_workers;
2564 : /* Projection does not change the sort order */
2565 428736 : pathnode->path.pathkeys = subpath->pathkeys;
2566 :
2567 428736 : pathnode->subpath = subpath;
2568 :
2569 : /*
2570 : * We might not need a separate Result node. If the input plan node type
2571 : * can project, we can just tell it to project something else. Or, if it
2572 : * can't project but the desired target has the same expression list as
2573 : * what the input will produce anyway, we can still give it the desired
2574 : * tlist (possibly changing its ressortgroupref labels, but nothing else).
2575 : * Note: in the latter case, create_projection_plan has to recheck our
2576 : * conclusion; see comments therein.
2577 : */
2578 428736 : oldtarget = subpath->pathtarget;
2579 446002 : if (is_projection_capable_path(subpath) ||
2580 17266 : equal(oldtarget->exprs, target->exprs))
2581 : {
2582 : /* No separate Result node needed */
2583 413472 : pathnode->dummypp = true;
2584 :
2585 : /*
2586 : * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2587 : */
2588 413472 : pathnode->path.rows = subpath->rows;
2589 413472 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
2590 413472 : pathnode->path.startup_cost = subpath->startup_cost +
2591 413472 : (target->cost.startup - oldtarget->cost.startup);
2592 413472 : pathnode->path.total_cost = subpath->total_cost +
2593 413472 : (target->cost.startup - oldtarget->cost.startup) +
2594 413472 : (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2595 : }
2596 : else
2597 : {
2598 : /* We really do need the Result node */
2599 15264 : pathnode->dummypp = false;
2600 :
2601 : /*
2602 : * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2603 : * evaluating the tlist. There is no qual to worry about.
2604 : */
2605 15264 : pathnode->path.rows = subpath->rows;
2606 15264 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
2607 15264 : pathnode->path.startup_cost = subpath->startup_cost +
2608 15264 : target->cost.startup;
2609 15264 : pathnode->path.total_cost = subpath->total_cost +
2610 15264 : target->cost.startup +
2611 15264 : (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2612 : }
2613 :
2614 428736 : return pathnode;
2615 : }
2616 :
2617 : /*
2618 : * apply_projection_to_path
2619 : * Add a projection step, or just apply the target directly to given path.
2620 : *
2621 : * This has the same net effect as create_projection_path(), except that if
2622 : * a separate Result plan node isn't needed, we just replace the given path's
2623 : * pathtarget with the desired one. This must be used only when the caller
2624 : * knows that the given path isn't referenced elsewhere and so can be modified
2625 : * in-place.
2626 : *
2627 : * If the input path is a GatherPath or GatherMergePath, we try to push the
2628 : * new target down to its input as well; this is a yet more invasive
2629 : * modification of the input path, which create_projection_path() can't do.
2630 : *
2631 : * Note that we mustn't change the source path's parent link; so when it is
2632 : * add_path'd to "rel" things will be a bit inconsistent. So far that has
2633 : * not caused any trouble.
2634 : *
2635 : * 'rel' is the parent relation associated with the result
2636 : * 'path' is the path representing the source of data
2637 : * 'target' is the PathTarget to be computed
2638 : */
2639 : Path *
2640 14110 : apply_projection_to_path(PlannerInfo *root,
2641 : RelOptInfo *rel,
2642 : Path *path,
2643 : PathTarget *target)
2644 : {
2645 : QualCost oldcost;
2646 :
2647 : /*
2648 : * If given path can't project, we might need a Result node, so make a
2649 : * separate ProjectionPath.
2650 : */
2651 14110 : if (!is_projection_capable_path(path))
2652 1584 : return (Path *) create_projection_path(root, rel, path, target);
2653 :
2654 : /*
2655 : * We can just jam the desired tlist into the existing path, being sure to
2656 : * update its cost estimates appropriately.
2657 : */
2658 12526 : oldcost = path->pathtarget->cost;
2659 12526 : path->pathtarget = target;
2660 :
2661 12526 : path->startup_cost += target->cost.startup - oldcost.startup;
2662 12526 : path->total_cost += target->cost.startup - oldcost.startup +
2663 12526 : (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2664 :
2665 : /*
2666 : * If the path happens to be a Gather or GatherMerge path, we'd like to
2667 : * arrange for the subpath to return the required target list so that
2668 : * workers can help project. But if there is something that is not
2669 : * parallel-safe in the target expressions, then we can't.
2670 : */
2671 12550 : if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2672 24 : is_parallel_safe(root, (Node *) target->exprs))
2673 : {
2674 : /*
2675 : * We always use create_projection_path here, even if the subpath is
2676 : * projection-capable, so as to avoid modifying the subpath in place.
2677 : * It seems unlikely at present that there could be any other
2678 : * references to the subpath, but better safe than sorry.
2679 : *
2680 : * Note that we don't change the parallel path's cost estimates; it
2681 : * might be appropriate to do so, to reflect the fact that the bulk of
2682 : * the target evaluation will happen in workers.
2683 : */
2684 24 : if (IsA(path, GatherPath))
2685 : {
2686 0 : GatherPath *gpath = (GatherPath *) path;
2687 :
2688 0 : gpath->subpath = (Path *)
2689 0 : create_projection_path(root,
2690 0 : gpath->subpath->parent,
2691 : gpath->subpath,
2692 : target);
2693 : }
2694 : else
2695 : {
2696 24 : GatherMergePath *gmpath = (GatherMergePath *) path;
2697 :
2698 24 : gmpath->subpath = (Path *)
2699 24 : create_projection_path(root,
2700 24 : gmpath->subpath->parent,
2701 : gmpath->subpath,
2702 : target);
2703 : }
2704 : }
2705 12502 : else if (path->parallel_safe &&
2706 4766 : !is_parallel_safe(root, (Node *) target->exprs))
2707 : {
2708 : /*
2709 : * We're inserting a parallel-restricted target list into a path
2710 : * currently marked parallel-safe, so we have to mark it as no longer
2711 : * safe.
2712 : */
2713 12 : path->parallel_safe = false;
2714 : }
2715 :
2716 12526 : return path;
2717 : }
2718 :
2719 : /*
2720 : * create_set_projection_path
2721 : * Creates a pathnode that represents performing a projection that
2722 : * includes set-returning functions.
2723 : *
2724 : * 'rel' is the parent relation associated with the result
2725 : * 'subpath' is the path representing the source of data
2726 : * 'target' is the PathTarget to be computed
2727 : */
2728 : ProjectSetPath *
2729 12182 : create_set_projection_path(PlannerInfo *root,
2730 : RelOptInfo *rel,
2731 : Path *subpath,
2732 : PathTarget *target)
2733 : {
2734 12182 : ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2735 : double tlist_rows;
2736 : ListCell *lc;
2737 :
2738 12182 : pathnode->path.pathtype = T_ProjectSet;
2739 12182 : pathnode->path.parent = rel;
2740 12182 : pathnode->path.pathtarget = target;
2741 : /* For now, assume we are above any joins, so no parameterization */
2742 12182 : pathnode->path.param_info = NULL;
2743 12182 : pathnode->path.parallel_aware = false;
2744 28962 : pathnode->path.parallel_safe = rel->consider_parallel &&
2745 16744 : subpath->parallel_safe &&
2746 4562 : is_parallel_safe(root, (Node *) target->exprs);
2747 12182 : pathnode->path.parallel_workers = subpath->parallel_workers;
2748 : /* Projection does not change the sort order XXX? */
2749 12182 : pathnode->path.pathkeys = subpath->pathkeys;
2750 :
2751 12182 : pathnode->subpath = subpath;
2752 :
2753 : /*
2754 : * Estimate number of rows produced by SRFs for each row of input; if
2755 : * there's more than one in this node, use the maximum.
2756 : */
2757 12182 : tlist_rows = 1;
2758 26426 : foreach(lc, target->exprs)
2759 : {
2760 14244 : Node *node = (Node *) lfirst(lc);
2761 : double itemrows;
2762 :
2763 14244 : itemrows = expression_returns_set_rows(root, node);
2764 14244 : if (tlist_rows < itemrows)
2765 11732 : tlist_rows = itemrows;
2766 : }
2767 :
2768 : /*
2769 : * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
2770 : * per input row, and half of cpu_tuple_cost for each added output row.
2771 : * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
2772 : * this estimate later.
2773 : */
2774 12182 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
2775 12182 : pathnode->path.rows = subpath->rows * tlist_rows;
2776 12182 : pathnode->path.startup_cost = subpath->startup_cost +
2777 12182 : target->cost.startup;
2778 12182 : pathnode->path.total_cost = subpath->total_cost +
2779 12182 : target->cost.startup +
2780 12182 : (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
2781 12182 : (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
2782 :
2783 12182 : return pathnode;
2784 : }
2785 :
2786 : /*
2787 : * create_incremental_sort_path
2788 : * Creates a pathnode that represents performing an incremental sort.
2789 : *
2790 : * 'rel' is the parent relation associated with the result
2791 : * 'subpath' is the path representing the source of data
2792 : * 'pathkeys' represents the desired sort order
2793 : * 'presorted_keys' is the number of keys by which the input path is
2794 : * already sorted
2795 : * 'limit_tuples' is the estimated bound on the number of output tuples,
2796 : * or -1 if no LIMIT or couldn't estimate
2797 : */
2798 : IncrementalSortPath *
2799 10786 : create_incremental_sort_path(PlannerInfo *root,
2800 : RelOptInfo *rel,
2801 : Path *subpath,
2802 : List *pathkeys,
2803 : int presorted_keys,
2804 : double limit_tuples)
2805 : {
2806 10786 : IncrementalSortPath *sort = makeNode(IncrementalSortPath);
2807 10786 : SortPath *pathnode = &sort->spath;
2808 :
2809 10786 : pathnode->path.pathtype = T_IncrementalSort;
2810 10786 : pathnode->path.parent = rel;
2811 : /* Sort doesn't project, so use source path's pathtarget */
2812 10786 : pathnode->path.pathtarget = subpath->pathtarget;
2813 10786 : pathnode->path.param_info = subpath->param_info;
2814 10786 : pathnode->path.parallel_aware = false;
2815 16598 : pathnode->path.parallel_safe = rel->consider_parallel &&
2816 5812 : subpath->parallel_safe;
2817 10786 : pathnode->path.parallel_workers = subpath->parallel_workers;
2818 10786 : pathnode->path.pathkeys = pathkeys;
2819 :
2820 10786 : pathnode->subpath = subpath;
2821 :
2822 10786 : cost_incremental_sort(&pathnode->path,
2823 : root, pathkeys, presorted_keys,
2824 : subpath->disabled_nodes,
2825 : subpath->startup_cost,
2826 : subpath->total_cost,
2827 : subpath->rows,
2828 10786 : subpath->pathtarget->width,
2829 : 0.0, /* XXX comparison_cost shouldn't be 0? */
2830 : work_mem, limit_tuples);
2831 :
2832 10786 : sort->nPresortedCols = presorted_keys;
2833 :
2834 10786 : return sort;
2835 : }
2836 :
2837 : /*
2838 : * create_sort_path
2839 : * Creates a pathnode that represents performing an explicit sort.
2840 : *
2841 : * 'rel' is the parent relation associated with the result
2842 : * 'subpath' is the path representing the source of data
2843 : * 'pathkeys' represents the desired sort order
2844 : * 'limit_tuples' is the estimated bound on the number of output tuples,
2845 : * or -1 if no LIMIT or couldn't estimate
2846 : */
2847 : SortPath *
2848 118252 : create_sort_path(PlannerInfo *root,
2849 : RelOptInfo *rel,
2850 : Path *subpath,
2851 : List *pathkeys,
2852 : double limit_tuples)
2853 : {
2854 118252 : SortPath *pathnode = makeNode(SortPath);
2855 :
2856 118252 : pathnode->path.pathtype = T_Sort;
2857 118252 : pathnode->path.parent = rel;
2858 : /* Sort doesn't project, so use source path's pathtarget */
2859 118252 : pathnode->path.pathtarget = subpath->pathtarget;
2860 118252 : pathnode->path.param_info = subpath->param_info;
2861 118252 : pathnode->path.parallel_aware = false;
2862 206884 : pathnode->path.parallel_safe = rel->consider_parallel &&
2863 88632 : subpath->parallel_safe;
2864 118252 : pathnode->path.parallel_workers = subpath->parallel_workers;
2865 118252 : pathnode->path.pathkeys = pathkeys;
2866 :
2867 118252 : pathnode->subpath = subpath;
2868 :
2869 118252 : cost_sort(&pathnode->path, root, pathkeys,
2870 : subpath->disabled_nodes,
2871 : subpath->total_cost,
2872 : subpath->rows,
2873 118252 : subpath->pathtarget->width,
2874 : 0.0, /* XXX comparison_cost shouldn't be 0? */
2875 : work_mem, limit_tuples);
2876 :
2877 118252 : return pathnode;
2878 : }
2879 :
2880 : /*
2881 : * create_group_path
2882 : * Creates a pathnode that represents performing grouping of presorted input
2883 : *
2884 : * 'rel' is the parent relation associated with the result
2885 : * 'subpath' is the path representing the source of data
2886 : * 'target' is the PathTarget to be computed
2887 : * 'groupClause' is a list of SortGroupClause's representing the grouping
2888 : * 'qual' is the HAVING quals if any
2889 : * 'numGroups' is the estimated number of groups
2890 : */
2891 : GroupPath *
2892 1226 : create_group_path(PlannerInfo *root,
2893 : RelOptInfo *rel,
2894 : Path *subpath,
2895 : List *groupClause,
2896 : List *qual,
2897 : double numGroups)
2898 : {
2899 1226 : GroupPath *pathnode = makeNode(GroupPath);
2900 1226 : PathTarget *target = rel->reltarget;
2901 :
2902 1226 : pathnode->path.pathtype = T_Group;
2903 1226 : pathnode->path.parent = rel;
2904 1226 : pathnode->path.pathtarget = target;
2905 : /* For now, assume we are above any joins, so no parameterization */
2906 1226 : pathnode->path.param_info = NULL;
2907 1226 : pathnode->path.parallel_aware = false;
2908 1970 : pathnode->path.parallel_safe = rel->consider_parallel &&
2909 744 : subpath->parallel_safe;
2910 1226 : pathnode->path.parallel_workers = subpath->parallel_workers;
2911 : /* Group doesn't change sort ordering */
2912 1226 : pathnode->path.pathkeys = subpath->pathkeys;
2913 :
2914 1226 : pathnode->subpath = subpath;
2915 :
2916 1226 : pathnode->groupClause = groupClause;
2917 1226 : pathnode->qual = qual;
2918 :
2919 1226 : cost_group(&pathnode->path, root,
2920 : list_length(groupClause),
2921 : numGroups,
2922 : qual,
2923 : subpath->disabled_nodes,
2924 : subpath->startup_cost, subpath->total_cost,
2925 : subpath->rows);
2926 :
2927 : /* add tlist eval cost for each output row */
2928 1226 : pathnode->path.startup_cost += target->cost.startup;
2929 1226 : pathnode->path.total_cost += target->cost.startup +
2930 1226 : target->cost.per_tuple * pathnode->path.rows;
2931 :
2932 1226 : return pathnode;
2933 : }
2934 :
2935 : /*
2936 : * create_unique_path
2937 : * Creates a pathnode that represents performing an explicit Unique step
2938 : * on presorted input.
2939 : *
2940 : * 'rel' is the parent relation associated with the result
2941 : * 'subpath' is the path representing the source of data
2942 : * 'numCols' is the number of grouping columns
2943 : * 'numGroups' is the estimated number of groups
2944 : *
2945 : * The input path must be sorted on the grouping columns, plus possibly
2946 : * additional columns; so the first numCols pathkeys are the grouping columns
2947 : */
2948 : UniquePath *
2949 22878 : create_unique_path(PlannerInfo *root,
2950 : RelOptInfo *rel,
2951 : Path *subpath,
2952 : int numCols,
2953 : double numGroups)
2954 : {
2955 22878 : UniquePath *pathnode = makeNode(UniquePath);
2956 :
2957 22878 : pathnode->path.pathtype = T_Unique;
2958 22878 : pathnode->path.parent = rel;
2959 : /* Unique doesn't project, so use source path's pathtarget */
2960 22878 : pathnode->path.pathtarget = subpath->pathtarget;
2961 22878 : pathnode->path.param_info = subpath->param_info;
2962 22878 : pathnode->path.parallel_aware = false;
2963 41406 : pathnode->path.parallel_safe = rel->consider_parallel &&
2964 18528 : subpath->parallel_safe;
2965 22878 : pathnode->path.parallel_workers = subpath->parallel_workers;
2966 : /* Unique doesn't change the input ordering */
2967 22878 : pathnode->path.pathkeys = subpath->pathkeys;
2968 :
2969 22878 : pathnode->subpath = subpath;
2970 22878 : pathnode->numkeys = numCols;
2971 :
2972 : /*
2973 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
2974 : * all columns get compared at most of the tuples. (XXX probably this is
2975 : * an overestimate.)
2976 : */
2977 22878 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
2978 22878 : pathnode->path.startup_cost = subpath->startup_cost;
2979 22878 : pathnode->path.total_cost = subpath->total_cost +
2980 22878 : cpu_operator_cost * subpath->rows * numCols;
2981 22878 : pathnode->path.rows = numGroups;
2982 :
2983 22878 : return pathnode;
2984 : }
2985 :
2986 : /*
2987 : * create_agg_path
2988 : * Creates a pathnode that represents performing aggregation/grouping
2989 : *
2990 : * 'rel' is the parent relation associated with the result
2991 : * 'subpath' is the path representing the source of data
2992 : * 'target' is the PathTarget to be computed
2993 : * 'aggstrategy' is the Agg node's basic implementation strategy
2994 : * 'aggsplit' is the Agg node's aggregate-splitting mode
2995 : * 'groupClause' is a list of SortGroupClause's representing the grouping
2996 : * 'qual' is the HAVING quals if any
2997 : * 'aggcosts' contains cost info about the aggregate functions to be computed
2998 : * 'numGroups' is the estimated number of groups (1 if not grouping)
2999 : */
3000 : AggPath *
3001 89474 : create_agg_path(PlannerInfo *root,
3002 : RelOptInfo *rel,
3003 : Path *subpath,
3004 : PathTarget *target,
3005 : AggStrategy aggstrategy,
3006 : AggSplit aggsplit,
3007 : List *groupClause,
3008 : List *qual,
3009 : const AggClauseCosts *aggcosts,
3010 : double numGroups)
3011 : {
3012 89474 : AggPath *pathnode = makeNode(AggPath);
3013 :
3014 89474 : pathnode->path.pathtype = T_Agg;
3015 89474 : pathnode->path.parent = rel;
3016 89474 : pathnode->path.pathtarget = target;
3017 89474 : pathnode->path.param_info = subpath->param_info;
3018 89474 : pathnode->path.parallel_aware = false;
3019 152456 : pathnode->path.parallel_safe = rel->consider_parallel &&
3020 62982 : subpath->parallel_safe;
3021 89474 : pathnode->path.parallel_workers = subpath->parallel_workers;
3022 :
3023 89474 : if (aggstrategy == AGG_SORTED)
3024 : {
3025 : /*
3026 : * Attempt to preserve the order of the subpath. Additional pathkeys
3027 : * may have been added in adjust_group_pathkeys_for_groupagg() to
3028 : * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3029 : * belong to columns within the aggregate function, so we must strip
3030 : * these additional pathkeys off as those columns are unavailable
3031 : * above the aggregate node.
3032 : */
3033 14526 : if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3034 808 : pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3035 : root->num_groupby_pathkeys);
3036 : else
3037 13718 : pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3038 : }
3039 : else
3040 74948 : pathnode->path.pathkeys = NIL; /* output is unordered */
3041 :
3042 89474 : pathnode->subpath = subpath;
3043 :
3044 89474 : pathnode->aggstrategy = aggstrategy;
3045 89474 : pathnode->aggsplit = aggsplit;
3046 89474 : pathnode->numGroups = numGroups;
3047 89474 : pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3048 89474 : pathnode->groupClause = groupClause;
3049 89474 : pathnode->qual = qual;
3050 :
3051 89474 : cost_agg(&pathnode->path, root,
3052 : aggstrategy, aggcosts,
3053 : list_length(groupClause), numGroups,
3054 : qual,
3055 : subpath->disabled_nodes,
3056 : subpath->startup_cost, subpath->total_cost,
3057 89474 : subpath->rows, subpath->pathtarget->width);
3058 :
3059 : /* add tlist eval cost for each output row */
3060 89474 : pathnode->path.startup_cost += target->cost.startup;
3061 89474 : pathnode->path.total_cost += target->cost.startup +
3062 89474 : target->cost.per_tuple * pathnode->path.rows;
3063 :
3064 89474 : return pathnode;
3065 : }
3066 :
3067 : /*
3068 : * create_groupingsets_path
3069 : * Creates a pathnode that represents performing GROUPING SETS aggregation
3070 : *
3071 : * GroupingSetsPath represents sorted grouping with one or more grouping sets.
3072 : * The input path's result must be sorted to match the last entry in
3073 : * rollup_groupclauses.
3074 : *
3075 : * 'rel' is the parent relation associated with the result
3076 : * 'subpath' is the path representing the source of data
3077 : * 'target' is the PathTarget to be computed
3078 : * 'having_qual' is the HAVING quals if any
3079 : * 'rollups' is a list of RollupData nodes
3080 : * 'agg_costs' contains cost info about the aggregate functions to be computed
3081 : */
3082 : GroupingSetsPath *
3083 2406 : create_groupingsets_path(PlannerInfo *root,
3084 : RelOptInfo *rel,
3085 : Path *subpath,
3086 : List *having_qual,
3087 : AggStrategy aggstrategy,
3088 : List *rollups,
3089 : const AggClauseCosts *agg_costs)
3090 : {
3091 2406 : GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3092 2406 : PathTarget *target = rel->reltarget;
3093 : ListCell *lc;
3094 2406 : bool is_first = true;
3095 2406 : bool is_first_sort = true;
3096 :
3097 : /* The topmost generated Plan node will be an Agg */
3098 2406 : pathnode->path.pathtype = T_Agg;
3099 2406 : pathnode->path.parent = rel;
3100 2406 : pathnode->path.pathtarget = target;
3101 2406 : pathnode->path.param_info = subpath->param_info;
3102 2406 : pathnode->path.parallel_aware = false;
3103 3552 : pathnode->path.parallel_safe = rel->consider_parallel &&
3104 1146 : subpath->parallel_safe;
3105 2406 : pathnode->path.parallel_workers = subpath->parallel_workers;
3106 2406 : pathnode->subpath = subpath;
3107 :
3108 : /*
3109 : * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3110 : * to AGG_HASHED, here if possible.
3111 : */
3112 3438 : if (aggstrategy == AGG_SORTED &&
3113 1032 : list_length(rollups) == 1 &&
3114 534 : ((RollupData *) linitial(rollups))->groupClause == NIL)
3115 60 : aggstrategy = AGG_PLAIN;
3116 :
3117 3440 : if (aggstrategy == AGG_MIXED &&
3118 1034 : list_length(rollups) == 1)
3119 0 : aggstrategy = AGG_HASHED;
3120 :
3121 : /*
3122 : * Output will be in sorted order by group_pathkeys if, and only if, there
3123 : * is a single rollup operation on a non-empty list of grouping
3124 : * expressions.
3125 : */
3126 2406 : if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3127 474 : pathnode->path.pathkeys = root->group_pathkeys;
3128 : else
3129 1932 : pathnode->path.pathkeys = NIL;
3130 :
3131 2406 : pathnode->aggstrategy = aggstrategy;
3132 2406 : pathnode->rollups = rollups;
3133 2406 : pathnode->qual = having_qual;
3134 2406 : pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3135 :
3136 : Assert(rollups != NIL);
3137 : Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3138 : Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3139 :
3140 8222 : foreach(lc, rollups)
3141 : {
3142 5816 : RollupData *rollup = lfirst(lc);
3143 5816 : List *gsets = rollup->gsets;
3144 5816 : int numGroupCols = list_length(linitial(gsets));
3145 :
3146 : /*
3147 : * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3148 : * (already-sorted) input, and following ones do their own sort.
3149 : *
3150 : * In AGG_HASHED mode, there is one rollup for each grouping set.
3151 : *
3152 : * In AGG_MIXED mode, the first rollups are hashed, the first
3153 : * non-hashed one takes the (already-sorted) input, and following ones
3154 : * do their own sort.
3155 : */
3156 5816 : if (is_first)
3157 : {
3158 2406 : cost_agg(&pathnode->path, root,
3159 : aggstrategy,
3160 : agg_costs,
3161 : numGroupCols,
3162 : rollup->numGroups,
3163 : having_qual,
3164 : subpath->disabled_nodes,
3165 : subpath->startup_cost,
3166 : subpath->total_cost,
3167 : subpath->rows,
3168 2406 : subpath->pathtarget->width);
3169 2406 : is_first = false;
3170 2406 : if (!rollup->is_hashed)
3171 1032 : is_first_sort = false;
3172 : }
3173 : else
3174 : {
3175 : Path sort_path; /* dummy for result of cost_sort */
3176 : Path agg_path; /* dummy for result of cost_agg */
3177 :
3178 3410 : if (rollup->is_hashed || is_first_sort)
3179 : {
3180 : /*
3181 : * Account for cost of aggregation, but don't charge input
3182 : * cost again
3183 : */
3184 2612 : cost_agg(&agg_path, root,
3185 2612 : rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3186 : agg_costs,
3187 : numGroupCols,
3188 : rollup->numGroups,
3189 : having_qual,
3190 : 0, 0.0, 0.0,
3191 : subpath->rows,
3192 2612 : subpath->pathtarget->width);
3193 2612 : if (!rollup->is_hashed)
3194 1034 : is_first_sort = false;
3195 : }
3196 : else
3197 : {
3198 : /* Account for cost of sort, but don't charge input cost again */
3199 798 : cost_sort(&sort_path, root, NIL, 0,
3200 : 0.0,
3201 : subpath->rows,
3202 798 : subpath->pathtarget->width,
3203 : 0.0,
3204 : work_mem,
3205 : -1.0);
3206 :
3207 : /* Account for cost of aggregation */
3208 :
3209 798 : cost_agg(&agg_path, root,
3210 : AGG_SORTED,
3211 : agg_costs,
3212 : numGroupCols,
3213 : rollup->numGroups,
3214 : having_qual,
3215 : sort_path.disabled_nodes,
3216 : sort_path.startup_cost,
3217 : sort_path.total_cost,
3218 : sort_path.rows,
3219 798 : subpath->pathtarget->width);
3220 : }
3221 :
3222 3410 : pathnode->path.disabled_nodes += agg_path.disabled_nodes;
3223 3410 : pathnode->path.total_cost += agg_path.total_cost;
3224 3410 : pathnode->path.rows += agg_path.rows;
3225 : }
3226 : }
3227 :
3228 : /* add tlist eval cost for each output row */
3229 2406 : pathnode->path.startup_cost += target->cost.startup;
3230 2406 : pathnode->path.total_cost += target->cost.startup +
3231 2406 : target->cost.per_tuple * pathnode->path.rows;
3232 :
3233 2406 : return pathnode;
3234 : }
3235 :
3236 : /*
3237 : * create_minmaxagg_path
3238 : * Creates a pathnode that represents computation of MIN/MAX aggregates
3239 : *
3240 : * 'rel' is the parent relation associated with the result
3241 : * 'target' is the PathTarget to be computed
3242 : * 'mmaggregates' is a list of MinMaxAggInfo structs
3243 : * 'quals' is the HAVING quals if any
3244 : */
3245 : MinMaxAggPath *
3246 422 : create_minmaxagg_path(PlannerInfo *root,
3247 : RelOptInfo *rel,
3248 : PathTarget *target,
3249 : List *mmaggregates,
3250 : List *quals)
3251 : {
3252 422 : MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3253 : Cost initplan_cost;
3254 422 : int initplan_disabled_nodes = 0;
3255 : ListCell *lc;
3256 :
3257 : /* The topmost generated Plan node will be a Result */
3258 422 : pathnode->path.pathtype = T_Result;
3259 422 : pathnode->path.parent = rel;
3260 422 : pathnode->path.pathtarget = target;
3261 : /* For now, assume we are above any joins, so no parameterization */
3262 422 : pathnode->path.param_info = NULL;
3263 422 : pathnode->path.parallel_aware = false;
3264 422 : pathnode->path.parallel_safe = true; /* might change below */
3265 422 : pathnode->path.parallel_workers = 0;
3266 : /* Result is one unordered row */
3267 422 : pathnode->path.rows = 1;
3268 422 : pathnode->path.pathkeys = NIL;
3269 :
3270 422 : pathnode->mmaggregates = mmaggregates;
3271 422 : pathnode->quals = quals;
3272 :
3273 : /* Calculate cost of all the initplans, and check parallel safety */
3274 422 : initplan_cost = 0;
3275 880 : foreach(lc, mmaggregates)
3276 : {
3277 458 : MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3278 :
3279 458 : initplan_disabled_nodes += mminfo->path->disabled_nodes;
3280 458 : initplan_cost += mminfo->pathcost;
3281 458 : if (!mminfo->path->parallel_safe)
3282 110 : pathnode->path.parallel_safe = false;
3283 : }
3284 :
3285 : /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3286 422 : pathnode->path.disabled_nodes = initplan_disabled_nodes;
3287 422 : pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3288 422 : pathnode->path.total_cost = initplan_cost + target->cost.startup +
3289 422 : target->cost.per_tuple + cpu_tuple_cost;
3290 :
3291 : /*
3292 : * Add cost of qual, if any --- but we ignore its selectivity, since our
3293 : * rowcount estimate should be 1 no matter what the qual is.
3294 : */
3295 422 : if (quals)
3296 : {
3297 : QualCost qual_cost;
3298 :
3299 0 : cost_qual_eval(&qual_cost, quals, root);
3300 0 : pathnode->path.startup_cost += qual_cost.startup;
3301 0 : pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3302 : }
3303 :
3304 : /*
3305 : * If the initplans were all parallel-safe, also check safety of the
3306 : * target and quals. (The Result node itself isn't parallelizable, but if
3307 : * we are in a subquery then it can be useful for the outer query to know
3308 : * that this one is parallel-safe.)
3309 : */
3310 422 : if (pathnode->path.parallel_safe)
3311 312 : pathnode->path.parallel_safe =
3312 624 : is_parallel_safe(root, (Node *) target->exprs) &&
3313 312 : is_parallel_safe(root, (Node *) quals);
3314 :
3315 422 : return pathnode;
3316 : }
3317 :
3318 : /*
3319 : * create_windowagg_path
3320 : * Creates a pathnode that represents computation of window functions
3321 : *
3322 : * 'rel' is the parent relation associated with the result
3323 : * 'subpath' is the path representing the source of data
3324 : * 'target' is the PathTarget to be computed
3325 : * 'windowFuncs' is a list of WindowFunc structs
3326 : * 'runCondition' is a list of OpExprs to short-circuit WindowAgg execution
3327 : * 'winclause' is a WindowClause that is common to all the WindowFuncs
3328 : * 'qual' WindowClause.runconditions from lower-level WindowAggPaths.
3329 : * Must always be NIL when topwindow == false
3330 : * 'topwindow' pass as true only for the top-level WindowAgg. False for all
3331 : * intermediate WindowAggs.
3332 : *
3333 : * The input must be sorted according to the WindowClause's PARTITION keys
3334 : * plus ORDER BY keys.
3335 : */
3336 : WindowAggPath *
3337 3076 : create_windowagg_path(PlannerInfo *root,
3338 : RelOptInfo *rel,
3339 : Path *subpath,
3340 : PathTarget *target,
3341 : List *windowFuncs,
3342 : List *runCondition,
3343 : WindowClause *winclause,
3344 : List *qual,
3345 : bool topwindow)
3346 : {
3347 3076 : WindowAggPath *pathnode = makeNode(WindowAggPath);
3348 :
3349 : /* qual can only be set for the topwindow */
3350 : Assert(qual == NIL || topwindow);
3351 :
3352 3076 : pathnode->path.pathtype = T_WindowAgg;
3353 3076 : pathnode->path.parent = rel;
3354 3076 : pathnode->path.pathtarget = target;
3355 : /* For now, assume we are above any joins, so no parameterization */
3356 3076 : pathnode->path.param_info = NULL;
3357 3076 : pathnode->path.parallel_aware = false;
3358 3076 : pathnode->path.parallel_safe = rel->consider_parallel &&
3359 0 : subpath->parallel_safe;
3360 3076 : pathnode->path.parallel_workers = subpath->parallel_workers;
3361 : /* WindowAgg preserves the input sort order */
3362 3076 : pathnode->path.pathkeys = subpath->pathkeys;
3363 :
3364 3076 : pathnode->subpath = subpath;
3365 3076 : pathnode->winclause = winclause;
3366 3076 : pathnode->qual = qual;
3367 3076 : pathnode->runCondition = runCondition;
3368 3076 : pathnode->topwindow = topwindow;
3369 :
3370 : /*
3371 : * For costing purposes, assume that there are no redundant partitioning
3372 : * or ordering columns; it's not worth the trouble to deal with that
3373 : * corner case here. So we just pass the unmodified list lengths to
3374 : * cost_windowagg.
3375 : */
3376 3076 : cost_windowagg(&pathnode->path, root,
3377 : windowFuncs,
3378 : winclause,
3379 : subpath->disabled_nodes,
3380 : subpath->startup_cost,
3381 : subpath->total_cost,
3382 : subpath->rows);
3383 :
3384 : /* add tlist eval cost for each output row */
3385 3076 : pathnode->path.startup_cost += target->cost.startup;
3386 3076 : pathnode->path.total_cost += target->cost.startup +
3387 3076 : target->cost.per_tuple * pathnode->path.rows;
3388 :
3389 3076 : return pathnode;
3390 : }
3391 :
3392 : /*
3393 : * create_setop_path
3394 : * Creates a pathnode that represents computation of INTERSECT or EXCEPT
3395 : *
3396 : * 'rel' is the parent relation associated with the result
3397 : * 'leftpath' is the path representing the left-hand source of data
3398 : * 'rightpath' is the path representing the right-hand source of data
3399 : * 'cmd' is the specific semantics (INTERSECT or EXCEPT, with/without ALL)
3400 : * 'strategy' is the implementation strategy (sorted or hashed)
3401 : * 'groupList' is a list of SortGroupClause's representing the grouping
3402 : * 'numGroups' is the estimated number of distinct groups in left-hand input
3403 : * 'outputRows' is the estimated number of output rows
3404 : *
3405 : * leftpath and rightpath must produce the same columns. Moreover, if
3406 : * strategy is SETOP_SORTED, leftpath and rightpath must both be sorted
3407 : * by all the grouping columns.
3408 : */
3409 : SetOpPath *
3410 1300 : create_setop_path(PlannerInfo *root,
3411 : RelOptInfo *rel,
3412 : Path *leftpath,
3413 : Path *rightpath,
3414 : SetOpCmd cmd,
3415 : SetOpStrategy strategy,
3416 : List *groupList,
3417 : double numGroups,
3418 : double outputRows)
3419 : {
3420 1300 : SetOpPath *pathnode = makeNode(SetOpPath);
3421 :
3422 1300 : pathnode->path.pathtype = T_SetOp;
3423 1300 : pathnode->path.parent = rel;
3424 1300 : pathnode->path.pathtarget = rel->reltarget;
3425 : /* For now, assume we are above any joins, so no parameterization */
3426 1300 : pathnode->path.param_info = NULL;
3427 1300 : pathnode->path.parallel_aware = false;
3428 2600 : pathnode->path.parallel_safe = rel->consider_parallel &&
3429 1300 : leftpath->parallel_safe && rightpath->parallel_safe;
3430 1300 : pathnode->path.parallel_workers =
3431 1300 : leftpath->parallel_workers + rightpath->parallel_workers;
3432 : /* SetOp preserves the input sort order if in sort mode */
3433 1300 : pathnode->path.pathkeys =
3434 1300 : (strategy == SETOP_SORTED) ? leftpath->pathkeys : NIL;
3435 :
3436 1300 : pathnode->leftpath = leftpath;
3437 1300 : pathnode->rightpath = rightpath;
3438 1300 : pathnode->cmd = cmd;
3439 1300 : pathnode->strategy = strategy;
3440 1300 : pathnode->groupList = groupList;
3441 1300 : pathnode->numGroups = numGroups;
3442 :
3443 : /*
3444 : * Compute cost estimates. As things stand, we end up with the same total
3445 : * cost in this node for sort and hash methods, but different startup
3446 : * costs. This could be refined perhaps, but it'll do for now.
3447 : */
3448 1300 : pathnode->path.disabled_nodes =
3449 1300 : leftpath->disabled_nodes + rightpath->disabled_nodes;
3450 1300 : if (strategy == SETOP_SORTED)
3451 : {
3452 : /*
3453 : * In sorted mode, we can emit output incrementally. Charge one
3454 : * cpu_operator_cost per comparison per input tuple. Like cost_group,
3455 : * we assume all columns get compared at most of the tuples.
3456 : */
3457 680 : pathnode->path.startup_cost =
3458 680 : leftpath->startup_cost + rightpath->startup_cost;
3459 680 : pathnode->path.total_cost =
3460 1360 : leftpath->total_cost + rightpath->total_cost +
3461 680 : cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3462 :
3463 : /*
3464 : * Also charge a small amount per extracted tuple. Like cost_sort,
3465 : * charge only operator cost not cpu_tuple_cost, since SetOp does no
3466 : * qual-checking or projection.
3467 : */
3468 680 : pathnode->path.total_cost += cpu_operator_cost * outputRows;
3469 : }
3470 : else
3471 : {
3472 : Size hashtablesize;
3473 :
3474 : /*
3475 : * In hashed mode, we must read all the input before we can emit
3476 : * anything. Also charge comparison costs to represent the cost of
3477 : * hash table lookups.
3478 : */
3479 620 : pathnode->path.startup_cost =
3480 1240 : leftpath->total_cost + rightpath->total_cost +
3481 620 : cpu_operator_cost * (leftpath->rows + rightpath->rows) * list_length(groupList);
3482 620 : pathnode->path.total_cost = pathnode->path.startup_cost;
3483 :
3484 : /*
3485 : * Also charge a small amount per extracted tuple. Like cost_sort,
3486 : * charge only operator cost not cpu_tuple_cost, since SetOp does no
3487 : * qual-checking or projection.
3488 : */
3489 620 : pathnode->path.total_cost += cpu_operator_cost * outputRows;
3490 :
3491 : /*
3492 : * Mark the path as disabled if enable_hashagg is off. While this
3493 : * isn't exactly a HashAgg node, it seems close enough to justify
3494 : * letting that switch control it.
3495 : */
3496 620 : if (!enable_hashagg)
3497 114 : pathnode->path.disabled_nodes++;
3498 :
3499 : /*
3500 : * Also disable if it doesn't look like the hashtable will fit into
3501 : * hash_mem. (Note: reject on equality, to ensure that an estimate of
3502 : * SIZE_MAX disables hashing regardless of the hash_mem limit.)
3503 : */
3504 620 : hashtablesize = EstimateSetOpHashTableSpace(numGroups,
3505 620 : leftpath->pathtarget->width);
3506 620 : if (hashtablesize >= get_hash_memory_limit())
3507 0 : pathnode->path.disabled_nodes++;
3508 : }
3509 1300 : pathnode->path.rows = outputRows;
3510 :
3511 1300 : return pathnode;
3512 : }
3513 :
3514 : /*
3515 : * create_recursiveunion_path
3516 : * Creates a pathnode that represents a recursive UNION node
3517 : *
3518 : * 'rel' is the parent relation associated with the result
3519 : * 'leftpath' is the source of data for the non-recursive term
3520 : * 'rightpath' is the source of data for the recursive term
3521 : * 'target' is the PathTarget to be computed
3522 : * 'distinctList' is a list of SortGroupClause's representing the grouping
3523 : * 'wtParam' is the ID of Param representing work table
3524 : * 'numGroups' is the estimated number of groups
3525 : *
3526 : * For recursive UNION ALL, distinctList is empty and numGroups is zero
3527 : */
3528 : RecursiveUnionPath *
3529 936 : create_recursiveunion_path(PlannerInfo *root,
3530 : RelOptInfo *rel,
3531 : Path *leftpath,
3532 : Path *rightpath,
3533 : PathTarget *target,
3534 : List *distinctList,
3535 : int wtParam,
3536 : double numGroups)
3537 : {
3538 936 : RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3539 :
3540 936 : pathnode->path.pathtype = T_RecursiveUnion;
3541 936 : pathnode->path.parent = rel;
3542 936 : pathnode->path.pathtarget = target;
3543 : /* For now, assume we are above any joins, so no parameterization */
3544 936 : pathnode->path.param_info = NULL;
3545 936 : pathnode->path.parallel_aware = false;
3546 1872 : pathnode->path.parallel_safe = rel->consider_parallel &&
3547 936 : leftpath->parallel_safe && rightpath->parallel_safe;
3548 : /* Foolish, but we'll do it like joins for now: */
3549 936 : pathnode->path.parallel_workers = leftpath->parallel_workers;
3550 : /* RecursiveUnion result is always unsorted */
3551 936 : pathnode->path.pathkeys = NIL;
3552 :
3553 936 : pathnode->leftpath = leftpath;
3554 936 : pathnode->rightpath = rightpath;
3555 936 : pathnode->distinctList = distinctList;
3556 936 : pathnode->wtParam = wtParam;
3557 936 : pathnode->numGroups = numGroups;
3558 :
3559 936 : cost_recursive_union(&pathnode->path, leftpath, rightpath);
3560 :
3561 936 : return pathnode;
3562 : }
3563 :
3564 : /*
3565 : * create_lockrows_path
3566 : * Creates a pathnode that represents acquiring row locks
3567 : *
3568 : * 'rel' is the parent relation associated with the result
3569 : * 'subpath' is the path representing the source of data
3570 : * 'rowMarks' is a list of PlanRowMark's
3571 : * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3572 : */
3573 : LockRowsPath *
3574 14066 : create_lockrows_path(PlannerInfo *root, RelOptInfo *rel,
3575 : Path *subpath, List *rowMarks, int epqParam)
3576 : {
3577 14066 : LockRowsPath *pathnode = makeNode(LockRowsPath);
3578 :
3579 14066 : pathnode->path.pathtype = T_LockRows;
3580 14066 : pathnode->path.parent = rel;
3581 : /* LockRows doesn't project, so use source path's pathtarget */
3582 14066 : pathnode->path.pathtarget = subpath->pathtarget;
3583 : /* For now, assume we are above any joins, so no parameterization */
3584 14066 : pathnode->path.param_info = NULL;
3585 14066 : pathnode->path.parallel_aware = false;
3586 14066 : pathnode->path.parallel_safe = false;
3587 14066 : pathnode->path.parallel_workers = 0;
3588 14066 : pathnode->path.rows = subpath->rows;
3589 :
3590 : /*
3591 : * The result cannot be assumed sorted, since locking might cause the sort
3592 : * key columns to be replaced with new values.
3593 : */
3594 14066 : pathnode->path.pathkeys = NIL;
3595 :
3596 14066 : pathnode->subpath = subpath;
3597 14066 : pathnode->rowMarks = rowMarks;
3598 14066 : pathnode->epqParam = epqParam;
3599 :
3600 : /*
3601 : * We should charge something extra for the costs of row locking and
3602 : * possible refetches, but it's hard to say how much. For now, use
3603 : * cpu_tuple_cost per row.
3604 : */
3605 14066 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
3606 14066 : pathnode->path.startup_cost = subpath->startup_cost;
3607 14066 : pathnode->path.total_cost = subpath->total_cost +
3608 14066 : cpu_tuple_cost * subpath->rows;
3609 :
3610 14066 : return pathnode;
3611 : }
3612 :
3613 : /*
3614 : * create_modifytable_path
3615 : * Creates a pathnode that represents performing INSERT/UPDATE/DELETE/MERGE
3616 : * mods
3617 : *
3618 : * 'rel' is the parent relation associated with the result
3619 : * 'subpath' is a Path producing source data
3620 : * 'operation' is the operation type
3621 : * 'canSetTag' is true if we set the command tag/es_processed
3622 : * 'nominalRelation' is the parent RT index for use of EXPLAIN
3623 : * 'rootRelation' is the partitioned/inherited table root RTI, or 0 if none
3624 : * 'resultRelations' is an integer list of actual RT indexes of target rel(s)
3625 : * 'updateColnosLists' is a list of UPDATE target column number lists
3626 : * (one sublist per rel); or NIL if not an UPDATE
3627 : * 'withCheckOptionLists' is a list of WCO lists (one per rel)
3628 : * 'returningLists' is a list of RETURNING tlists (one per rel)
3629 : * 'rowMarks' is a list of PlanRowMarks (non-locking only)
3630 : * 'onconflict' is the ON CONFLICT clause, or NULL
3631 : * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3632 : * 'mergeActionLists' is a list of lists of MERGE actions (one per rel)
3633 : * 'mergeJoinConditions' is a list of join conditions for MERGE (one per rel)
3634 : */
3635 : ModifyTablePath *
3636 89398 : create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
3637 : Path *subpath,
3638 : CmdType operation, bool canSetTag,
3639 : Index nominalRelation, Index rootRelation,
3640 : List *resultRelations,
3641 : List *updateColnosLists,
3642 : List *withCheckOptionLists, List *returningLists,
3643 : List *rowMarks, OnConflictExpr *onconflict,
3644 : List *mergeActionLists, List *mergeJoinConditions,
3645 : int epqParam)
3646 : {
3647 89398 : ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3648 :
3649 : Assert(operation == CMD_MERGE ||
3650 : (operation == CMD_UPDATE ?
3651 : list_length(resultRelations) == list_length(updateColnosLists) :
3652 : updateColnosLists == NIL));
3653 : Assert(withCheckOptionLists == NIL ||
3654 : list_length(resultRelations) == list_length(withCheckOptionLists));
3655 : Assert(returningLists == NIL ||
3656 : list_length(resultRelations) == list_length(returningLists));
3657 :
3658 89398 : pathnode->path.pathtype = T_ModifyTable;
3659 89398 : pathnode->path.parent = rel;
3660 : /* pathtarget is not interesting, just make it minimally valid */
3661 89398 : pathnode->path.pathtarget = rel->reltarget;
3662 : /* For now, assume we are above any joins, so no parameterization */
3663 89398 : pathnode->path.param_info = NULL;
3664 89398 : pathnode->path.parallel_aware = false;
3665 89398 : pathnode->path.parallel_safe = false;
3666 89398 : pathnode->path.parallel_workers = 0;
3667 89398 : pathnode->path.pathkeys = NIL;
3668 :
3669 : /*
3670 : * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3671 : *
3672 : * Currently, we don't charge anything extra for the actual table
3673 : * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3674 : * expressions if any. It would only be window dressing, since
3675 : * ModifyTable is always a top-level node and there is no way for the
3676 : * costs to change any higher-level planning choices. But we might want
3677 : * to make it look better sometime.
3678 : */
3679 89398 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
3680 89398 : pathnode->path.startup_cost = subpath->startup_cost;
3681 89398 : pathnode->path.total_cost = subpath->total_cost;
3682 89398 : if (returningLists != NIL)
3683 : {
3684 3048 : pathnode->path.rows = subpath->rows;
3685 :
3686 : /*
3687 : * Set width to match the subpath output. XXX this is totally wrong:
3688 : * we should return an average of the RETURNING tlist widths. But
3689 : * it's what happened historically, and improving it is a task for
3690 : * another day. (Again, it's mostly window dressing.)
3691 : */
3692 3048 : pathnode->path.pathtarget->width = subpath->pathtarget->width;
3693 : }
3694 : else
3695 : {
3696 86350 : pathnode->path.rows = 0;
3697 86350 : pathnode->path.pathtarget->width = 0;
3698 : }
3699 :
3700 89398 : pathnode->subpath = subpath;
3701 89398 : pathnode->operation = operation;
3702 89398 : pathnode->canSetTag = canSetTag;
3703 89398 : pathnode->nominalRelation = nominalRelation;
3704 89398 : pathnode->rootRelation = rootRelation;
3705 89398 : pathnode->resultRelations = resultRelations;
3706 89398 : pathnode->updateColnosLists = updateColnosLists;
3707 89398 : pathnode->withCheckOptionLists = withCheckOptionLists;
3708 89398 : pathnode->returningLists = returningLists;
3709 89398 : pathnode->rowMarks = rowMarks;
3710 89398 : pathnode->onconflict = onconflict;
3711 89398 : pathnode->epqParam = epqParam;
3712 89398 : pathnode->mergeActionLists = mergeActionLists;
3713 89398 : pathnode->mergeJoinConditions = mergeJoinConditions;
3714 :
3715 89398 : return pathnode;
3716 : }
3717 :
3718 : /*
3719 : * create_limit_path
3720 : * Creates a pathnode that represents performing LIMIT/OFFSET
3721 : *
3722 : * In addition to providing the actual OFFSET and LIMIT expressions,
3723 : * the caller must provide estimates of their values for costing purposes.
3724 : * The estimates are as computed by preprocess_limit(), ie, 0 represents
3725 : * the clause not being present, and -1 means it's present but we could
3726 : * not estimate its value.
3727 : *
3728 : * 'rel' is the parent relation associated with the result
3729 : * 'subpath' is the path representing the source of data
3730 : * 'limitOffset' is the actual OFFSET expression, or NULL
3731 : * 'limitCount' is the actual LIMIT expression, or NULL
3732 : * 'offset_est' is the estimated value of the OFFSET expression
3733 : * 'count_est' is the estimated value of the LIMIT expression
3734 : */
3735 : LimitPath *
3736 6396 : create_limit_path(PlannerInfo *root, RelOptInfo *rel,
3737 : Path *subpath,
3738 : Node *limitOffset, Node *limitCount,
3739 : LimitOption limitOption,
3740 : int64 offset_est, int64 count_est)
3741 : {
3742 6396 : LimitPath *pathnode = makeNode(LimitPath);
3743 :
3744 6396 : pathnode->path.pathtype = T_Limit;
3745 6396 : pathnode->path.parent = rel;
3746 : /* Limit doesn't project, so use source path's pathtarget */
3747 6396 : pathnode->path.pathtarget = subpath->pathtarget;
3748 : /* For now, assume we are above any joins, so no parameterization */
3749 6396 : pathnode->path.param_info = NULL;
3750 6396 : pathnode->path.parallel_aware = false;
3751 8860 : pathnode->path.parallel_safe = rel->consider_parallel &&
3752 2464 : subpath->parallel_safe;
3753 6396 : pathnode->path.parallel_workers = subpath->parallel_workers;
3754 6396 : pathnode->path.rows = subpath->rows;
3755 6396 : pathnode->path.disabled_nodes = subpath->disabled_nodes;
3756 6396 : pathnode->path.startup_cost = subpath->startup_cost;
3757 6396 : pathnode->path.total_cost = subpath->total_cost;
3758 6396 : pathnode->path.pathkeys = subpath->pathkeys;
3759 6396 : pathnode->subpath = subpath;
3760 6396 : pathnode->limitOffset = limitOffset;
3761 6396 : pathnode->limitCount = limitCount;
3762 6396 : pathnode->limitOption = limitOption;
3763 :
3764 : /*
3765 : * Adjust the output rows count and costs according to the offset/limit.
3766 : */
3767 6396 : adjust_limit_rows_costs(&pathnode->path.rows,
3768 : &pathnode->path.startup_cost,
3769 : &pathnode->path.total_cost,
3770 : offset_est, count_est);
3771 :
3772 6396 : return pathnode;
3773 : }
3774 :
3775 : /*
3776 : * adjust_limit_rows_costs
3777 : * Adjust the size and cost estimates for a LimitPath node according to the
3778 : * offset/limit.
3779 : *
3780 : * This is only a cosmetic issue if we are at top level, but if we are
3781 : * building a subquery then it's important to report correct info to the outer
3782 : * planner.
3783 : *
3784 : * When the offset or count couldn't be estimated, use 10% of the estimated
3785 : * number of rows emitted from the subpath.
3786 : *
3787 : * XXX we don't bother to add eval costs of the offset/limit expressions
3788 : * themselves to the path costs. In theory we should, but in most cases those
3789 : * expressions are trivial and it's just not worth the trouble.
3790 : */
3791 : void
3792 6580 : adjust_limit_rows_costs(double *rows, /* in/out parameter */
3793 : Cost *startup_cost, /* in/out parameter */
3794 : Cost *total_cost, /* in/out parameter */
3795 : int64 offset_est,
3796 : int64 count_est)
3797 : {
3798 6580 : double input_rows = *rows;
3799 6580 : Cost input_startup_cost = *startup_cost;
3800 6580 : Cost input_total_cost = *total_cost;
3801 :
3802 6580 : if (offset_est != 0)
3803 : {
3804 : double offset_rows;
3805 :
3806 712 : if (offset_est > 0)
3807 688 : offset_rows = (double) offset_est;
3808 : else
3809 24 : offset_rows = clamp_row_est(input_rows * 0.10);
3810 712 : if (offset_rows > *rows)
3811 46 : offset_rows = *rows;
3812 712 : if (input_rows > 0)
3813 712 : *startup_cost +=
3814 712 : (input_total_cost - input_startup_cost)
3815 712 : * offset_rows / input_rows;
3816 712 : *rows -= offset_rows;
3817 712 : if (*rows < 1)
3818 54 : *rows = 1;
3819 : }
3820 :
3821 6580 : if (count_est != 0)
3822 : {
3823 : double count_rows;
3824 :
3825 6504 : if (count_est > 0)
3826 6498 : count_rows = (double) count_est;
3827 : else
3828 6 : count_rows = clamp_row_est(input_rows * 0.10);
3829 6504 : if (count_rows > *rows)
3830 272 : count_rows = *rows;
3831 6504 : if (input_rows > 0)
3832 6504 : *total_cost = *startup_cost +
3833 6504 : (input_total_cost - input_startup_cost)
3834 6504 : * count_rows / input_rows;
3835 6504 : *rows = count_rows;
3836 6504 : if (*rows < 1)
3837 0 : *rows = 1;
3838 : }
3839 6580 : }
3840 :
3841 :
3842 : /*
3843 : * reparameterize_path
3844 : * Attempt to modify a Path to have greater parameterization
3845 : *
3846 : * We use this to attempt to bring all child paths of an appendrel to the
3847 : * same parameterization level, ensuring that they all enforce the same set
3848 : * of join quals (and thus that that parameterization can be attributed to
3849 : * an append path built from such paths). Currently, only a few path types
3850 : * are supported here, though more could be added at need. We return NULL
3851 : * if we can't reparameterize the given path.
3852 : *
3853 : * Note: we intentionally do not pass created paths to add_path(); it would
3854 : * possibly try to delete them on the grounds of being cost-inferior to the
3855 : * paths they were made from, and we don't want that. Paths made here are
3856 : * not necessarily of general-purpose usefulness, but they can be useful
3857 : * as members of an append path.
3858 : */
3859 : Path *
3860 356 : reparameterize_path(PlannerInfo *root, Path *path,
3861 : Relids required_outer,
3862 : double loop_count)
3863 : {
3864 356 : RelOptInfo *rel = path->parent;
3865 :
3866 : /* Can only increase, not decrease, path's parameterization */
3867 356 : if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
3868 0 : return NULL;
3869 356 : switch (path->pathtype)
3870 : {
3871 264 : case T_SeqScan:
3872 264 : return create_seqscan_path(root, rel, required_outer, 0);
3873 0 : case T_SampleScan:
3874 0 : return create_samplescan_path(root, rel, required_outer);
3875 0 : case T_IndexScan:
3876 : case T_IndexOnlyScan:
3877 : {
3878 0 : IndexPath *ipath = (IndexPath *) path;
3879 0 : IndexPath *newpath = makeNode(IndexPath);
3880 :
3881 : /*
3882 : * We can't use create_index_path directly, and would not want
3883 : * to because it would re-compute the indexqual conditions
3884 : * which is wasted effort. Instead we hack things a bit:
3885 : * flat-copy the path node, revise its param_info, and redo
3886 : * the cost estimate.
3887 : */
3888 0 : memcpy(newpath, ipath, sizeof(IndexPath));
3889 0 : newpath->path.param_info =
3890 0 : get_baserel_parampathinfo(root, rel, required_outer);
3891 0 : cost_index(newpath, root, loop_count, false);
3892 0 : return (Path *) newpath;
3893 : }
3894 0 : case T_BitmapHeapScan:
3895 : {
3896 0 : BitmapHeapPath *bpath = (BitmapHeapPath *) path;
3897 :
3898 0 : return (Path *) create_bitmap_heap_path(root,
3899 : rel,
3900 : bpath->bitmapqual,
3901 : required_outer,
3902 : loop_count, 0);
3903 : }
3904 0 : case T_SubqueryScan:
3905 : {
3906 0 : SubqueryScanPath *spath = (SubqueryScanPath *) path;
3907 0 : Path *subpath = spath->subpath;
3908 : bool trivial_pathtarget;
3909 :
3910 : /*
3911 : * If existing node has zero extra cost, we must have decided
3912 : * its target is trivial. (The converse is not true, because
3913 : * it might have a trivial target but quals to enforce; but in
3914 : * that case the new node will too, so it doesn't matter
3915 : * whether we get the right answer here.)
3916 : */
3917 0 : trivial_pathtarget =
3918 0 : (subpath->total_cost == spath->path.total_cost);
3919 :
3920 0 : return (Path *) create_subqueryscan_path(root,
3921 : rel,
3922 : subpath,
3923 : trivial_pathtarget,
3924 : spath->path.pathkeys,
3925 : required_outer);
3926 : }
3927 60 : case T_Result:
3928 : /* Supported only for RTE_RESULT scan paths */
3929 60 : if (IsA(path, Path))
3930 60 : return create_resultscan_path(root, rel, required_outer);
3931 0 : break;
3932 0 : case T_Append:
3933 : {
3934 0 : AppendPath *apath = (AppendPath *) path;
3935 0 : List *childpaths = NIL;
3936 0 : List *partialpaths = NIL;
3937 : int i;
3938 : ListCell *lc;
3939 :
3940 : /* Reparameterize the children */
3941 0 : i = 0;
3942 0 : foreach(lc, apath->subpaths)
3943 : {
3944 0 : Path *spath = (Path *) lfirst(lc);
3945 :
3946 0 : spath = reparameterize_path(root, spath,
3947 : required_outer,
3948 : loop_count);
3949 0 : if (spath == NULL)
3950 0 : return NULL;
3951 : /* We have to re-split the regular and partial paths */
3952 0 : if (i < apath->first_partial_path)
3953 0 : childpaths = lappend(childpaths, spath);
3954 : else
3955 0 : partialpaths = lappend(partialpaths, spath);
3956 0 : i++;
3957 : }
3958 0 : return (Path *)
3959 0 : create_append_path(root, rel, childpaths, partialpaths,
3960 : apath->path.pathkeys, required_outer,
3961 : apath->path.parallel_workers,
3962 0 : apath->path.parallel_aware,
3963 : -1);
3964 : }
3965 0 : case T_Material:
3966 : {
3967 0 : MaterialPath *mpath = (MaterialPath *) path;
3968 0 : Path *spath = mpath->subpath;
3969 : bool enabled;
3970 :
3971 0 : spath = reparameterize_path(root, spath,
3972 : required_outer,
3973 : loop_count);
3974 0 : if (spath == NULL)
3975 0 : return NULL;
3976 0 : enabled =
3977 0 : (mpath->path.disabled_nodes <= spath->disabled_nodes);
3978 0 : return (Path *) create_material_path(rel, spath, enabled);
3979 : }
3980 0 : case T_Memoize:
3981 : {
3982 0 : MemoizePath *mpath = (MemoizePath *) path;
3983 0 : Path *spath = mpath->subpath;
3984 :
3985 0 : spath = reparameterize_path(root, spath,
3986 : required_outer,
3987 : loop_count);
3988 0 : if (spath == NULL)
3989 0 : return NULL;
3990 0 : return (Path *) create_memoize_path(root, rel,
3991 : spath,
3992 : mpath->param_exprs,
3993 : mpath->hash_operators,
3994 0 : mpath->singlerow,
3995 0 : mpath->binary_mode,
3996 : mpath->est_calls);
3997 : }
3998 32 : default:
3999 32 : break;
4000 : }
4001 32 : return NULL;
4002 : }
4003 :
4004 : /*
4005 : * reparameterize_path_by_child
4006 : * Given a path parameterized by the parent of the given child relation,
4007 : * translate the path to be parameterized by the given child relation.
4008 : *
4009 : * Most fields in the path are not changed, but any expressions must be
4010 : * adjusted to refer to the correct varnos, and any subpaths must be
4011 : * recursively reparameterized. Other fields that refer to specific relids
4012 : * also need adjustment.
4013 : *
4014 : * The cost, number of rows, width and parallel path properties depend upon
4015 : * path->parent, which does not change during the translation. So we need
4016 : * not change those.
4017 : *
4018 : * Currently, only a few path types are supported here, though more could be
4019 : * added at need. We return NULL if we can't reparameterize the given path.
4020 : *
4021 : * Note that this function can change referenced RangeTblEntries, RelOptInfos
4022 : * and IndexOptInfos as well as the Path structures. Therefore, it's only safe
4023 : * to call during create_plan(), when we have made a final choice of which Path
4024 : * to use for each RangeTblEntry/RelOptInfo/IndexOptInfo.
4025 : *
4026 : * Keep this code in sync with path_is_reparameterizable_by_child()!
4027 : */
4028 : Path *
4029 101396 : reparameterize_path_by_child(PlannerInfo *root, Path *path,
4030 : RelOptInfo *child_rel)
4031 : {
4032 : Path *new_path;
4033 : ParamPathInfo *new_ppi;
4034 : ParamPathInfo *old_ppi;
4035 : Relids required_outer;
4036 :
4037 : #define ADJUST_CHILD_ATTRS(node) \
4038 : ((node) = (void *) adjust_appendrel_attrs_multilevel(root, \
4039 : (Node *) (node), \
4040 : child_rel, \
4041 : child_rel->top_parent))
4042 :
4043 : #define REPARAMETERIZE_CHILD_PATH(path) \
4044 : do { \
4045 : (path) = reparameterize_path_by_child(root, (path), child_rel); \
4046 : if ((path) == NULL) \
4047 : return NULL; \
4048 : } while(0)
4049 :
4050 : #define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4051 : do { \
4052 : if ((pathlist) != NIL) \
4053 : { \
4054 : (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4055 : child_rel); \
4056 : if ((pathlist) == NIL) \
4057 : return NULL; \
4058 : } \
4059 : } while(0)
4060 :
4061 : /*
4062 : * If the path is not parameterized by the parent of the given relation,
4063 : * it doesn't need reparameterization.
4064 : */
4065 101396 : if (!path->param_info ||
4066 51056 : !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4067 100394 : return path;
4068 :
4069 : /*
4070 : * If possible, reparameterize the given path.
4071 : *
4072 : * This function is currently only applied to the inner side of a nestloop
4073 : * join that is being partitioned by the partitionwise-join code. Hence,
4074 : * we need only support path types that plausibly arise in that context.
4075 : * (In particular, supporting sorted path types would be a waste of code
4076 : * and cycles: even if we translated them here, they'd just lose in
4077 : * subsequent cost comparisons.) If we do see an unsupported path type,
4078 : * that just means we won't be able to generate a partitionwise-join plan
4079 : * using that path type.
4080 : */
4081 1002 : switch (nodeTag(path))
4082 : {
4083 228 : case T_Path:
4084 228 : new_path = path;
4085 228 : ADJUST_CHILD_ATTRS(new_path->parent->baserestrictinfo);
4086 228 : if (path->pathtype == T_SampleScan)
4087 : {
4088 48 : Index scan_relid = path->parent->relid;
4089 : RangeTblEntry *rte;
4090 :
4091 : /* it should be a base rel with a tablesample clause... */
4092 : Assert(scan_relid > 0);
4093 48 : rte = planner_rt_fetch(scan_relid, root);
4094 : Assert(rte->rtekind == RTE_RELATION);
4095 : Assert(rte->tablesample != NULL);
4096 :
4097 48 : ADJUST_CHILD_ATTRS(rte->tablesample);
4098 : }
4099 228 : break;
4100 :
4101 522 : case T_IndexPath:
4102 : {
4103 522 : IndexPath *ipath = (IndexPath *) path;
4104 :
4105 522 : ADJUST_CHILD_ATTRS(ipath->indexinfo->indrestrictinfo);
4106 522 : ADJUST_CHILD_ATTRS(ipath->indexclauses);
4107 522 : new_path = (Path *) ipath;
4108 : }
4109 522 : break;
4110 :
4111 48 : case T_BitmapHeapPath:
4112 : {
4113 48 : BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4114 :
4115 48 : ADJUST_CHILD_ATTRS(bhpath->path.parent->baserestrictinfo);
4116 48 : REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
4117 48 : new_path = (Path *) bhpath;
4118 : }
4119 48 : break;
4120 :
4121 24 : case T_BitmapAndPath:
4122 : {
4123 24 : BitmapAndPath *bapath = (BitmapAndPath *) path;
4124 :
4125 24 : REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
4126 24 : new_path = (Path *) bapath;
4127 : }
4128 24 : break;
4129 :
4130 24 : case T_BitmapOrPath:
4131 : {
4132 24 : BitmapOrPath *bopath = (BitmapOrPath *) path;
4133 :
4134 24 : REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
4135 24 : new_path = (Path *) bopath;
4136 : }
4137 24 : break;
4138 :
4139 0 : case T_ForeignPath:
4140 : {
4141 0 : ForeignPath *fpath = (ForeignPath *) path;
4142 : ReparameterizeForeignPathByChild_function rfpc_func;
4143 :
4144 0 : ADJUST_CHILD_ATTRS(fpath->path.parent->baserestrictinfo);
4145 0 : if (fpath->fdw_outerpath)
4146 0 : REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
4147 0 : if (fpath->fdw_restrictinfo)
4148 0 : ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo);
4149 :
4150 : /* Hand over to FDW if needed. */
4151 0 : rfpc_func =
4152 0 : path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4153 0 : if (rfpc_func)
4154 0 : fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4155 : child_rel);
4156 0 : new_path = (Path *) fpath;
4157 : }
4158 0 : break;
4159 :
4160 0 : case T_CustomPath:
4161 : {
4162 0 : CustomPath *cpath = (CustomPath *) path;
4163 :
4164 0 : ADJUST_CHILD_ATTRS(cpath->path.parent->baserestrictinfo);
4165 0 : REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
4166 0 : if (cpath->custom_restrictinfo)
4167 0 : ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo);
4168 0 : if (cpath->methods &&
4169 0 : cpath->methods->ReparameterizeCustomPathByChild)
4170 0 : cpath->custom_private =
4171 0 : cpath->methods->ReparameterizeCustomPathByChild(root,
4172 : cpath->custom_private,
4173 : child_rel);
4174 0 : new_path = (Path *) cpath;
4175 : }
4176 0 : break;
4177 :
4178 36 : case T_NestPath:
4179 : {
4180 36 : NestPath *npath = (NestPath *) path;
4181 36 : JoinPath *jpath = (JoinPath *) npath;
4182 :
4183 36 : REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4184 36 : REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4185 36 : ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4186 36 : new_path = (Path *) npath;
4187 : }
4188 36 : break;
4189 :
4190 0 : case T_MergePath:
4191 : {
4192 0 : MergePath *mpath = (MergePath *) path;
4193 0 : JoinPath *jpath = (JoinPath *) mpath;
4194 :
4195 0 : REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4196 0 : REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4197 0 : ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4198 0 : ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
4199 0 : new_path = (Path *) mpath;
4200 : }
4201 0 : break;
4202 :
4203 48 : case T_HashPath:
4204 : {
4205 48 : HashPath *hpath = (HashPath *) path;
4206 48 : JoinPath *jpath = (JoinPath *) hpath;
4207 :
4208 48 : REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4209 48 : REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4210 48 : ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4211 48 : ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
4212 48 : new_path = (Path *) hpath;
4213 : }
4214 48 : break;
4215 :
4216 24 : case T_AppendPath:
4217 : {
4218 24 : AppendPath *apath = (AppendPath *) path;
4219 :
4220 24 : REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
4221 24 : new_path = (Path *) apath;
4222 : }
4223 24 : break;
4224 :
4225 0 : case T_MaterialPath:
4226 : {
4227 0 : MaterialPath *mpath = (MaterialPath *) path;
4228 :
4229 0 : REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4230 0 : new_path = (Path *) mpath;
4231 : }
4232 0 : break;
4233 :
4234 48 : case T_MemoizePath:
4235 : {
4236 48 : MemoizePath *mpath = (MemoizePath *) path;
4237 :
4238 48 : REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4239 48 : ADJUST_CHILD_ATTRS(mpath->param_exprs);
4240 48 : new_path = (Path *) mpath;
4241 : }
4242 48 : break;
4243 :
4244 0 : case T_GatherPath:
4245 : {
4246 0 : GatherPath *gpath = (GatherPath *) path;
4247 :
4248 0 : REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4249 0 : new_path = (Path *) gpath;
4250 : }
4251 0 : break;
4252 :
4253 0 : default:
4254 : /* We don't know how to reparameterize this path. */
4255 0 : return NULL;
4256 : }
4257 :
4258 : /*
4259 : * Adjust the parameterization information, which refers to the topmost
4260 : * parent. The topmost parent can be multiple levels away from the given
4261 : * child, hence use multi-level expression adjustment routines.
4262 : */
4263 1002 : old_ppi = new_path->param_info;
4264 : required_outer =
4265 1002 : adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4266 : child_rel,
4267 1002 : child_rel->top_parent);
4268 :
4269 : /* If we already have a PPI for this parameterization, just return it */
4270 1002 : new_ppi = find_param_path_info(new_path->parent, required_outer);
4271 :
4272 : /*
4273 : * If not, build a new one and link it to the list of PPIs. For the same
4274 : * reason as explained in mark_dummy_rel(), allocate new PPI in the same
4275 : * context the given RelOptInfo is in.
4276 : */
4277 1002 : if (new_ppi == NULL)
4278 : {
4279 : MemoryContext oldcontext;
4280 858 : RelOptInfo *rel = path->parent;
4281 :
4282 858 : oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4283 :
4284 858 : new_ppi = makeNode(ParamPathInfo);
4285 858 : new_ppi->ppi_req_outer = bms_copy(required_outer);
4286 858 : new_ppi->ppi_rows = old_ppi->ppi_rows;
4287 858 : new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4288 858 : ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4289 858 : new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4290 858 : rel->ppilist = lappend(rel->ppilist, new_ppi);
4291 :
4292 858 : MemoryContextSwitchTo(oldcontext);
4293 : }
4294 1002 : bms_free(required_outer);
4295 :
4296 1002 : new_path->param_info = new_ppi;
4297 :
4298 : /*
4299 : * Adjust the path target if the parent of the outer relation is
4300 : * referenced in the targetlist. This can happen when only the parent of
4301 : * outer relation is laterally referenced in this relation.
4302 : */
4303 1002 : if (bms_overlap(path->parent->lateral_relids,
4304 1002 : child_rel->top_parent_relids))
4305 : {
4306 480 : new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4307 480 : ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4308 : }
4309 :
4310 1002 : return new_path;
4311 : }
4312 :
4313 : /*
4314 : * path_is_reparameterizable_by_child
4315 : * Given a path parameterized by the parent of the given child relation,
4316 : * see if it can be translated to be parameterized by the child relation.
4317 : *
4318 : * This must return true if and only if reparameterize_path_by_child()
4319 : * would succeed on this path. Currently it's sufficient to verify that
4320 : * the path and all of its subpaths (if any) are of the types handled by
4321 : * that function. However, subpaths that are not parameterized can be
4322 : * disregarded since they won't require translation.
4323 : */
4324 : bool
4325 36168 : path_is_reparameterizable_by_child(Path *path, RelOptInfo *child_rel)
4326 : {
4327 : #define REJECT_IF_PATH_NOT_REPARAMETERIZABLE(path) \
4328 : do { \
4329 : if (!path_is_reparameterizable_by_child(path, child_rel)) \
4330 : return false; \
4331 : } while(0)
4332 :
4333 : #define REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(pathlist) \
4334 : do { \
4335 : if (!pathlist_is_reparameterizable_by_child(pathlist, child_rel)) \
4336 : return false; \
4337 : } while(0)
4338 :
4339 : /*
4340 : * If the path is not parameterized by the parent of the given relation,
4341 : * it doesn't need reparameterization.
4342 : */
4343 36168 : if (!path->param_info ||
4344 35760 : !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4345 984 : return true;
4346 :
4347 : /*
4348 : * Check that the path type is one that reparameterize_path_by_child() can
4349 : * handle, and recursively check subpaths.
4350 : */
4351 35184 : switch (nodeTag(path))
4352 : {
4353 23784 : case T_Path:
4354 : case T_IndexPath:
4355 23784 : break;
4356 :
4357 48 : case T_BitmapHeapPath:
4358 : {
4359 48 : BitmapHeapPath *bhpath = (BitmapHeapPath *) path;
4360 :
4361 48 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(bhpath->bitmapqual);
4362 : }
4363 48 : break;
4364 :
4365 24 : case T_BitmapAndPath:
4366 : {
4367 24 : BitmapAndPath *bapath = (BitmapAndPath *) path;
4368 :
4369 24 : REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bapath->bitmapquals);
4370 : }
4371 24 : break;
4372 :
4373 24 : case T_BitmapOrPath:
4374 : {
4375 24 : BitmapOrPath *bopath = (BitmapOrPath *) path;
4376 :
4377 24 : REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(bopath->bitmapquals);
4378 : }
4379 24 : break;
4380 :
4381 148 : case T_ForeignPath:
4382 : {
4383 148 : ForeignPath *fpath = (ForeignPath *) path;
4384 :
4385 148 : if (fpath->fdw_outerpath)
4386 0 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(fpath->fdw_outerpath);
4387 : }
4388 148 : break;
4389 :
4390 0 : case T_CustomPath:
4391 : {
4392 0 : CustomPath *cpath = (CustomPath *) path;
4393 :
4394 0 : REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(cpath->custom_paths);
4395 : }
4396 0 : break;
4397 :
4398 1248 : case T_NestPath:
4399 : case T_MergePath:
4400 : case T_HashPath:
4401 : {
4402 1248 : JoinPath *jpath = (JoinPath *) path;
4403 :
4404 1248 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->outerjoinpath);
4405 1248 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(jpath->innerjoinpath);
4406 : }
4407 1248 : break;
4408 :
4409 192 : case T_AppendPath:
4410 : {
4411 192 : AppendPath *apath = (AppendPath *) path;
4412 :
4413 192 : REJECT_IF_PATH_LIST_NOT_REPARAMETERIZABLE(apath->subpaths);
4414 : }
4415 192 : break;
4416 :
4417 0 : case T_MaterialPath:
4418 : {
4419 0 : MaterialPath *mpath = (MaterialPath *) path;
4420 :
4421 0 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4422 : }
4423 0 : break;
4424 :
4425 9716 : case T_MemoizePath:
4426 : {
4427 9716 : MemoizePath *mpath = (MemoizePath *) path;
4428 :
4429 9716 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(mpath->subpath);
4430 : }
4431 9716 : break;
4432 :
4433 0 : case T_GatherPath:
4434 : {
4435 0 : GatherPath *gpath = (GatherPath *) path;
4436 :
4437 0 : REJECT_IF_PATH_NOT_REPARAMETERIZABLE(gpath->subpath);
4438 : }
4439 0 : break;
4440 :
4441 0 : default:
4442 : /* We don't know how to reparameterize this path. */
4443 0 : return false;
4444 : }
4445 :
4446 35184 : return true;
4447 : }
4448 :
4449 : /*
4450 : * reparameterize_pathlist_by_child
4451 : * Helper function to reparameterize a list of paths by given child rel.
4452 : *
4453 : * Returns NIL to indicate failure, so pathlist had better not be NIL.
4454 : */
4455 : static List *
4456 72 : reparameterize_pathlist_by_child(PlannerInfo *root,
4457 : List *pathlist,
4458 : RelOptInfo *child_rel)
4459 : {
4460 : ListCell *lc;
4461 72 : List *result = NIL;
4462 :
4463 216 : foreach(lc, pathlist)
4464 : {
4465 144 : Path *path = reparameterize_path_by_child(root, lfirst(lc),
4466 : child_rel);
4467 :
4468 144 : if (path == NULL)
4469 : {
4470 0 : list_free(result);
4471 0 : return NIL;
4472 : }
4473 :
4474 144 : result = lappend(result, path);
4475 : }
4476 :
4477 72 : return result;
4478 : }
4479 :
4480 : /*
4481 : * pathlist_is_reparameterizable_by_child
4482 : * Helper function to check if a list of paths can be reparameterized.
4483 : */
4484 : static bool
4485 240 : pathlist_is_reparameterizable_by_child(List *pathlist, RelOptInfo *child_rel)
4486 : {
4487 : ListCell *lc;
4488 :
4489 720 : foreach(lc, pathlist)
4490 : {
4491 480 : Path *path = (Path *) lfirst(lc);
4492 :
4493 480 : if (!path_is_reparameterizable_by_child(path, child_rel))
4494 0 : return false;
4495 : }
4496 :
4497 240 : return true;
4498 : }
|