Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * pathnode.c
4 : * Routines to manipulate pathlists and create path nodes
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/optimizer/util/pathnode.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include <math.h>
18 :
19 : #include "foreign/fdwapi.h"
20 : #include "miscadmin.h"
21 : #include "nodes/extensible.h"
22 : #include "nodes/nodeFuncs.h"
23 : #include "optimizer/appendinfo.h"
24 : #include "optimizer/clauses.h"
25 : #include "optimizer/cost.h"
26 : #include "optimizer/optimizer.h"
27 : #include "optimizer/pathnode.h"
28 : #include "optimizer/paths.h"
29 : #include "optimizer/planmain.h"
30 : #include "optimizer/prep.h"
31 : #include "optimizer/restrictinfo.h"
32 : #include "optimizer/tlist.h"
33 : #include "parser/parsetree.h"
34 : #include "utils/lsyscache.h"
35 : #include "utils/memutils.h"
36 : #include "utils/selfuncs.h"
37 :
38 : typedef enum
39 : {
40 : COSTS_EQUAL, /* path costs are fuzzily equal */
41 : COSTS_BETTER1, /* first path is cheaper than second */
42 : COSTS_BETTER2, /* second path is cheaper than first */
43 : COSTS_DIFFERENT, /* neither path dominates the other on cost */
44 : } PathCostComparison;
45 :
46 : /*
47 : * STD_FUZZ_FACTOR is the normal fuzz factor for compare_path_costs_fuzzily.
48 : * XXX is it worth making this user-controllable? It provides a tradeoff
49 : * between planner runtime and the accuracy of path cost comparisons.
50 : */
51 : #define STD_FUZZ_FACTOR 1.01
52 :
53 : static List *translate_sub_tlist(List *tlist, int relid);
54 : static int append_total_cost_compare(const ListCell *a, const ListCell *b);
55 : static int append_startup_cost_compare(const ListCell *a, const ListCell *b);
56 : static List *reparameterize_pathlist_by_child(PlannerInfo *root,
57 : List *pathlist,
58 : RelOptInfo *child_rel);
59 :
60 :
61 : /*****************************************************************************
62 : * MISC. PATH UTILITIES
63 : *****************************************************************************/
64 :
65 : /*
66 : * compare_path_costs
67 : * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
68 : * or more expensive than path2 for the specified criterion.
69 : */
70 : int
71 641526 : compare_path_costs(Path *path1, Path *path2, CostSelector criterion)
72 : {
73 641526 : if (criterion == STARTUP_COST)
74 : {
75 328044 : if (path1->startup_cost < path2->startup_cost)
76 203480 : return -1;
77 124564 : if (path1->startup_cost > path2->startup_cost)
78 54392 : return +1;
79 :
80 : /*
81 : * If paths have the same startup cost (not at all unlikely), order
82 : * them by total cost.
83 : */
84 70172 : if (path1->total_cost < path2->total_cost)
85 29066 : return -1;
86 41106 : if (path1->total_cost > path2->total_cost)
87 3980 : return +1;
88 : }
89 : else
90 : {
91 313482 : if (path1->total_cost < path2->total_cost)
92 295684 : return -1;
93 17798 : if (path1->total_cost > path2->total_cost)
94 1422 : return +1;
95 :
96 : /*
97 : * If paths have the same total cost, order them by startup cost.
98 : */
99 16376 : if (path1->startup_cost < path2->startup_cost)
100 80 : return -1;
101 16296 : if (path1->startup_cost > path2->startup_cost)
102 24 : return +1;
103 : }
104 53398 : return 0;
105 : }
106 :
107 : /*
108 : * compare_fractional_path_costs
109 : * Return -1, 0, or +1 according as path1 is cheaper, the same cost,
110 : * or more expensive than path2 for fetching the specified fraction
111 : * of the total tuples.
112 : *
113 : * If fraction is <= 0 or > 1, we interpret it as 1, ie, we select the
114 : * path with the cheaper total_cost.
115 : */
116 : int
117 5842 : compare_fractional_path_costs(Path *path1, Path *path2,
118 : double fraction)
119 : {
120 : Cost cost1,
121 : cost2;
122 :
123 5842 : if (fraction <= 0.0 || fraction >= 1.0)
124 4738 : return compare_path_costs(path1, path2, TOTAL_COST);
125 1104 : cost1 = path1->startup_cost +
126 1104 : fraction * (path1->total_cost - path1->startup_cost);
127 1104 : cost2 = path2->startup_cost +
128 1104 : fraction * (path2->total_cost - path2->startup_cost);
129 1104 : if (cost1 < cost2)
130 654 : return -1;
131 450 : if (cost1 > cost2)
132 450 : return +1;
133 0 : return 0;
134 : }
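As a worked illustration of the fractional rule above: the cost charged for fetching a fraction f of the tuples is startup_cost + f * (total_cost - startup_cost). The standalone snippet below (not PostgreSQL code; the numbers are invented) shows how a fast-start path can win for a small fraction even though it loses on total cost.

#include <stdio.h>

/* Standalone illustration of the fractional-cost interpolation used above. */
static double
fractional_cost(double startup_cost, double total_cost, double fraction)
{
	return startup_cost + fraction * (total_cost - startup_cost);
}

int
main(void)
{
	/* startup 10, total 110: 10 + 0.25 * (110 - 10) = 35.0 */
	printf("%.1f\n", fractional_cost(10.0, 110.0, 0.25));

	/* startup 30, total 100: 30 + 0.25 * (100 - 30) = 47.5, so this path
	 * loses at fraction 0.25 despite having the cheaper total cost. */
	printf("%.1f\n", fractional_cost(30.0, 100.0, 0.25));
	return 0;
}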
135 :
136 : /*
137 : * compare_path_costs_fuzzily
138 : * Compare the costs of two paths to see if either can be said to
139 : * dominate the other.
140 : *
141 : * We use fuzzy comparisons so that add_path() can avoid keeping both of
142 : * a pair of paths that really have insignificantly different cost.
143 : *
144 : * The fuzz_factor argument must be 1.0 plus delta, where delta is the
145 : * fraction of the smaller cost that is considered to be a significant
146 : * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
147 : * be 1% of the smaller cost.
148 : *
149 : * The two paths are said to have "equal" costs if both startup and total
150 : * costs are fuzzily the same. Path1 is said to be better than path2 if
151 : * it has fuzzily better startup cost and fuzzily no worse total cost,
152 : * or if it has fuzzily better total cost and fuzzily no worse startup cost.
153 : * Path2 is better than path1 if the reverse holds. Finally, if one path
154 : * is fuzzily better than the other on startup cost and fuzzily worse on
155 : * total cost, we just say that their costs are "different", since neither
156 : * dominates the other across the whole performance spectrum.
157 : *
158 : * This function also enforces a policy rule that paths for which the relevant
159 : * one of parent->consider_startup and parent->consider_param_startup is false
160 : * cannot survive comparisons solely on the grounds of good startup cost, so
161 : * we never return COSTS_DIFFERENT when that is true for the total-cost loser.
162 : * (But if total costs are fuzzily equal, we compare startup costs anyway,
163 : * in hopes of eliminating one path or the other.)
164 : */
165 : static PathCostComparison
166 2948282 : compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
167 : {
168 : #define CONSIDER_PATH_STARTUP_COST(p) \
169 : ((p)->param_info == NULL ? (p)->parent->consider_startup : (p)->parent->consider_param_startup)
170 :
171 : /*
172 : * Check total cost first since it's more likely to be different; many
173 : * paths have zero startup cost.
174 : */
175 2948282 : if (path1->total_cost > path2->total_cost * fuzz_factor)
176 : {
177 : /* path1 fuzzily worse on total cost */
178 1487562 : if (CONSIDER_PATH_STARTUP_COST(path1) &&
179 29552 : path2->startup_cost > path1->startup_cost * fuzz_factor)
180 : {
181 : /* ... but path2 fuzzily worse on startup, so DIFFERENT */
182 12568 : return COSTS_DIFFERENT;
183 : }
184 : /* else path2 dominates */
185 1474994 : return COSTS_BETTER2;
186 : }
187 1460720 : if (path2->total_cost > path1->total_cost * fuzz_factor)
188 : {
189 : /* path2 fuzzily worse on total cost */
190 755828 : if (CONSIDER_PATH_STARTUP_COST(path2) &&
191 14902 : path1->startup_cost > path2->startup_cost * fuzz_factor)
192 : {
193 : /* ... but path1 fuzzily worse on startup, so DIFFERENT */
194 8280 : return COSTS_DIFFERENT;
195 : }
196 : /* else path1 dominates */
197 747548 : return COSTS_BETTER1;
198 : }
199 : /* fuzzily the same on total cost ... */
200 704892 : if (path1->startup_cost > path2->startup_cost * fuzz_factor)
201 : {
202 : /* ... but path1 fuzzily worse on startup, so path2 wins */
203 272826 : return COSTS_BETTER2;
204 : }
205 432066 : if (path2->startup_cost > path1->startup_cost * fuzz_factor)
206 : {
207 : /* ... but path2 fuzzily worse on startup, so path1 wins */
208 42066 : return COSTS_BETTER1;
209 : }
210 : /* fuzzily the same on both costs */
211 390000 : return COSTS_EQUAL;
212 :
213 : #undef CONSIDER_PATH_STARTUP_COST
214 : }
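A minimal standalone sketch of the fuzz rule applied above (illustrative numbers, not PostgreSQL code): one cost is treated as worse than another only when it exceeds the other by more than the fuzz margin, so with STD_FUZZ_FACTOR = 1.01 a difference of under 1% is ignored on that metric.

#include <stdio.h>

/* Cost a is "fuzzily worse" than cost b when a > b * fuzz_factor. */
static int
fuzzily_worse(double a, double b, double fuzz_factor)
{
	return a > b * fuzz_factor;
}

int
main(void)
{
	/* 100.9 vs 100.0: within 1%, so neither side is fuzzily worse. */
	printf("%d %d\n",
		   fuzzily_worse(100.9, 100.0, 1.01),
		   fuzzily_worse(100.0, 100.9, 1.01));		/* prints: 0 0 */

	/* 102.0 vs 100.0: more than 1% apart, so 102.0 is fuzzily worse. */
	printf("%d\n", fuzzily_worse(102.0, 100.0, 1.01));	/* prints: 1 */
	return 0;
}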
215 :
216 : /*
217 : * set_cheapest
218 : * Find the minimum-cost paths from among a relation's paths,
219 : * and save them in the rel's cheapest-path fields.
220 : *
221 : * cheapest_total_path is normally the cheapest-total-cost unparameterized
222 : * path; but if there are no unparameterized paths, we assign it to be the
223 : * best (cheapest least-parameterized) parameterized path. However, only
224 : * unparameterized paths are considered candidates for cheapest_startup_path,
225 : * so that will be NULL if there are no unparameterized paths.
226 : *
227 : * The cheapest_parameterized_paths list collects all parameterized paths
228 : * that have survived the add_path() tournament for this relation. (Since
229 : * add_path ignores pathkeys for a parameterized path, these will be paths
230 : * that have best cost or best row count for their parameterization. We
231 : * may also have both a parallel-safe and a non-parallel-safe path in some
232 : * cases for the same parameterization, but this should be
233 : * relatively rare since, most typically, all paths for the same relation
234 : * will be parallel-safe or none of them will.)
235 : *
236 : * cheapest_parameterized_paths always includes the cheapest-total
237 : * unparameterized path, too, if there is one; the users of that list find
238 : * it more convenient if that's included.
239 : *
240 : * This is normally called only after we've finished constructing the path
241 : * list for the rel node.
242 : */
243 : void
244 1750640 : set_cheapest(RelOptInfo *parent_rel)
245 : {
246 : Path *cheapest_startup_path;
247 : Path *cheapest_total_path;
248 : Path *best_param_path;
249 : List *parameterized_paths;
250 : ListCell *p;
251 :
252 : Assert(IsA(parent_rel, RelOptInfo));
253 :
254 1750640 : if (parent_rel->pathlist == NIL)
255 0 : elog(ERROR, "could not devise a query plan for the given query");
256 :
257 1750640 : cheapest_startup_path = cheapest_total_path = best_param_path = NULL;
258 1750640 : parameterized_paths = NIL;
259 :
260 3850276 : foreach(p, parent_rel->pathlist)
261 : {
262 2099636 : Path *path = (Path *) lfirst(p);
263 : int cmp;
264 :
265 2099636 : if (path->param_info)
266 : {
267 : /* Parameterized path, so add it to parameterized_paths */
268 105884 : parameterized_paths = lappend(parameterized_paths, path);
269 :
270 : /*
271 : * If we have an unparameterized cheapest-total, we no longer care
272 : * about finding the best parameterized path, so move on.
273 : */
274 105884 : if (cheapest_total_path)
275 19440 : continue;
276 :
277 : /*
278 : * Otherwise, track the best parameterized path, which is the one
279 : * with least total cost among those of the minimum
280 : * parameterization.
281 : */
282 86444 : if (best_param_path == NULL)
283 80152 : best_param_path = path;
284 : else
285 : {
286 6292 : switch (bms_subset_compare(PATH_REQ_OUTER(path),
287 6292 : PATH_REQ_OUTER(best_param_path)))
288 : {
289 54 : case BMS_EQUAL:
290 : /* keep the cheaper one */
291 54 : if (compare_path_costs(path, best_param_path,
292 : TOTAL_COST) < 0)
293 0 : best_param_path = path;
294 54 : break;
295 340 : case BMS_SUBSET1:
296 : /* new path is less-parameterized */
297 340 : best_param_path = path;
298 340 : break;
299 4 : case BMS_SUBSET2:
300 : /* old path is less-parameterized, keep it */
301 4 : break;
302 5894 : case BMS_DIFFERENT:
303 :
304 : /*
305 : * This means that neither path has the least possible
306 : * parameterization for the rel. We'll sit on the old
307 : * path until something better comes along.
308 : */
309 5894 : break;
310 : }
311 86444 : }
312 : }
313 : else
314 : {
315 : /* Unparameterized path, so consider it for cheapest slots */
316 1993752 : if (cheapest_total_path == NULL)
317 : {
318 1739952 : cheapest_startup_path = cheapest_total_path = path;
319 1739952 : continue;
320 : }
321 :
322 : /*
323 : * If we find two paths of identical costs, try to keep the
324 : * better-sorted one. The paths might have unrelated sort
325 : * orderings, in which case we can only guess which might be
326 : * better to keep, but if one is superior then we definitely
327 : * should keep that one.
328 : */
329 253800 : cmp = compare_path_costs(cheapest_startup_path, path, STARTUP_COST);
330 253800 : if (cmp > 0 ||
331 218 : (cmp == 0 &&
332 218 : compare_pathkeys(cheapest_startup_path->pathkeys,
333 : path->pathkeys) == PATHKEYS_BETTER2))
334 43874 : cheapest_startup_path = path;
335 :
336 253800 : cmp = compare_path_costs(cheapest_total_path, path, TOTAL_COST);
337 253800 : if (cmp > 0 ||
338 0 : (cmp == 0 &&
339 0 : compare_pathkeys(cheapest_total_path->pathkeys,
340 : path->pathkeys) == PATHKEYS_BETTER2))
341 0 : cheapest_total_path = path;
342 : }
343 : }
344 :
345 : /* Add cheapest unparameterized path, if any, to parameterized_paths */
346 1750640 : if (cheapest_total_path)
347 1739952 : parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
348 :
349 : /*
350 : * If there is no unparameterized path, use the best parameterized path as
351 : * cheapest_total_path (but not as cheapest_startup_path).
352 : */
353 1750640 : if (cheapest_total_path == NULL)
354 10688 : cheapest_total_path = best_param_path;
355 : Assert(cheapest_total_path != NULL);
356 :
357 1750640 : parent_rel->cheapest_startup_path = cheapest_startup_path;
358 1750640 : parent_rel->cheapest_total_path = cheapest_total_path;
359 1750640 : parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
360 1750640 : parent_rel->cheapest_parameterized_paths = parameterized_paths;
361 1750640 : }
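A hedged sketch of the usual calling pattern (illustrative only, not part of pathnode.c): once all add_path()/add_partial_path() calls for a rel are finished, set_cheapest() is run exactly once, and later code reads the cheapest_* fields rather than rescanning the pathlist.

#include "postgres.h"

#include "optimizer/pathnode.h"

/* Illustrative wrapper showing where set_cheapest() fits in the flow. */
static Path *
finish_building_paths(RelOptInfo *rel)
{
	/* ... all add_path()/add_partial_path() calls for this rel are done ... */
	set_cheapest(rel);

	/* Consumers normally start from the cheapest-total path. */
	return rel->cheapest_total_path;
}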
362 :
363 : /*
364 : * add_path
365 : * Consider a potential implementation path for the specified parent rel,
366 : * and add it to the rel's pathlist if it is worthy of consideration.
367 : * A path is worthy if it has a better sort order (better pathkeys) or
368 : * cheaper cost (on either dimension), or generates fewer rows, than any
369 : * existing path that has the same or superset parameterization rels.
370 : * We also consider parallel-safe paths more worthy than others.
371 : *
372 : * We also remove from the rel's pathlist any old paths that are dominated
373 : * by new_path --- that is, new_path is cheaper, at least as well ordered,
374 : * generates no more rows, requires no outer rels not required by the old
375 : * path, and is no less parallel-safe.
376 : *
377 : * In most cases, a path with a superset parameterization will generate
378 : * fewer rows (since it has more join clauses to apply), so that those two
379 : * figures of merit move in opposite directions; this means that a path of
380 : * one parameterization can seldom dominate a path of another. But such
381 : * cases do arise, so we make the full set of checks anyway.
382 : *
383 : * There are two policy decisions embedded in this function, along with
384 : * its sibling add_path_precheck. First, we treat all parameterized paths
385 : * as having NIL pathkeys, so that they cannot win comparisons on the
386 : * basis of sort order. This is to reduce the number of parameterized
387 : * paths that are kept; see discussion in src/backend/optimizer/README.
388 : *
389 : * Second, we only consider cheap startup cost to be interesting if
390 : * parent_rel->consider_startup is true for an unparameterized path, or
391 : * parent_rel->consider_param_startup is true for a parameterized one.
392 : * Again, this allows discarding useless paths sooner.
393 : *
394 : * The pathlist is kept sorted by total_cost, with cheaper paths
395 : * at the front. Within this routine, that's simply a speed hack:
396 : * doing it that way makes it more likely that we will reject an inferior
397 : * path after a few comparisons, rather than many comparisons.
398 : * However, add_path_precheck relies on this ordering to exit early
399 : * when possible.
400 : *
401 : * NOTE: discarded Path objects are immediately pfree'd to reduce planner
402 : * memory consumption. We dare not try to free the substructure of a Path,
403 : * since much of it may be shared with other Paths or the query tree itself;
404 : * but just recycling discarded Path nodes is a very useful savings in
405 : * a large join tree. We can recycle the List nodes of pathlist, too.
406 : *
407 : * As noted in optimizer/README, deleting a previously-accepted Path is
408 : * safe because we know that Paths of this rel cannot yet be referenced
409 : * from any other rel, such as a higher-level join. However, in some cases
410 : * it is possible that a Path is referenced by another Path for its own
411 : * rel; we must not delete such a Path, even if it is dominated by the new
412 : * Path. Currently this occurs only for IndexPath objects, which may be
413 : * referenced as children of BitmapHeapPaths as well as being paths in
414 : * their own right. Hence, we don't pfree IndexPaths when rejecting them.
415 : *
416 : * 'parent_rel' is the relation entry to which the path corresponds.
417 : * 'new_path' is a potential path for parent_rel.
418 : *
419 : * Returns nothing, but modifies parent_rel->pathlist.
420 : */
421 : void
422 3303528 : add_path(RelOptInfo *parent_rel, Path *new_path)
423 : {
424 3303528 : bool accept_new = true; /* unless we find a superior old path */
425 3303528 : int insert_at = 0; /* where to insert new item */
426 : List *new_path_pathkeys;
427 : ListCell *p1;
428 :
429 : /*
430 : * This is a convenient place to check for query cancel --- no part of the
431 : * planner goes very long without calling add_path().
432 : */
433 3303528 : CHECK_FOR_INTERRUPTS();
434 :
435 : /* Pretend parameterized paths have no pathkeys, per comment above */
436 3303528 : new_path_pathkeys = new_path->param_info ? NIL : new_path->pathkeys;
437 :
438 : /*
439 : * Loop to check proposed new path against old paths. Note it is possible
440 : * for more than one old path to be tossed out because new_path dominates
441 : * it.
442 : */
443 4829478 : foreach(p1, parent_rel->pathlist)
444 : {
445 2695778 : Path *old_path = (Path *) lfirst(p1);
446 2695778 : bool remove_old = false; /* unless new proves superior */
447 : PathCostComparison costcmp;
448 : PathKeysComparison keyscmp;
449 : BMS_Comparison outercmp;
450 :
451 : /*
452 : * Do a fuzzy cost comparison with standard fuzziness limit.
453 : */
454 2695778 : costcmp = compare_path_costs_fuzzily(new_path, old_path,
455 : STD_FUZZ_FACTOR);
456 :
457 : /*
458 : * If the two paths compare differently for startup and total cost,
459 : * then we want to keep both, and we can skip comparing pathkeys and
460 : * required_outer rels. If they compare the same, proceed with the
461 : * other comparisons. Row count is checked last. (We make the tests
462 : * in this order because the cost comparison is most likely to turn
463 : * out "different", and the pathkeys comparison next most likely. As
464 : * explained above, row count very seldom makes a difference, so even
465 : * though it's cheap to compare there's not much point in checking it
466 : * earlier.)
467 : */
468 2695778 : if (costcmp != COSTS_DIFFERENT)
469 : {
470 : /* Similarly check to see if either dominates on pathkeys */
471 : List *old_path_pathkeys;
472 :
473 2674930 : old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
474 2674930 : keyscmp = compare_pathkeys(new_path_pathkeys,
475 : old_path_pathkeys);
476 2674930 : if (keyscmp != PATHKEYS_DIFFERENT)
477 : {
478 2567004 : switch (costcmp)
479 : {
480 271112 : case COSTS_EQUAL:
481 271112 : outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
482 271112 : PATH_REQ_OUTER(old_path));
483 271112 : if (keyscmp == PATHKEYS_BETTER1)
484 : {
485 2062 : if ((outercmp == BMS_EQUAL ||
486 2062 : outercmp == BMS_SUBSET1) &&
487 2062 : new_path->rows <= old_path->rows &&
488 2054 : new_path->parallel_safe >= old_path->parallel_safe)
489 2054 : remove_old = true; /* new dominates old */
490 : }
491 269050 : else if (keyscmp == PATHKEYS_BETTER2)
492 : {
493 12746 : if ((outercmp == BMS_EQUAL ||
494 12746 : outercmp == BMS_SUBSET2) &&
495 12746 : new_path->rows >= old_path->rows &&
496 12626 : new_path->parallel_safe <= old_path->parallel_safe)
497 12626 : accept_new = false; /* old dominates new */
498 : }
499 : else /* keyscmp == PATHKEYS_EQUAL */
500 : {
501 256304 : if (outercmp == BMS_EQUAL)
502 : {
503 : /*
504 : * Same pathkeys and outer rels, and fuzzily
505 : * the same cost, so keep just one; to decide
506 : * which, first check parallel-safety, then
507 : * rows, then do a fuzzy cost comparison with
508 : * very small fuzz limit. (We used to do an
509 : * exact cost comparison, but that results in
510 : * annoying platform-specific plan variations
511 : * due to roundoff in the cost estimates.) If
512 : * things are still tied, arbitrarily keep
513 : * only the old path. Notice that we will
514 : * keep only the old path even if the
515 : * less-fuzzy comparison decides the startup
516 : * and total costs compare differently.
517 : */
518 252724 : if (new_path->parallel_safe >
519 252724 : old_path->parallel_safe)
520 48 : remove_old = true; /* new dominates old */
521 252676 : else if (new_path->parallel_safe <
522 252676 : old_path->parallel_safe)
523 136 : accept_new = false; /* old dominates new */
524 252540 : else if (new_path->rows < old_path->rows)
525 24 : remove_old = true; /* new dominates old */
526 252516 : else if (new_path->rows > old_path->rows)
527 12 : accept_new = false; /* old dominates new */
528 252504 : else if (compare_path_costs_fuzzily(new_path,
529 : old_path,
530 : 1.0000000001) == COSTS_BETTER1)
531 10478 : remove_old = true; /* new dominates old */
532 : else
533 242026 : accept_new = false; /* old equals or
534 : * dominates new */
535 : }
536 3580 : else if (outercmp == BMS_SUBSET1 &&
537 748 : new_path->rows <= old_path->rows &&
538 720 : new_path->parallel_safe >= old_path->parallel_safe)
539 720 : remove_old = true; /* new dominates old */
540 2860 : else if (outercmp == BMS_SUBSET2 &&
541 2450 : new_path->rows >= old_path->rows &&
542 2430 : new_path->parallel_safe <= old_path->parallel_safe)
543 2430 : accept_new = false; /* old dominates new */
544 : /* else different parameterizations, keep both */
545 : }
546 271112 : break;
547 762966 : case COSTS_BETTER1:
548 762966 : if (keyscmp != PATHKEYS_BETTER2)
549 : {
550 541712 : outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
551 541712 : PATH_REQ_OUTER(old_path));
552 541712 : if ((outercmp == BMS_EQUAL ||
553 460216 : outercmp == BMS_SUBSET1) &&
554 460216 : new_path->rows <= old_path->rows &&
555 456982 : new_path->parallel_safe >= old_path->parallel_safe)
556 454424 : remove_old = true; /* new dominates old */
557 : }
558 762966 : break;
559 1532926 : case COSTS_BETTER2:
560 1532926 : if (keyscmp != PATHKEYS_BETTER1)
561 : {
562 1038572 : outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
563 1038572 : PATH_REQ_OUTER(old_path));
564 1038572 : if ((outercmp == BMS_EQUAL ||
565 961950 : outercmp == BMS_SUBSET2) &&
566 961950 : new_path->rows >= old_path->rows &&
567 914212 : new_path->parallel_safe <= old_path->parallel_safe)
568 912598 : accept_new = false; /* old dominates new */
569 : }
570 1532926 : break;
571 0 : case COSTS_DIFFERENT:
572 :
573 : /*
574 : * can't get here, but keep this case to keep compiler
575 : * quiet
576 : */
577 0 : break;
578 : }
579 128774 : }
580 : }
581 :
582 : /*
583 : * Remove current element from pathlist if dominated by new.
584 : */
585 2695778 : if (remove_old)
586 : {
587 467748 : parent_rel->pathlist = foreach_delete_current(parent_rel->pathlist,
588 : p1);
589 :
590 : /*
591 : * Delete the data pointed-to by the deleted cell, if possible
592 : */
593 467748 : if (!IsA(old_path, IndexPath))
594 453218 : pfree(old_path);
595 : }
596 : else
597 : {
598 : /* new belongs after this old path if it has cost >= old's */
599 2228030 : if (new_path->total_cost >= old_path->total_cost)
600 1858830 : insert_at = foreach_current_index(p1) + 1;
601 : }
602 :
603 : /*
604 : * If we found an old path that dominates new_path, we can quit
605 : * scanning the pathlist; we will not add new_path, and we assume
606 : * new_path cannot dominate any other elements of the pathlist.
607 : */
608 2695778 : if (!accept_new)
609 1169828 : break;
610 : }
611 :
612 3303528 : if (accept_new)
613 : {
614 : /* Accept the new path: insert it at proper place in pathlist */
615 2133700 : parent_rel->pathlist =
616 2133700 : list_insert_nth(parent_rel->pathlist, insert_at, new_path);
617 : }
618 : else
619 : {
620 : /* Reject and recycle the new path */
621 1169828 : if (!IsA(new_path, IndexPath))
622 1099832 : pfree(new_path);
623 : }
624 3303528 : }
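As a hedged example of how new paths typically reach this tournament, an extension can use the standard set_rel_pathlist_hook to propose an extra path for a base relation. The hook body below is illustrative only; the path it submits survives only if nothing already in the pathlist dominates it.

#include "postgres.h"

#include "optimizer/pathnode.h"
#include "optimizer/paths.h"

/*
 * Illustrative hook body; in a real extension it would be installed from
 * _PG_init() by saving and replacing set_rel_pathlist_hook.
 */
static void
example_rel_pathlist_hook(PlannerInfo *root, RelOptInfo *rel,
						  Index rti, RangeTblEntry *rte)
{
	/* Propose an unparameterized, non-parallel sequential-scan path. */
	Path	   *path = create_seqscan_path(root, rel, NULL, 0);

	/* add_path() keeps it only if no existing path dominates it. */
	add_path(rel, path);
}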
625 :
626 : /*
627 : * add_path_precheck
628 : * Check whether a proposed new path could possibly get accepted.
629 : * We assume we know the path's pathkeys and parameterization accurately,
630 : * and have lower bounds for its costs.
631 : *
632 : * Note that we do not know the path's rowcount, since getting an estimate for
633 : * that is too expensive to do before prechecking. We assume here that paths
634 : * of a superset parameterization will generate fewer rows; if that holds,
635 : * then paths with different parameterizations cannot dominate each other
636 : * and so we can simply ignore existing paths of another parameterization.
637 : * (In the infrequent cases where that rule of thumb fails, add_path will
638 : * get rid of the inferior path.)
639 : *
640 : * At the time this is called, we haven't actually built a Path structure,
641 : * so the required information has to be passed piecemeal.
642 : */
643 : bool
644 3403666 : add_path_precheck(RelOptInfo *parent_rel,
645 : Cost startup_cost, Cost total_cost,
646 : List *pathkeys, Relids required_outer)
647 : {
648 : List *new_path_pathkeys;
649 : bool consider_startup;
650 : ListCell *p1;
651 :
652 : /* Pretend parameterized paths have no pathkeys, per add_path policy */
653 3403666 : new_path_pathkeys = required_outer ? NIL : pathkeys;
654 :
655 : /* Decide whether new path's startup cost is interesting */
656 3403666 : consider_startup = required_outer ? parent_rel->consider_param_startup : parent_rel->consider_startup;
657 :
658 4278534 : foreach(p1, parent_rel->pathlist)
659 : {
660 4056832 : Path *old_path = (Path *) lfirst(p1);
661 : PathKeysComparison keyscmp;
662 :
663 : /*
664 : * We are looking for an old_path with the same parameterization (and
665 : * by assumption the same rowcount) that dominates the new path on
666 : * pathkeys as well as both cost metrics. If we find one, we can
667 : * reject the new path.
668 : *
669 : * Cost comparisons here should match compare_path_costs_fuzzily.
670 : */
671 4056832 : if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
672 : {
673 : /* new path can win on startup cost only if consider_startup */
674 2857466 : if (startup_cost > old_path->startup_cost * STD_FUZZ_FACTOR ||
675 1256762 : !consider_startup)
676 : {
677 : /* new path loses on cost, so check pathkeys... */
678 : List *old_path_pathkeys;
679 :
680 2847012 : old_path_pathkeys = old_path->param_info ? NIL : old_path->pathkeys;
681 2847012 : keyscmp = compare_pathkeys(new_path_pathkeys,
682 : old_path_pathkeys);
683 2847012 : if (keyscmp == PATHKEYS_EQUAL ||
684 : keyscmp == PATHKEYS_BETTER2)
685 : {
686 : /* new path does not win on pathkeys... */
687 2052014 : if (bms_equal(required_outer, PATH_REQ_OUTER(old_path)))
688 : {
689 : /* Found an old path that dominates the new one */
690 1982598 : return false;
691 : }
692 : }
693 : }
694 : }
695 : else
696 : {
697 : /*
698 : * Since the pathlist is sorted by total_cost, we can stop looking
699 : * once we reach a path with a total_cost larger than the new
700 : * path's.
701 : */
702 1199366 : break;
703 : }
704 : }
705 :
706 1421068 : return true;
707 : }
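A hedged sketch of how add_path_precheck is meant to be used (illustrative only): a caller that can compute cheap lower bounds on a prospective path's costs asks the precheck first and skips building the full Path structure when the answer is negative. The helper below merely wraps that decision; the lower bounds are assumed to come from whatever cheap estimate the caller already has.

#include "postgres.h"

#include "optimizer/pathnode.h"

static bool
worth_building_path(RelOptInfo *rel,
					Cost startup_lower_bound, Cost total_lower_bound,
					List *pathkeys, Relids required_outer)
{
	/*
	 * If an already-accepted path with the same parameterization beats
	 * these lower bounds on both cost metrics as well as on pathkeys,
	 * there is no point in constructing the new Path at all.
	 */
	return add_path_precheck(rel, startup_lower_bound, total_lower_bound,
							 pathkeys, required_outer);
}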
708 :
709 : /*
710 : * add_partial_path
711 : * Like add_path, our goal here is to consider whether a path is worthy
712 : * of being kept around, but the considerations here are a bit different.
713 : * A partial path is one which can be executed in any number of workers in
714 : * parallel such that each worker will generate a subset of the path's
715 : * overall result.
716 : *
717 : * As in add_path, the partial_pathlist is kept sorted with the cheapest
718 : * total path in front. This is depended on by multiple places, which
719 : * just take the front entry as the cheapest path without searching.
720 : *
721 : * We don't generate parameterized partial paths for several reasons. Most
722 : * importantly, they're not safe to execute, because there's nothing to
723 : * make sure that a parallel scan within the parameterized portion of the
724 : * plan is running with the same value in every worker at the same time.
725 : * Fortunately, it seems unlikely to be worthwhile anyway, because having
726 : * each worker scan the entire outer relation and a subset of the inner
727 : * relation will generally be a terrible plan. The inner (parameterized)
728 : * side of the plan will be small anyway. There could be rare cases where
729 : * this wins big - e.g. if join order constraints put a 1-row relation on
730 : * the outer side of the topmost join with a parameterized plan on the inner
731 : * side - but we'll have to be content not to handle such cases until
732 : * somebody builds an executor infrastructure that can cope with them.
733 : *
734 : * Because we don't consider parameterized paths here, we also don't
735 : * need to consider the row counts as a measure of quality: every path will
736 : * produce the same number of rows. Neither do we need to consider startup
737 : * costs: parallelism is only used for plans that will be run to completion.
738 : * Therefore, this routine is much simpler than add_path: it needs to
739 : * consider only pathkeys and total cost.
740 : *
741 : * As with add_path, we pfree paths that are found to be dominated by
742 : * another partial path; this requires that there be no other references to
743 : * such paths yet. Hence, GatherPaths must not be created for a rel until
744 : * we're done creating all partial paths for it. Unlike add_path, we don't
745 : * take an exception for IndexPaths as partial index paths won't be
746 : * referenced by partial BitmapHeapPaths.
747 : */
748 : void
749 90286 : add_partial_path(RelOptInfo *parent_rel, Path *new_path)
750 : {
751 90286 : bool accept_new = true; /* unless we find a superior old path */
752 90286 : int insert_at = 0; /* where to insert new item */
753 : ListCell *p1;
754 :
755 : /* Check for query cancel. */
756 90286 : CHECK_FOR_INTERRUPTS();
757 :
758 : /* Path to be added must be parallel safe. */
759 : Assert(new_path->parallel_safe);
760 :
761 : /* Relation should be OK for parallelism, too. */
762 : Assert(parent_rel->consider_parallel);
763 :
764 : /*
765 : * As in add_path, throw out any paths which are dominated by the new
766 : * path, but throw out the new path if some existing path dominates it.
767 : */
768 122068 : foreach(p1, parent_rel->partial_pathlist)
769 : {
770 46328 : Path *old_path = (Path *) lfirst(p1);
771 46328 : bool remove_old = false; /* unless new proves superior */
772 : PathKeysComparison keyscmp;
773 :
774 : /* Compare pathkeys. */
775 46328 : keyscmp = compare_pathkeys(new_path->pathkeys, old_path->pathkeys);
776 :
777 : /* Unless pathkeys are incompatible, keep just one of the two paths. */
778 46328 : if (keyscmp != PATHKEYS_DIFFERENT)
779 : {
780 46130 : if (new_path->total_cost > old_path->total_cost * STD_FUZZ_FACTOR)
781 : {
782 : /* New path costs more; keep it only if pathkeys are better. */
783 14818 : if (keyscmp != PATHKEYS_BETTER1)
784 6482 : accept_new = false;
785 : }
786 31312 : else if (old_path->total_cost > new_path->total_cost
787 31312 : * STD_FUZZ_FACTOR)
788 : {
789 : /* Old path costs more; keep it only if pathkeys are better. */
790 22904 : if (keyscmp != PATHKEYS_BETTER2)
791 11802 : remove_old = true;
792 : }
793 8408 : else if (keyscmp == PATHKEYS_BETTER1)
794 : {
795 : /* Costs are about the same, new path has better pathkeys. */
796 0 : remove_old = true;
797 : }
798 8408 : else if (keyscmp == PATHKEYS_BETTER2)
799 : {
800 : /* Costs are about the same, old path has better pathkeys. */
801 1662 : accept_new = false;
802 : }
803 6746 : else if (old_path->total_cost > new_path->total_cost * 1.0000000001)
804 : {
805 : /* Pathkeys are the same, and the old path costs more. */
806 344 : remove_old = true;
807 : }
808 : else
809 : {
810 : /*
811 : * Pathkeys are the same, and new path isn't materially
812 : * cheaper.
813 : */
814 6402 : accept_new = false;
815 : }
816 : }
817 :
818 : /*
819 : * Remove current element from partial_pathlist if dominated by new.
820 : */
821 46328 : if (remove_old)
822 : {
823 12146 : parent_rel->partial_pathlist =
824 12146 : foreach_delete_current(parent_rel->partial_pathlist, p1);
825 12146 : pfree(old_path);
826 : }
827 : else
828 : {
829 : /* new belongs after this old path if it has cost >= old's */
830 34182 : if (new_path->total_cost >= old_path->total_cost)
831 22654 : insert_at = foreach_current_index(p1) + 1;
832 : }
833 :
834 : /*
835 : * If we found an old path that dominates new_path, we can quit
836 : * scanning the partial_pathlist; we will not add new_path, and we
837 : * assume new_path cannot dominate any later path.
838 : */
839 46328 : if (!accept_new)
840 14546 : break;
841 : }
842 :
843 90286 : if (accept_new)
844 : {
845 : /* Accept the new path: insert it at proper place */
846 75740 : parent_rel->partial_pathlist =
847 75740 : list_insert_nth(parent_rel->partial_pathlist, insert_at, new_path);
848 : }
849 : else
850 : {
851 : /* Reject and recycle the new path */
852 14546 : pfree(new_path);
853 : }
854 90286 : }
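A hedged sketch of submitting a partial path (illustrative only; the worker count is made up rather than derived the way core code does it): partial paths must be parallel-safe and are added only for rels with consider_parallel set, since add_partial_path asserts both conditions.

#include "postgres.h"

#include "optimizer/pathnode.h"

static void
example_add_partial_seqscan(PlannerInfo *root, RelOptInfo *rel)
{
	int			parallel_workers = 2;	/* illustrative; core code derives
										 * this from the relation's size */

	if (!rel->consider_parallel)
		return;					/* add_partial_path() Asserts this */

	/* A seqscan path built with parallel_workers > 0 is parallel-aware. */
	add_partial_path(rel,
					 create_seqscan_path(root, rel, NULL, parallel_workers));
}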
855 :
856 : /*
857 : * add_partial_path_precheck
858 : * Check whether a proposed new partial path could possibly get accepted.
859 : *
860 : * Unlike add_path_precheck, we can ignore startup cost and parameterization,
861 : * since they don't matter for partial paths (see add_partial_path). But
862 : * we do want to make sure we don't add a partial path if there's already
863 : * a complete path that dominates it, since in that case the proposed path
864 : * is surely a loser.
865 : */
866 : bool
867 62602 : add_partial_path_precheck(RelOptInfo *parent_rel, Cost total_cost,
868 : List *pathkeys)
869 : {
870 : ListCell *p1;
871 :
872 : /*
873 : * Our goal here is twofold. First, we want to find out whether this path
874 : * is clearly inferior to some existing partial path. If so, we want to
875 : * reject it immediately. Second, we want to find out whether this path
876 : * is clearly superior to some existing partial path -- at least, modulo
877 : * final cost computations. If so, we definitely want to consider it.
878 : *
879 : * Unlike add_path(), we always compare pathkeys here. This is because we
880 : * expect partial_pathlist to be very short, and getting a definitive
881 : * answer at this stage avoids the need to call add_path_precheck.
882 : */
883 87700 : foreach(p1, parent_rel->partial_pathlist)
884 : {
885 70238 : Path *old_path = (Path *) lfirst(p1);
886 : PathKeysComparison keyscmp;
887 :
888 70238 : keyscmp = compare_pathkeys(pathkeys, old_path->pathkeys);
889 70238 : if (keyscmp != PATHKEYS_DIFFERENT)
890 : {
891 70046 : if (total_cost > old_path->total_cost * STD_FUZZ_FACTOR &&
892 : keyscmp != PATHKEYS_BETTER1)
893 45140 : return false;
894 35190 : if (old_path->total_cost > total_cost * STD_FUZZ_FACTOR &&
895 : keyscmp != PATHKEYS_BETTER2)
896 10284 : return true;
897 : }
898 : }
899 :
900 : /*
901 : * This path is neither clearly inferior to an existing partial path nor
902 : * clearly good enough that it might replace one. Compare it to
903 : * non-parallel plans. If it loses even before accounting for the cost of
904 : * the Gather node, we should definitely reject it.
905 : *
906 : * Note that we pass the total_cost to add_path_precheck twice. This is
907 : * because it's never advantageous to consider the startup cost of a
908 : * partial path; the resulting plans, if run in parallel, will be run to
909 : * completion.
910 : */
911 17462 : if (!add_path_precheck(parent_rel, total_cost, total_cost, pathkeys,
912 : NULL))
913 788 : return false;
914 :
915 16674 : return true;
916 : }
917 :
918 :
919 : /*****************************************************************************
920 : * PATH NODE CREATION ROUTINES
921 : *****************************************************************************/
922 :
923 : /*
924 : * create_seqscan_path
925 : * Creates a path corresponding to a sequential scan, returning the
926 : * pathnode.
927 : */
928 : Path *
929 352688 : create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
930 : Relids required_outer, int parallel_workers)
931 : {
932 352688 : Path *pathnode = makeNode(Path);
933 :
934 352688 : pathnode->pathtype = T_SeqScan;
935 352688 : pathnode->parent = rel;
936 352688 : pathnode->pathtarget = rel->reltarget;
937 352688 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
938 : required_outer);
939 352688 : pathnode->parallel_aware = (parallel_workers > 0);
940 352688 : pathnode->parallel_safe = rel->consider_parallel;
941 352688 : pathnode->parallel_workers = parallel_workers;
942 352688 : pathnode->pathkeys = NIL; /* seqscan has unordered result */
943 :
944 352688 : cost_seqscan(pathnode, root, rel, pathnode->param_info);
945 :
946 352688 : return pathnode;
947 : }
948 :
949 : /*
950 : * create_samplescan_path
951 : * Creates a path node for a sampled table scan.
952 : */
953 : Path *
954 252 : create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
955 : {
956 252 : Path *pathnode = makeNode(Path);
957 :
958 252 : pathnode->pathtype = T_SampleScan;
959 252 : pathnode->parent = rel;
960 252 : pathnode->pathtarget = rel->reltarget;
961 252 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
962 : required_outer);
963 252 : pathnode->parallel_aware = false;
964 252 : pathnode->parallel_safe = rel->consider_parallel;
965 252 : pathnode->parallel_workers = 0;
966 252 : pathnode->pathkeys = NIL; /* samplescan has unordered result */
967 :
968 252 : cost_samplescan(pathnode, root, rel, pathnode->param_info);
969 :
970 252 : return pathnode;
971 : }
972 :
973 : /*
974 : * create_index_path
975 : * Creates a path node for an index scan.
976 : *
977 : * 'index' is a usable index.
978 : * 'indexclauses' is a list of IndexClause nodes representing clauses
979 : * to be enforced as qual conditions in the scan.
980 : * 'indexorderbys' is a list of bare expressions (no RestrictInfos)
981 : * to be used as index ordering operators in the scan.
982 : * 'indexorderbycols' is an integer list of index column numbers (zero based)
983 : * the ordering operators can be used with.
984 : * 'pathkeys' describes the ordering of the path.
985 : * 'indexscandir' is either ForwardScanDirection or BackwardScanDirection.
986 : * 'indexonly' is true if an index-only scan is wanted.
987 : * 'required_outer' is the set of outer relids for a parameterized path.
988 : * 'loop_count' is the number of repetitions of the indexscan to factor into
989 : * estimates of caching behavior.
990 : * 'partial_path' is true if constructing a parallel index scan path.
991 : *
992 : * Returns the new path node.
993 : */
994 : IndexPath *
995 592352 : create_index_path(PlannerInfo *root,
996 : IndexOptInfo *index,
997 : List *indexclauses,
998 : List *indexorderbys,
999 : List *indexorderbycols,
1000 : List *pathkeys,
1001 : ScanDirection indexscandir,
1002 : bool indexonly,
1003 : Relids required_outer,
1004 : double loop_count,
1005 : bool partial_path)
1006 : {
1007 592352 : IndexPath *pathnode = makeNode(IndexPath);
1008 592352 : RelOptInfo *rel = index->rel;
1009 :
1010 592352 : pathnode->path.pathtype = indexonly ? T_IndexOnlyScan : T_IndexScan;
1011 592352 : pathnode->path.parent = rel;
1012 592352 : pathnode->path.pathtarget = rel->reltarget;
1013 592352 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1014 : required_outer);
1015 592352 : pathnode->path.parallel_aware = false;
1016 592352 : pathnode->path.parallel_safe = rel->consider_parallel;
1017 592352 : pathnode->path.parallel_workers = 0;
1018 592352 : pathnode->path.pathkeys = pathkeys;
1019 :
1020 592352 : pathnode->indexinfo = index;
1021 592352 : pathnode->indexclauses = indexclauses;
1022 592352 : pathnode->indexorderbys = indexorderbys;
1023 592352 : pathnode->indexorderbycols = indexorderbycols;
1024 592352 : pathnode->indexscandir = indexscandir;
1025 :
1026 592352 : cost_index(pathnode, root, loop_count, partial_path);
1027 :
1028 592352 : return pathnode;
1029 : }
1030 :
1031 : /*
1032 : * create_bitmap_heap_path
1033 : * Creates a path node for a bitmap scan.
1034 : *
1035 : * 'bitmapqual' is a tree of IndexPath, BitmapAndPath, and BitmapOrPath nodes.
1036 : * 'required_outer' is the set of outer relids for a parameterized path.
1037 : * 'loop_count' is the number of repetitions of the indexscan to factor into
1038 : * estimates of caching behavior.
1039 : *
1040 : * loop_count should match the value used when creating the component
1041 : * IndexPaths.
1042 : */
1043 : BitmapHeapPath *
1044 274728 : create_bitmap_heap_path(PlannerInfo *root,
1045 : RelOptInfo *rel,
1046 : Path *bitmapqual,
1047 : Relids required_outer,
1048 : double loop_count,
1049 : int parallel_degree)
1050 : {
1051 274728 : BitmapHeapPath *pathnode = makeNode(BitmapHeapPath);
1052 :
1053 274728 : pathnode->path.pathtype = T_BitmapHeapScan;
1054 274728 : pathnode->path.parent = rel;
1055 274728 : pathnode->path.pathtarget = rel->reltarget;
1056 274728 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1057 : required_outer);
1058 274728 : pathnode->path.parallel_aware = (parallel_degree > 0);
1059 274728 : pathnode->path.parallel_safe = rel->consider_parallel;
1060 274728 : pathnode->path.parallel_workers = parallel_degree;
1061 274728 : pathnode->path.pathkeys = NIL; /* always unordered */
1062 :
1063 274728 : pathnode->bitmapqual = bitmapqual;
1064 :
1065 274728 : cost_bitmap_heap_scan(&pathnode->path, root, rel,
1066 : pathnode->path.param_info,
1067 : bitmapqual, loop_count);
1068 :
1069 274728 : return pathnode;
1070 : }
1071 :
1072 : /*
1073 : * create_bitmap_and_path
1074 : * Creates a path node representing a BitmapAnd.
1075 : */
1076 : BitmapAndPath *
1077 31706 : create_bitmap_and_path(PlannerInfo *root,
1078 : RelOptInfo *rel,
1079 : List *bitmapquals)
1080 : {
1081 31706 : BitmapAndPath *pathnode = makeNode(BitmapAndPath);
1082 31706 : Relids required_outer = NULL;
1083 : ListCell *lc;
1084 :
1085 31706 : pathnode->path.pathtype = T_BitmapAnd;
1086 31706 : pathnode->path.parent = rel;
1087 31706 : pathnode->path.pathtarget = rel->reltarget;
1088 :
1089 : /*
1090 : * Identify the required outer rels as the union of what the child paths
1091 : * depend on. (Alternatively, we could insist that the caller pass this
1092 : * in, but it's more convenient and reliable to compute it here.)
1093 : */
1094 95118 : foreach(lc, bitmapquals)
1095 : {
1096 63412 : Path *bitmapqual = (Path *) lfirst(lc);
1097 :
1098 63412 : required_outer = bms_add_members(required_outer,
1099 63412 : PATH_REQ_OUTER(bitmapqual));
1100 : }
1101 31706 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1102 : required_outer);
1103 :
1104 : /*
1105 : * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1106 : * parallel-safe if and only if rel->consider_parallel is set. So, we can
1107 : * set the flag for this path based only on the relation-level flag,
1108 : * without actually iterating over the list of children.
1109 : */
1110 31706 : pathnode->path.parallel_aware = false;
1111 31706 : pathnode->path.parallel_safe = rel->consider_parallel;
1112 31706 : pathnode->path.parallel_workers = 0;
1113 :
1114 31706 : pathnode->path.pathkeys = NIL; /* always unordered */
1115 :
1116 31706 : pathnode->bitmapquals = bitmapquals;
1117 :
1118 : /* this sets bitmapselectivity as well as the regular cost fields: */
1119 31706 : cost_bitmap_and_node(pathnode, root);
1120 :
1121 31706 : return pathnode;
1122 : }
1123 :
1124 : /*
1125 : * create_bitmap_or_path
1126 : * Creates a path node representing a BitmapOr.
1127 : */
1128 : BitmapOrPath *
1129 864 : create_bitmap_or_path(PlannerInfo *root,
1130 : RelOptInfo *rel,
1131 : List *bitmapquals)
1132 : {
1133 864 : BitmapOrPath *pathnode = makeNode(BitmapOrPath);
1134 864 : Relids required_outer = NULL;
1135 : ListCell *lc;
1136 :
1137 864 : pathnode->path.pathtype = T_BitmapOr;
1138 864 : pathnode->path.parent = rel;
1139 864 : pathnode->path.pathtarget = rel->reltarget;
1140 :
1141 : /*
1142 : * Identify the required outer rels as the union of what the child paths
1143 : * depend on. (Alternatively, we could insist that the caller pass this
1144 : * in, but it's more convenient and reliable to compute it here.)
1145 : */
1146 2652 : foreach(lc, bitmapquals)
1147 : {
1148 1788 : Path *bitmapqual = (Path *) lfirst(lc);
1149 :
1150 1788 : required_outer = bms_add_members(required_outer,
1151 1788 : PATH_REQ_OUTER(bitmapqual));
1152 : }
1153 864 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1154 : required_outer);
1155 :
1156 : /*
1157 : * Currently, a BitmapHeapPath, BitmapAndPath, or BitmapOrPath will be
1158 : * parallel-safe if and only if rel->consider_parallel is set. So, we can
1159 : * set the flag for this path based only on the relation-level flag,
1160 : * without actually iterating over the list of children.
1161 : */
1162 864 : pathnode->path.parallel_aware = false;
1163 864 : pathnode->path.parallel_safe = rel->consider_parallel;
1164 864 : pathnode->path.parallel_workers = 0;
1165 :
1166 864 : pathnode->path.pathkeys = NIL; /* always unordered */
1167 :
1168 864 : pathnode->bitmapquals = bitmapquals;
1169 :
1170 : /* this sets bitmapselectivity as well as the regular cost fields: */
1171 864 : cost_bitmap_or_node(pathnode, root);
1172 :
1173 864 : return pathnode;
1174 : }
1175 :
1176 : /*
1177 : * create_tidscan_path
1178 : * Creates a path corresponding to a scan by TID, returning the pathnode.
1179 : */
1180 : TidPath *
1181 756 : create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
1182 : Relids required_outer)
1183 : {
1184 756 : TidPath *pathnode = makeNode(TidPath);
1185 :
1186 756 : pathnode->path.pathtype = T_TidScan;
1187 756 : pathnode->path.parent = rel;
1188 756 : pathnode->path.pathtarget = rel->reltarget;
1189 756 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1190 : required_outer);
1191 756 : pathnode->path.parallel_aware = false;
1192 756 : pathnode->path.parallel_safe = rel->consider_parallel;
1193 756 : pathnode->path.parallel_workers = 0;
1194 756 : pathnode->path.pathkeys = NIL; /* always unordered */
1195 :
1196 756 : pathnode->tidquals = tidquals;
1197 :
1198 756 : cost_tidscan(&pathnode->path, root, rel, tidquals,
1199 : pathnode->path.param_info);
1200 :
1201 756 : return pathnode;
1202 : }
1203 :
1204 : /*
1205 : * create_tidrangescan_path
1206 : * Creates a path corresponding to a scan by a range of TIDs, returning
1207 : * the pathnode.
1208 : */
1209 : TidRangePath *
1210 202 : create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel,
1211 : List *tidrangequals, Relids required_outer)
1212 : {
1213 202 : TidRangePath *pathnode = makeNode(TidRangePath);
1214 :
1215 202 : pathnode->path.pathtype = T_TidRangeScan;
1216 202 : pathnode->path.parent = rel;
1217 202 : pathnode->path.pathtarget = rel->reltarget;
1218 202 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1219 : required_outer);
1220 202 : pathnode->path.parallel_aware = false;
1221 202 : pathnode->path.parallel_safe = rel->consider_parallel;
1222 202 : pathnode->path.parallel_workers = 0;
1223 202 : pathnode->path.pathkeys = NIL; /* always unordered */
1224 :
1225 202 : pathnode->tidrangequals = tidrangequals;
1226 :
1227 202 : cost_tidrangescan(&pathnode->path, root, rel, tidrangequals,
1228 : pathnode->path.param_info);
1229 :
1230 202 : return pathnode;
1231 : }
1232 :
1233 : /*
1234 : * create_append_path
1235 : * Creates a path corresponding to an Append plan, returning the
1236 : * pathnode.
1237 : *
1238 : * Note that we must handle subpaths = NIL, representing a dummy access path.
1239 : * Also, there are callers that pass root = NULL.
1240 : */
1241 : AppendPath *
1242 64710 : create_append_path(PlannerInfo *root,
1243 : RelOptInfo *rel,
1244 : List *subpaths, List *partial_subpaths,
1245 : List *pathkeys, Relids required_outer,
1246 : int parallel_workers, bool parallel_aware,
1247 : double rows)
1248 : {
1249 64710 : AppendPath *pathnode = makeNode(AppendPath);
1250 : ListCell *l;
1251 :
1252 : Assert(!parallel_aware || parallel_workers > 0);
1253 :
1254 64710 : pathnode->path.pathtype = T_Append;
1255 64710 : pathnode->path.parent = rel;
1256 64710 : pathnode->path.pathtarget = rel->reltarget;
1257 :
1258 : /*
1259 : * If this is for a baserel (not a join or non-leaf partition), we prefer
1260 : * to apply get_baserel_parampathinfo to construct a full ParamPathInfo
1261 : * for the path. This supports building a Memoize path atop this path,
1262 : * and if this is a partitioned table the info may be useful for run-time
1263 : * pruning (cf make_partition_pruneinfo()).
1264 : *
1265 : * However, if we don't have "root" then that won't work and we fall back
1266 : * on the simpler get_appendrel_parampathinfo. There's no point in doing
1267 : * the more expensive thing for a dummy path, either.
1268 : */
1269 64710 : if (rel->reloptkind == RELOPT_BASEREL && root && subpaths != NIL)
1270 29262 : pathnode->path.param_info = get_baserel_parampathinfo(root,
1271 : rel,
1272 : required_outer);
1273 : else
1274 35448 : pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1275 : required_outer);
1276 :
1277 64710 : pathnode->path.parallel_aware = parallel_aware;
1278 64710 : pathnode->path.parallel_safe = rel->consider_parallel;
1279 64710 : pathnode->path.parallel_workers = parallel_workers;
1280 64710 : pathnode->path.pathkeys = pathkeys;
1281 :
1282 : /*
1283 : * For parallel append, non-partial paths are sorted by descending total
1284 : * costs. That way, the total time to finish all non-partial paths is
1285 : * minimized. Also, the partial paths are sorted by descending startup
1286 : * costs. Some paths may require their startup work to be done by a
1287 : * single worker. In such cases, it's better for the workers to choose the
1288 : * expensive ones first, whereas the leader should choose the cheapest
1289 : * startup plan.
1290 : */
1291 64710 : if (pathnode->path.parallel_aware)
1292 : {
1293 : /*
1294 : * We mustn't fiddle with the order of subpaths when the Append has
1295 : * pathkeys. The order they're listed in is critical to keeping the
1296 : * pathkeys valid.
1297 : */
1298 : Assert(pathkeys == NIL);
1299 :
1300 21816 : list_sort(subpaths, append_total_cost_compare);
1301 21816 : list_sort(partial_subpaths, append_startup_cost_compare);
1302 : }
1303 64710 : pathnode->first_partial_path = list_length(subpaths);
1304 64710 : pathnode->subpaths = list_concat(subpaths, partial_subpaths);
1305 :
1306 : /*
1307 : * Apply query-wide LIMIT if known and path is for sole base relation.
1308 : * (Handling this at this low level is a bit klugy.)
1309 : */
1310 64710 : if (root != NULL && bms_equal(rel->relids, root->all_query_rels))
1311 35938 : pathnode->limit_tuples = root->limit_tuples;
1312 : else
1313 28772 : pathnode->limit_tuples = -1.0;
1314 :
1315 213416 : foreach(l, pathnode->subpaths)
1316 : {
1317 148706 : Path *subpath = (Path *) lfirst(l);
1318 :
1319 264798 : pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1320 116092 : subpath->parallel_safe;
1321 :
1322 : /* All child paths must have same parameterization */
1323 : Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1324 : }
1325 :
1326 : Assert(!parallel_aware || pathnode->path.parallel_safe);
1327 :
1328 : /*
1329 : * If there's exactly one child path then the output of the Append is
1330 : * necessarily ordered the same as the child's, so we can inherit the
1331 : * child's pathkeys if any, overriding whatever the caller might've said.
1332 : * Furthermore, if the child's parallel awareness matches the Append's,
1333 : * then the Append is a no-op and will be discarded later (in setrefs.c).
1334 : * Then we can inherit the child's size and cost too, effectively charging
1335 : * zero for the Append. Otherwise, we must do the normal costsize
1336 : * calculation.
1337 : */
1338 64710 : if (list_length(pathnode->subpaths) == 1)
1339 : {
1340 20954 : Path *child = (Path *) linitial(pathnode->subpaths);
1341 :
1342 20954 : if (child->parallel_aware == parallel_aware)
1343 : {
1344 20552 : pathnode->path.rows = child->rows;
1345 20552 : pathnode->path.startup_cost = child->startup_cost;
1346 20552 : pathnode->path.total_cost = child->total_cost;
1347 : }
1348 : else
1349 402 : cost_append(pathnode);
1350 : /* Must do this last, else cost_append complains */
1351 20954 : pathnode->path.pathkeys = child->pathkeys;
1352 : }
1353 : else
1354 43756 : cost_append(pathnode);
1355 :
1356 : /* If the caller provided a row estimate, override the computed value. */
1357 64710 : if (rows >= 0)
1358 582 : pathnode->path.rows = rows;
1359 :
1360 64710 : return pathnode;
1361 : }
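As a hedged illustration of the subpaths = NIL case mentioned in the header comment, this is roughly how the planner marks a provably empty ("dummy") relation; the real code (cf. mark_dummy_rel()) also adjusts the rel's size estimates and memory context, which is omitted here.

#include "postgres.h"

#include "optimizer/pathnode.h"

static void
make_rel_dummy(PlannerInfo *root, RelOptInfo *rel)
{
	/* Discard whatever paths the rel had accumulated. */
	rel->pathlist = NIL;
	rel->partial_pathlist = NIL;

	/* An Append with no children scans nothing and returns no rows. */
	add_path(rel, (Path *) create_append_path(root, rel, NIL, NIL, NIL,
											  NULL, 0, false, -1));

	set_cheapest(rel);
}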
1362 :
1363 : /*
1364 : * append_total_cost_compare
1365 : * list_sort comparator for sorting append child paths
1366 : * by total_cost descending
1367 : *
1368 : * For equal total costs, we fall back to comparing startup costs; if those
1369 : * are equal too, break ties using bms_compare on the paths' relids.
1370 : * (This is to avoid getting unpredictable results from list_sort.)
1371 : */
1372 : static int
1373 1908 : append_total_cost_compare(const ListCell *a, const ListCell *b)
1374 : {
1375 1908 : Path *path1 = (Path *) lfirst(a);
1376 1908 : Path *path2 = (Path *) lfirst(b);
1377 : int cmp;
1378 :
1379 1908 : cmp = compare_path_costs(path1, path2, TOTAL_COST);
1380 1908 : if (cmp != 0)
1381 1638 : return -cmp;
1382 270 : return bms_compare(path1->parent->relids, path2->parent->relids);
1383 : }
1384 :
1385 : /*
1386 : * append_startup_cost_compare
1387 : * list_sort comparator for sorting append child paths
1388 : * by startup_cost descending
1389 : *
1390 : * For equal startup costs, we fall back to comparing total costs; if those
1391 : * are equal too, break ties using bms_compare on the paths' relids.
1392 : * (This is to avoid getting unpredictable results from list_sort.)
1393 : */
1394 : static int
1395 32140 : append_startup_cost_compare(const ListCell *a, const ListCell *b)
1396 : {
1397 32140 : Path *path1 = (Path *) lfirst(a);
1398 32140 : Path *path2 = (Path *) lfirst(b);
1399 : int cmp;
1400 :
1401 32140 : cmp = compare_path_costs(path1, path2, STARTUP_COST);
1402 32140 : if (cmp != 0)
1403 12452 : return -cmp;
1404 19688 : return bms_compare(path1->parent->relids, path2->parent->relids);
1405 : }
1406 :
1407 : /*
1408 : * create_merge_append_path
1409 : * Creates a path corresponding to a MergeAppend plan, returning the
1410 : * pathnode.
1411 : */
1412 : MergeAppendPath *
1413 3692 : create_merge_append_path(PlannerInfo *root,
1414 : RelOptInfo *rel,
1415 : List *subpaths,
1416 : List *pathkeys,
1417 : Relids required_outer)
1418 : {
1419 3692 : MergeAppendPath *pathnode = makeNode(MergeAppendPath);
1420 : Cost input_startup_cost;
1421 : Cost input_total_cost;
1422 : ListCell *l;
1423 :
1424 3692 : pathnode->path.pathtype = T_MergeAppend;
1425 3692 : pathnode->path.parent = rel;
1426 3692 : pathnode->path.pathtarget = rel->reltarget;
1427 3692 : pathnode->path.param_info = get_appendrel_parampathinfo(rel,
1428 : required_outer);
1429 3692 : pathnode->path.parallel_aware = false;
1430 3692 : pathnode->path.parallel_safe = rel->consider_parallel;
1431 3692 : pathnode->path.parallel_workers = 0;
1432 3692 : pathnode->path.pathkeys = pathkeys;
1433 3692 : pathnode->subpaths = subpaths;
1434 :
1435 : /*
1436 : * Apply query-wide LIMIT if known and path is for sole base relation.
1437 : * (Handling this at this low level is a bit klugy.)
1438 : */
1439 3692 : if (bms_equal(rel->relids, root->all_query_rels))
1440 2018 : pathnode->limit_tuples = root->limit_tuples;
1441 : else
1442 1674 : pathnode->limit_tuples = -1.0;
1443 :
1444 : /*
1445 : * Add up the sizes and costs of the input paths.
1446 : */
1447 3692 : pathnode->path.rows = 0;
1448 3692 : input_startup_cost = 0;
1449 3692 : input_total_cost = 0;
1450 13950 : foreach(l, subpaths)
1451 : {
1452 10258 : Path *subpath = (Path *) lfirst(l);
1453 :
1454 10258 : pathnode->path.rows += subpath->rows;
1455 18480 : pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
1456 8222 : subpath->parallel_safe;
1457 :
1458 10258 : if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1459 : {
1460 : /* Subpath is adequately ordered, we won't need to sort it */
1461 9978 : input_startup_cost += subpath->startup_cost;
1462 9978 : input_total_cost += subpath->total_cost;
1463 : }
1464 : else
1465 : {
1466 : /* We'll need to insert a Sort node, so include cost for that */
1467 : Path sort_path; /* dummy for result of cost_sort */
1468 :
1469 280 : cost_sort(&sort_path,
1470 : root,
1471 : pathkeys,
1472 : subpath->total_cost,
1473 280 : subpath->parent->tuples,
1474 280 : subpath->pathtarget->width,
1475 : 0.0,
1476 : work_mem,
1477 : pathnode->limit_tuples);
1478 280 : input_startup_cost += sort_path.startup_cost;
1479 280 : input_total_cost += sort_path.total_cost;
1480 : }
1481 :
1482 : /* All child paths must have same parameterization */
1483 : Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
1484 : }
1485 :
1486 : /*
1487 : * Now we can compute total costs of the MergeAppend. If there's exactly
1488 : * one child path and its parallel awareness matches that of the
1489 : * MergeAppend, then the MergeAppend is a no-op and will be discarded
1490 : * later (in setrefs.c); otherwise we do the normal cost calculation.
1491 : */
1492 3692 : if (list_length(subpaths) == 1 &&
1493 110 : ((Path *) linitial(subpaths))->parallel_aware ==
1494 110 : pathnode->path.parallel_aware)
1495 : {
1496 110 : pathnode->path.startup_cost = input_startup_cost;
1497 110 : pathnode->path.total_cost = input_total_cost;
1498 : }
1499 : else
1500 3582 : cost_merge_append(&pathnode->path, root,
1501 : pathkeys, list_length(subpaths),
1502 : input_startup_cost, input_total_cost,
1503 : pathnode->path.rows);
1504 :
1505 3692 : return pathnode;
1506 : }
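
As a numeric illustration of the loop above: the MergeAppend's input costs are the sum, over children, of either the child's own costs (if already ordered by the requested pathkeys) or the cost of an explicit Sort of that child. The standalone sketch below uses two assumed children, one presorted and one not; all cost numbers are made up, where the real values would come from the child paths and cost_sort().

#include <stdio.h>

int
main(void)
{
    /* two assumed children: the first is already ordered, the second is not */
    double child_rows[2]    = {100.0, 80.0};
    double child_startup[2] = {0.5, 0.2};
    double child_total[2]   = {10.0, 8.0};
    int    child_sorted[2]  = {1, 0};

    /* assumed cost_sort() result for the unsorted child */
    double sort_startup = 3.0, sort_total = 12.0;

    double input_startup = 0.0, input_total = 0.0, rows = 0.0;

    for (int i = 0; i < 2; i++)
    {
        rows += child_rows[i];
        if (child_sorted[i])
        {
            input_startup += child_startup[i];
            input_total += child_total[i];
        }
        else
        {
            /* charge the explicit Sort instead of the raw child cost */
            input_startup += sort_startup;
            input_total += sort_total;
        }
    }
    printf("rows=%.0f input_startup=%.1f input_total=%.1f\n",
           rows, input_startup, input_total);
    return 0;
}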
1507 :
1508 : /*
1509 : * create_group_result_path
1510 : * Creates a path representing a Result-and-nothing-else plan.
1511 : *
1512 : * This is only used for degenerate grouping cases, in which we know we
1513 : * need to produce one result row, possibly filtered by a HAVING qual.
1514 : */
1515 : GroupResultPath *
1516 188664 : create_group_result_path(PlannerInfo *root, RelOptInfo *rel,
1517 : PathTarget *target, List *havingqual)
1518 : {
1519 188664 : GroupResultPath *pathnode = makeNode(GroupResultPath);
1520 :
1521 188664 : pathnode->path.pathtype = T_Result;
1522 188664 : pathnode->path.parent = rel;
1523 188664 : pathnode->path.pathtarget = target;
1524 188664 : pathnode->path.param_info = NULL; /* there are no other rels... */
1525 188664 : pathnode->path.parallel_aware = false;
1526 188664 : pathnode->path.parallel_safe = rel->consider_parallel;
1527 188664 : pathnode->path.parallel_workers = 0;
1528 188664 : pathnode->path.pathkeys = NIL;
1529 188664 : pathnode->quals = havingqual;
1530 :
1531 : /*
1532 : * We can't quite use cost_resultscan() because the quals we want to
1533 : * account for are not baserestrict quals of the rel. Might as well just
1534 : * hack it here.
1535 : */
1536 188664 : pathnode->path.rows = 1;
1537 188664 : pathnode->path.startup_cost = target->cost.startup;
1538 188664 : pathnode->path.total_cost = target->cost.startup +
1539 188664 : cpu_tuple_cost + target->cost.per_tuple;
1540 :
1541 : /*
1542 : * Add cost of qual, if any --- but we ignore its selectivity, since our
1543 : * rowcount estimate should be 1 no matter what the qual is.
1544 : */
1545 188664 : if (havingqual)
1546 : {
1547 : QualCost qual_cost;
1548 :
1549 498 : cost_qual_eval(&qual_cost, havingqual, root);
1550 : /* havingqual is evaluated once at startup */
1551 498 : pathnode->path.startup_cost += qual_cost.startup + qual_cost.per_tuple;
1552 498 : pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
1553 : }
1554 :
1555 188664 : return pathnode;
1556 : }
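
A quick worked version of the cost arithmetic above, using PostgreSQL's default cpu_tuple_cost of 0.01 and made-up target and HAVING-qual costs (all numeric inputs are assumptions for illustration only): the row estimate is pinned at 1, and the one-time qual cost is added to both startup and total cost.

#include <stdio.h>

int
main(void)
{
    const double cpu_tuple_cost = 0.01;   /* PostgreSQL default GUC value */

    /* assumed PathTarget evaluation costs for the single output row */
    double target_startup = 0.05;
    double target_per_tuple = 0.02;

    /* assumed one-time cost of the HAVING qual (startup + per_tuple) */
    double qual_once = 0.03;

    double startup = target_startup + qual_once;
    double total = target_startup + cpu_tuple_cost + target_per_tuple + qual_once;

    /* rows is always 1 for this degenerate grouping case */
    printf("rows=1 startup=%.4f total=%.4f\n", startup, total);
    return 0;
}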
1557 :
1558 : /*
1559 : * create_material_path
1560 : * Creates a path corresponding to a Material plan, returning the
1561 : * pathnode.
1562 : */
1563 : MaterialPath *
1564 390864 : create_material_path(RelOptInfo *rel, Path *subpath)
1565 : {
1566 390864 : MaterialPath *pathnode = makeNode(MaterialPath);
1567 :
1568 : Assert(subpath->parent == rel);
1569 :
1570 390864 : pathnode->path.pathtype = T_Material;
1571 390864 : pathnode->path.parent = rel;
1572 390864 : pathnode->path.pathtarget = rel->reltarget;
1573 390864 : pathnode->path.param_info = subpath->param_info;
1574 390864 : pathnode->path.parallel_aware = false;
1575 739236 : pathnode->path.parallel_safe = rel->consider_parallel &&
1576 348372 : subpath->parallel_safe;
1577 390864 : pathnode->path.parallel_workers = subpath->parallel_workers;
1578 390864 : pathnode->path.pathkeys = subpath->pathkeys;
1579 :
1580 390864 : pathnode->subpath = subpath;
1581 :
1582 390864 : cost_material(&pathnode->path,
1583 : subpath->startup_cost,
1584 : subpath->total_cost,
1585 : subpath->rows,
1586 390864 : subpath->pathtarget->width);
1587 :
1588 390864 : return pathnode;
1589 : }
1590 :
1591 : /*
1592 : * create_memoize_path
1593 : * Creates a path corresponding to a Memoize plan, returning the pathnode.
1594 : */
1595 : MemoizePath *
1596 217126 : create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1597 : List *param_exprs, List *hash_operators,
1598 : bool singlerow, bool binary_mode, double calls)
1599 : {
1600 217126 : MemoizePath *pathnode = makeNode(MemoizePath);
1601 :
1602 : Assert(subpath->parent == rel);
1603 :
1604 217126 : pathnode->path.pathtype = T_Memoize;
1605 217126 : pathnode->path.parent = rel;
1606 217126 : pathnode->path.pathtarget = rel->reltarget;
1607 217126 : pathnode->path.param_info = subpath->param_info;
1608 217126 : pathnode->path.parallel_aware = false;
1609 426866 : pathnode->path.parallel_safe = rel->consider_parallel &&
1610 209740 : subpath->parallel_safe;
1611 217126 : pathnode->path.parallel_workers = subpath->parallel_workers;
1612 217126 : pathnode->path.pathkeys = subpath->pathkeys;
1613 :
1614 217126 : pathnode->subpath = subpath;
1615 217126 : pathnode->hash_operators = hash_operators;
1616 217126 : pathnode->param_exprs = param_exprs;
1617 217126 : pathnode->singlerow = singlerow;
1618 217126 : pathnode->binary_mode = binary_mode;
1619 217126 : pathnode->calls = calls;
1620 :
1621 : /*
1622 : * For now we set est_entries to 0. cost_memoize_rescan() does all the
1623 : * hard work to determine how many cache entries there are likely to be,
1624 : * so it seems best to leave it up to that function to fill this field in.
1625 : * If left at 0, the executor will make a guess at a good value.
1626 : */
1627 217126 : pathnode->est_entries = 0;
1628 :
1629 : /*
1630 : * Add a small additional charge for caching the first entry. All the
1631 : * harder calculations for rescans are performed in cost_memoize_rescan().
1632 : */
1633 217126 : pathnode->path.startup_cost = subpath->startup_cost + cpu_tuple_cost;
1634 217126 : pathnode->path.total_cost = subpath->total_cost + cpu_tuple_cost;
1635 217126 : pathnode->path.rows = subpath->rows;
1636 :
1637 217126 : return pathnode;
1638 : }
1639 :
1640 : /*
1641 : * create_unique_path
1642 : * Creates a path representing elimination of distinct rows from the
1643 : * input data. Distinct-ness is defined according to the needs of the
1644 : * semijoin represented by sjinfo. If it is not possible to identify
1645 : * how to make the data unique, NULL is returned.
1646 : *
1647 : * If used at all, this is likely to be called repeatedly on the same rel;
1648 : * and the input subpath should always be the same (the cheapest_total path
1649 : * for the rel). So we cache the result.
1650 : */
1651 : UniquePath *
1652 22990 : create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1653 : SpecialJoinInfo *sjinfo)
1654 : {
1655 : UniquePath *pathnode;
1656 : Path sort_path; /* dummy for result of cost_sort */
1657 : Path agg_path; /* dummy for result of cost_agg */
1658 : MemoryContext oldcontext;
1659 : int numCols;
1660 :
1661 : /* Caller made a mistake if subpath isn't cheapest_total ... */
1662 : Assert(subpath == rel->cheapest_total_path);
1663 : Assert(subpath->parent == rel);
1664 : /* ... or if SpecialJoinInfo is the wrong one */
1665 : Assert(sjinfo->jointype == JOIN_SEMI);
1666 : Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
1667 :
1668 : /* If result already cached, return it */
1669 22990 : if (rel->cheapest_unique_path)
1670 19770 : return (UniquePath *) rel->cheapest_unique_path;
1671 :
1672 : /* If it's not possible to unique-ify, return NULL */
1673 3220 : if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
1674 102 : return NULL;
1675 :
1676 : /*
1677 : * When called during GEQO join planning, we are in a short-lived memory
1678 : * context. We must make sure that the path and any subsidiary data
1679 : * structures created for a baserel survive the GEQO cycle, else the
1680 : * baserel is trashed for future GEQO cycles. On the other hand, when we
1681 : * are creating those for a joinrel during GEQO, we don't want them to
1682 : * clutter the main planning context. Upshot is that the best solution is
1683 : * to explicitly allocate memory in the same context the given RelOptInfo
1684 : * is in.
1685 : */
1686 3118 : oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
1687 :
1688 3118 : pathnode = makeNode(UniquePath);
1689 :
1690 3118 : pathnode->path.pathtype = T_Unique;
1691 3118 : pathnode->path.parent = rel;
1692 3118 : pathnode->path.pathtarget = rel->reltarget;
1693 3118 : pathnode->path.param_info = subpath->param_info;
1694 3118 : pathnode->path.parallel_aware = false;
1695 5902 : pathnode->path.parallel_safe = rel->consider_parallel &&
1696 2784 : subpath->parallel_safe;
1697 3118 : pathnode->path.parallel_workers = subpath->parallel_workers;
1698 :
1699 : /*
1700 : * Assume the output is unsorted, since we don't necessarily have pathkeys
1701 : * to represent it. (This might get overridden below.)
1702 : */
1703 3118 : pathnode->path.pathkeys = NIL;
1704 :
1705 3118 : pathnode->subpath = subpath;
1706 3118 : pathnode->in_operators = sjinfo->semi_operators;
1707 3118 : pathnode->uniq_exprs = sjinfo->semi_rhs_exprs;
1708 :
1709 : /*
1710 : * If the input is a relation and it has a unique index that proves the
1711 : * semi_rhs_exprs are unique, then we don't need to do anything. Note
1712 : * that relation_has_unique_index_for automatically considers restriction
1713 : * clauses for the rel, as well.
1714 : */
1715 3792 : if (rel->rtekind == RTE_RELATION && sjinfo->semi_can_btree &&
1716 674 : relation_has_unique_index_for(root, rel, NIL,
1717 : sjinfo->semi_rhs_exprs,
1718 : sjinfo->semi_operators))
1719 : {
1720 0 : pathnode->umethod = UNIQUE_PATH_NOOP;
1721 0 : pathnode->path.rows = rel->rows;
1722 0 : pathnode->path.startup_cost = subpath->startup_cost;
1723 0 : pathnode->path.total_cost = subpath->total_cost;
1724 0 : pathnode->path.pathkeys = subpath->pathkeys;
1725 :
1726 0 : rel->cheapest_unique_path = (Path *) pathnode;
1727 :
1728 0 : MemoryContextSwitchTo(oldcontext);
1729 :
1730 0 : return pathnode;
1731 : }
1732 :
1733 : /*
1734 : * If the input is a subquery whose output must be unique already, then we
1735 : * don't need to do anything. The test for uniqueness has to consider
1736 : * exactly which columns we are extracting; for example "SELECT DISTINCT
1737 : * x,y" doesn't guarantee that x alone is distinct. So we cannot check for
1738 : * this optimization unless semi_rhs_exprs consists only of simple Vars
1739 : * referencing subquery outputs. (Possibly we could do something with
1740 : * expressions in the subquery outputs, too, but for now keep it simple.)
1741 : */
1742 3118 : if (rel->rtekind == RTE_SUBQUERY)
1743 : {
1744 548 : RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
1745 :
1746 548 : if (query_supports_distinctness(rte->subquery))
1747 : {
1748 : List *sub_tlist_colnos;
1749 :
1750 512 : sub_tlist_colnos = translate_sub_tlist(sjinfo->semi_rhs_exprs,
1751 512 : rel->relid);
1752 :
1753 610 : if (sub_tlist_colnos &&
1754 98 : query_is_distinct_for(rte->subquery,
1755 : sub_tlist_colnos,
1756 : sjinfo->semi_operators))
1757 : {
1758 0 : pathnode->umethod = UNIQUE_PATH_NOOP;
1759 0 : pathnode->path.rows = rel->rows;
1760 0 : pathnode->path.startup_cost = subpath->startup_cost;
1761 0 : pathnode->path.total_cost = subpath->total_cost;
1762 0 : pathnode->path.pathkeys = subpath->pathkeys;
1763 :
1764 0 : rel->cheapest_unique_path = (Path *) pathnode;
1765 :
1766 0 : MemoryContextSwitchTo(oldcontext);
1767 :
1768 0 : return pathnode;
1769 : }
1770 : }
1771 : }
1772 :
1773 : /* Estimate number of output rows */
1774 3118 : pathnode->path.rows = estimate_num_groups(root,
1775 : sjinfo->semi_rhs_exprs,
1776 : rel->rows,
1777 : NULL,
1778 : NULL);
1779 3118 : numCols = list_length(sjinfo->semi_rhs_exprs);
1780 :
1781 3118 : if (sjinfo->semi_can_btree)
1782 : {
1783 : /*
1784 : * Estimate cost for sort+unique implementation
1785 : */
1786 3118 : cost_sort(&sort_path, root, NIL,
1787 : subpath->total_cost,
1788 : rel->rows,
1789 3118 : subpath->pathtarget->width,
1790 : 0.0,
1791 : work_mem,
1792 : -1.0);
1793 :
1794 : /*
1795 : * Charge one cpu_operator_cost per comparison per input tuple. We
1796 : * assume all columns get compared for most of the tuples. (XXX
1797 : * probably this is an overestimate.) This should agree with
1798 : * create_upper_unique_path.
1799 : */
1800 3118 : sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
1801 : }
1802 :
1803 3118 : if (sjinfo->semi_can_hash)
1804 : {
1805 : /*
1806 : * Estimate the overhead per hashtable entry at 64 bytes (same as in
1807 : * planner.c).
1808 : */
1809 3118 : int hashentrysize = subpath->pathtarget->width + 64;
1810 :
1811 3118 : if (hashentrysize * pathnode->path.rows > get_hash_memory_limit())
1812 : {
1813 : /*
1814 : * We should not try to hash. Hack the SpecialJoinInfo to
1815 : * remember this, in case we come through here again.
1816 : */
1817 0 : sjinfo->semi_can_hash = false;
1818 : }
1819 : else
1820 3118 : cost_agg(&agg_path, root,
1821 : AGG_HASHED, NULL,
1822 : numCols, pathnode->path.rows,
1823 : NIL,
1824 : subpath->startup_cost,
1825 : subpath->total_cost,
1826 : rel->rows,
1827 3118 : subpath->pathtarget->width);
1828 : }
1829 :
1830 3118 : if (sjinfo->semi_can_btree && sjinfo->semi_can_hash)
1831 : {
1832 3118 : if (agg_path.total_cost < sort_path.total_cost)
1833 3008 : pathnode->umethod = UNIQUE_PATH_HASH;
1834 : else
1835 110 : pathnode->umethod = UNIQUE_PATH_SORT;
1836 : }
1837 0 : else if (sjinfo->semi_can_btree)
1838 0 : pathnode->umethod = UNIQUE_PATH_SORT;
1839 0 : else if (sjinfo->semi_can_hash)
1840 0 : pathnode->umethod = UNIQUE_PATH_HASH;
1841 : else
1842 : {
1843 : /* we can get here only if we abandoned hashing above */
1844 0 : MemoryContextSwitchTo(oldcontext);
1845 0 : return NULL;
1846 : }
1847 :
1848 3118 : if (pathnode->umethod == UNIQUE_PATH_HASH)
1849 : {
1850 3008 : pathnode->path.startup_cost = agg_path.startup_cost;
1851 3008 : pathnode->path.total_cost = agg_path.total_cost;
1852 : }
1853 : else
1854 : {
1855 110 : pathnode->path.startup_cost = sort_path.startup_cost;
1856 110 : pathnode->path.total_cost = sort_path.total_cost;
1857 : }
1858 :
1859 3118 : rel->cheapest_unique_path = (Path *) pathnode;
1860 :
1861 3118 : MemoryContextSwitchTo(oldcontext);
1862 :
1863 3118 : return pathnode;
1864 : }
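
For a rough sense of the can-hash test above: with 64 bytes of per-entry overhead added to the path width, hashing is abandoned once the projected hash table would exceed get_hash_memory_limit(). The standalone sketch below just multiplies assumed numbers together; the limit is derived from the default work_mem of 4MB and hash_mem_multiplier of 2.0 rather than by calling into the server, and the width and distinct-row estimates are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

int
main(void)
{
    /* assumed defaults: work_mem = 4MB, hash_mem_multiplier = 2.0 */
    double hash_mem_limit = 4.0 * 1024 * 1024 * 2.0;        /* bytes */

    double path_width = 40;                   /* assumed average row width */
    double hashentrysize = path_width + 64;   /* same 64-byte overhead as above */
    double est_distinct_rows = 50000;         /* assumed distinct-group estimate */

    bool can_hash = hashentrysize * est_distinct_rows <= hash_mem_limit;

    printf("entry=%.0f bytes, table=%.0f bytes, limit=%.0f bytes -> %s\n",
           hashentrysize, hashentrysize * est_distinct_rows, hash_mem_limit,
           can_hash ? "hash is considered" : "fall back to sort+unique only");
    return 0;
}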
1865 :
1866 : /*
1867 : * create_gather_merge_path
1868 : *
1869 : * Creates a path corresponding to a gather merge scan, returning
1870 : * the pathnode.
1871 : */
1872 : GatherMergePath *
1873 9446 : create_gather_merge_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1874 : PathTarget *target, List *pathkeys,
1875 : Relids required_outer, double *rows)
1876 : {
1877 9446 : GatherMergePath *pathnode = makeNode(GatherMergePath);
1878 9446 : Cost input_startup_cost = 0;
1879 9446 : Cost input_total_cost = 0;
1880 :
1881 : Assert(subpath->parallel_safe);
1882 : Assert(pathkeys);
1883 :
1884 9446 : pathnode->path.pathtype = T_GatherMerge;
1885 9446 : pathnode->path.parent = rel;
1886 9446 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1887 : required_outer);
1888 9446 : pathnode->path.parallel_aware = false;
1889 :
1890 9446 : pathnode->subpath = subpath;
1891 9446 : pathnode->num_workers = subpath->parallel_workers;
1892 9446 : pathnode->path.pathkeys = pathkeys;
1893 9446 : pathnode->path.pathtarget = target ? target : rel->reltarget;
1894 9446 : pathnode->path.rows += subpath->rows;
1895 :
1896 9446 : if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
1897 : {
1898 : /* Subpath is adequately ordered, we won't need to sort it */
1899 9446 : input_startup_cost += subpath->startup_cost;
1900 9446 : input_total_cost += subpath->total_cost;
1901 : }
1902 : else
1903 : {
1904 : /* We'll need to insert a Sort node, so include cost for that */
1905 : Path sort_path; /* dummy for result of cost_sort */
1906 :
1907 0 : cost_sort(&sort_path,
1908 : root,
1909 : pathkeys,
1910 : subpath->total_cost,
1911 : subpath->rows,
1912 0 : subpath->pathtarget->width,
1913 : 0.0,
1914 : work_mem,
1915 : -1);
1916 0 : input_startup_cost += sort_path.startup_cost;
1917 0 : input_total_cost += sort_path.total_cost;
1918 : }
1919 :
1920 9446 : cost_gather_merge(pathnode, root, rel, pathnode->path.param_info,
1921 : input_startup_cost, input_total_cost, rows);
1922 :
1923 9446 : return pathnode;
1924 : }
1925 :
1926 : /*
1927 : * translate_sub_tlist - get subquery column numbers represented by tlist
1928 : *
1929 : * The given targetlist usually contains only Vars referencing the given relid.
1930 : * Extract their varattnos (ie, the column numbers of the subquery) and return
1931 : * as an integer List.
1932 : *
1933 : * If any of the tlist items is not a simple Var, we cannot determine whether
1934 : * the subquery's uniqueness condition (if any) matches ours, so punt and
1935 : * return NIL.
1936 : */
1937 : static List *
1938 512 : translate_sub_tlist(List *tlist, int relid)
1939 : {
1940 512 : List *result = NIL;
1941 : ListCell *l;
1942 :
1943 610 : foreach(l, tlist)
1944 : {
1945 512 : Var *var = (Var *) lfirst(l);
1946 :
1947 512 : if (!var || !IsA(var, Var) ||
1948 98 : var->varno != relid)
1949 414 : return NIL; /* punt */
1950 :
1951 98 : result = lappend_int(result, var->varattno);
1952 : }
1953 98 : return result;
1954 : }
1955 :
1956 : /*
1957 : * create_gather_path
1958 : * Creates a path corresponding to a gather scan, returning the
1959 : * pathnode.
1960 : *
1961 : * 'rows' may optionally be set to override row estimates from other sources.
1962 : */
1963 : GatherPath *
1964 15362 : create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
1965 : PathTarget *target, Relids required_outer, double *rows)
1966 : {
1967 15362 : GatherPath *pathnode = makeNode(GatherPath);
1968 :
1969 : Assert(subpath->parallel_safe);
1970 :
1971 15362 : pathnode->path.pathtype = T_Gather;
1972 15362 : pathnode->path.parent = rel;
1973 15362 : pathnode->path.pathtarget = target;
1974 15362 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
1975 : required_outer);
1976 15362 : pathnode->path.parallel_aware = false;
1977 15362 : pathnode->path.parallel_safe = false;
1978 15362 : pathnode->path.parallel_workers = 0;
1979 15362 : pathnode->path.pathkeys = NIL; /* Gather has unordered result */
1980 :
1981 15362 : pathnode->subpath = subpath;
1982 15362 : pathnode->num_workers = subpath->parallel_workers;
1983 15362 : pathnode->single_copy = false;
1984 :
1985 15362 : if (pathnode->num_workers == 0)
1986 : {
1987 0 : pathnode->path.pathkeys = subpath->pathkeys;
1988 0 : pathnode->num_workers = 1;
1989 0 : pathnode->single_copy = true;
1990 : }
1991 :
1992 15362 : cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
1993 :
1994 15362 : return pathnode;
1995 : }
1996 :
1997 : /*
1998 : * create_subqueryscan_path
1999 : * Creates a path corresponding to a scan of a subquery,
2000 : * returning the pathnode.
2001 : *
2002 : * Caller must pass trivial_pathtarget = true if it believes rel->reltarget to
2003 : * be trivial, ie just a fetch of all the subquery output columns in order.
2004 : * While we could determine that here, the caller can usually do it more
2005 : * efficiently (or at least amortize it over multiple calls).
2006 : */
2007 : SubqueryScanPath *
2008 19726 : create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
2009 : bool trivial_pathtarget,
2010 : List *pathkeys, Relids required_outer)
2011 : {
2012 19726 : SubqueryScanPath *pathnode = makeNode(SubqueryScanPath);
2013 :
2014 19726 : pathnode->path.pathtype = T_SubqueryScan;
2015 19726 : pathnode->path.parent = rel;
2016 19726 : pathnode->path.pathtarget = rel->reltarget;
2017 19726 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2018 : required_outer);
2019 19726 : pathnode->path.parallel_aware = false;
2020 32632 : pathnode->path.parallel_safe = rel->consider_parallel &&
2021 12906 : subpath->parallel_safe;
2022 19726 : pathnode->path.parallel_workers = subpath->parallel_workers;
2023 19726 : pathnode->path.pathkeys = pathkeys;
2024 19726 : pathnode->subpath = subpath;
2025 :
2026 19726 : cost_subqueryscan(pathnode, root, rel, pathnode->path.param_info,
2027 : trivial_pathtarget);
2028 :
2029 19726 : return pathnode;
2030 : }
2031 :
2032 : /*
2033 : * create_functionscan_path
2034 : * Creates a path corresponding to a sequential scan of a function,
2035 : * returning the pathnode.
2036 : */
2037 : Path *
2038 38500 : create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
2039 : List *pathkeys, Relids required_outer)
2040 : {
2041 38500 : Path *pathnode = makeNode(Path);
2042 :
2043 38500 : pathnode->pathtype = T_FunctionScan;
2044 38500 : pathnode->parent = rel;
2045 38500 : pathnode->pathtarget = rel->reltarget;
2046 38500 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2047 : required_outer);
2048 38500 : pathnode->parallel_aware = false;
2049 38500 : pathnode->parallel_safe = rel->consider_parallel;
2050 38500 : pathnode->parallel_workers = 0;
2051 38500 : pathnode->pathkeys = pathkeys;
2052 :
2053 38500 : cost_functionscan(pathnode, root, rel, pathnode->param_info);
2054 :
2055 38500 : return pathnode;
2056 : }
2057 :
2058 : /*
2059 : * create_tablefuncscan_path
2060 : * Creates a path corresponding to a sequential scan of a table function,
2061 : * returning the pathnode.
2062 : */
2063 : Path *
2064 216 : create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel,
2065 : Relids required_outer)
2066 : {
2067 216 : Path *pathnode = makeNode(Path);
2068 :
2069 216 : pathnode->pathtype = T_TableFuncScan;
2070 216 : pathnode->parent = rel;
2071 216 : pathnode->pathtarget = rel->reltarget;
2072 216 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2073 : required_outer);
2074 216 : pathnode->parallel_aware = false;
2075 216 : pathnode->parallel_safe = rel->consider_parallel;
2076 216 : pathnode->parallel_workers = 0;
2077 216 : pathnode->pathkeys = NIL; /* result is always unordered */
2078 :
2079 216 : cost_tablefuncscan(pathnode, root, rel, pathnode->param_info);
2080 :
2081 216 : return pathnode;
2082 : }
2083 :
2084 : /*
2085 : * create_valuesscan_path
2086 : * Creates a path corresponding to a scan of a VALUES list,
2087 : * returning the pathnode.
2088 : */
2089 : Path *
2090 7332 : create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
2091 : Relids required_outer)
2092 : {
2093 7332 : Path *pathnode = makeNode(Path);
2094 :
2095 7332 : pathnode->pathtype = T_ValuesScan;
2096 7332 : pathnode->parent = rel;
2097 7332 : pathnode->pathtarget = rel->reltarget;
2098 7332 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2099 : required_outer);
2100 7332 : pathnode->parallel_aware = false;
2101 7332 : pathnode->parallel_safe = rel->consider_parallel;
2102 7332 : pathnode->parallel_workers = 0;
2103 7332 : pathnode->pathkeys = NIL; /* result is always unordered */
2104 :
2105 7332 : cost_valuesscan(pathnode, root, rel, pathnode->param_info);
2106 :
2107 7332 : return pathnode;
2108 : }
2109 :
2110 : /*
2111 : * create_ctescan_path
2112 : * Creates a path corresponding to a scan of a non-self-reference CTE,
2113 : * returning the pathnode.
2114 : */
2115 : Path *
2116 3004 : create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
2117 : {
2118 3004 : Path *pathnode = makeNode(Path);
2119 :
2120 3004 : pathnode->pathtype = T_CteScan;
2121 3004 : pathnode->parent = rel;
2122 3004 : pathnode->pathtarget = rel->reltarget;
2123 3004 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2124 : required_outer);
2125 3004 : pathnode->parallel_aware = false;
2126 3004 : pathnode->parallel_safe = rel->consider_parallel;
2127 3004 : pathnode->parallel_workers = 0;
2128 3004 : pathnode->pathkeys = NIL; /* XXX for now, result is always unordered */
2129 :
2130 3004 : cost_ctescan(pathnode, root, rel, pathnode->param_info);
2131 :
2132 3004 : return pathnode;
2133 : }
2134 :
2135 : /*
2136 : * create_namedtuplestorescan_path
2137 : * Creates a path corresponding to a scan of a named tuplestore, returning
2138 : * the pathnode.
2139 : */
2140 : Path *
2141 438 : create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
2142 : Relids required_outer)
2143 : {
2144 438 : Path *pathnode = makeNode(Path);
2145 :
2146 438 : pathnode->pathtype = T_NamedTuplestoreScan;
2147 438 : pathnode->parent = rel;
2148 438 : pathnode->pathtarget = rel->reltarget;
2149 438 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2150 : required_outer);
2151 438 : pathnode->parallel_aware = false;
2152 438 : pathnode->parallel_safe = rel->consider_parallel;
2153 438 : pathnode->parallel_workers = 0;
2154 438 : pathnode->pathkeys = NIL; /* result is always unordered */
2155 :
2156 438 : cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
2157 :
2158 438 : return pathnode;
2159 : }
2160 :
2161 : /*
2162 : * create_resultscan_path
2163 : * Creates a path corresponding to a scan of an RTE_RESULT relation,
2164 : * returning the pathnode.
2165 : */
2166 : Path *
2167 1402 : create_resultscan_path(PlannerInfo *root, RelOptInfo *rel,
2168 : Relids required_outer)
2169 : {
2170 1402 : Path *pathnode = makeNode(Path);
2171 :
2172 1402 : pathnode->pathtype = T_Result;
2173 1402 : pathnode->parent = rel;
2174 1402 : pathnode->pathtarget = rel->reltarget;
2175 1402 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2176 : required_outer);
2177 1402 : pathnode->parallel_aware = false;
2178 1402 : pathnode->parallel_safe = rel->consider_parallel;
2179 1402 : pathnode->parallel_workers = 0;
2180 1402 : pathnode->pathkeys = NIL; /* result is always unordered */
2181 :
2182 1402 : cost_resultscan(pathnode, root, rel, pathnode->param_info);
2183 :
2184 1402 : return pathnode;
2185 : }
2186 :
2187 : /*
2188 : * create_worktablescan_path
2189 : * Creates a path corresponding to a scan of a self-reference CTE,
2190 : * returning the pathnode.
2191 : */
2192 : Path *
2193 784 : create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
2194 : Relids required_outer)
2195 : {
2196 784 : Path *pathnode = makeNode(Path);
2197 :
2198 784 : pathnode->pathtype = T_WorkTableScan;
2199 784 : pathnode->parent = rel;
2200 784 : pathnode->pathtarget = rel->reltarget;
2201 784 : pathnode->param_info = get_baserel_parampathinfo(root, rel,
2202 : required_outer);
2203 784 : pathnode->parallel_aware = false;
2204 784 : pathnode->parallel_safe = rel->consider_parallel;
2205 784 : pathnode->parallel_workers = 0;
2206 784 : pathnode->pathkeys = NIL; /* result is always unordered */
2207 :
2208 : /* Cost is the same as for a regular CTE scan */
2209 784 : cost_ctescan(pathnode, root, rel, pathnode->param_info);
2210 :
2211 784 : return pathnode;
2212 : }
2213 :
2214 : /*
2215 : * create_foreignscan_path
2216 : * Creates a path corresponding to a scan of a foreign base table,
2217 : * returning the pathnode.
2218 : *
2219 : * This function is never called from core Postgres; rather, it's expected
2220 : * to be called by the GetForeignPaths function of a foreign data wrapper.
2221 : * We make the FDW supply all fields of the path, since we do not have any way
2222 : * to calculate them in core. However, there is a usually-sane default for
2223 : * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2224 : */
2225 : ForeignPath *
2226 3218 : create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
2227 : PathTarget *target,
2228 : double rows, Cost startup_cost, Cost total_cost,
2229 : List *pathkeys,
2230 : Relids required_outer,
2231 : Path *fdw_outerpath,
2232 : List *fdw_restrictinfo,
2233 : List *fdw_private)
2234 : {
2235 3218 : ForeignPath *pathnode = makeNode(ForeignPath);
2236 :
2237 : /* Historically some FDWs were confused about when to use this */
2238 : Assert(IS_SIMPLE_REL(rel));
2239 :
2240 3218 : pathnode->path.pathtype = T_ForeignScan;
2241 3218 : pathnode->path.parent = rel;
2242 3218 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2243 3218 : pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
2244 : required_outer);
2245 3218 : pathnode->path.parallel_aware = false;
2246 3218 : pathnode->path.parallel_safe = rel->consider_parallel;
2247 3218 : pathnode->path.parallel_workers = 0;
2248 3218 : pathnode->path.rows = rows;
2249 3218 : pathnode->path.startup_cost = startup_cost;
2250 3218 : pathnode->path.total_cost = total_cost;
2251 3218 : pathnode->path.pathkeys = pathkeys;
2252 :
2253 3218 : pathnode->fdw_outerpath = fdw_outerpath;
2254 3218 : pathnode->fdw_restrictinfo = fdw_restrictinfo;
2255 3218 : pathnode->fdw_private = fdw_private;
2256 :
2257 3218 : return pathnode;
2258 : }
2259 :
2260 : /*
2261 : * create_foreign_join_path
2262 : * Creates a path corresponding to a scan of a foreign join,
2263 : * returning the pathnode.
2264 : *
2265 : * This function is never called from core Postgres; rather, it's expected
2266 : * to be called by the GetForeignJoinPaths function of a foreign data wrapper.
2267 : * We make the FDW supply all fields of the path, since we do not have any way
2268 : * to calculate them in core. However, there is a usually-sane default for
2269 : * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2270 : */
2271 : ForeignPath *
2272 740 : create_foreign_join_path(PlannerInfo *root, RelOptInfo *rel,
2273 : PathTarget *target,
2274 : double rows, Cost startup_cost, Cost total_cost,
2275 : List *pathkeys,
2276 : Relids required_outer,
2277 : Path *fdw_outerpath,
2278 : List *fdw_restrictinfo,
2279 : List *fdw_private)
2280 : {
2281 740 : ForeignPath *pathnode = makeNode(ForeignPath);
2282 :
2283 : /*
2284 : * We should use get_joinrel_parampathinfo to handle parameterized paths,
2285 : * but the API of this function doesn't support it, and existing
2286 : * extensions aren't yet trying to build such paths anyway. For the
2287 : * moment just throw an error if someone tries it; eventually we should
2288 : * revisit this.
2289 : */
2290 740 : if (!bms_is_empty(required_outer) || !bms_is_empty(rel->lateral_relids))
2291 0 : elog(ERROR, "parameterized foreign joins are not supported yet");
2292 :
2293 740 : pathnode->path.pathtype = T_ForeignScan;
2294 740 : pathnode->path.parent = rel;
2295 740 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2296 740 : pathnode->path.param_info = NULL; /* XXX see above */
2297 740 : pathnode->path.parallel_aware = false;
2298 740 : pathnode->path.parallel_safe = rel->consider_parallel;
2299 740 : pathnode->path.parallel_workers = 0;
2300 740 : pathnode->path.rows = rows;
2301 740 : pathnode->path.startup_cost = startup_cost;
2302 740 : pathnode->path.total_cost = total_cost;
2303 740 : pathnode->path.pathkeys = pathkeys;
2304 :
2305 740 : pathnode->fdw_outerpath = fdw_outerpath;
2306 740 : pathnode->fdw_restrictinfo = fdw_restrictinfo;
2307 740 : pathnode->fdw_private = fdw_private;
2308 :
2309 740 : return pathnode;
2310 : }
2311 :
2312 : /*
2313 : * create_foreign_upper_path
2314 : * Creates a path corresponding to an upper relation that's computed
2315 : * directly by an FDW, returning the pathnode.
2316 : *
2317 : * This function is never called from core Postgres; rather, it's expected to
2318 : * be called by the GetForeignUpperPaths function of a foreign data wrapper.
2319 : * We make the FDW supply all fields of the path, since we do not have any way
2320 : * to calculate them in core. However, there is a usually-sane default for
2321 : * the pathtarget (rel->reltarget), so we let a NULL for "target" select that.
2322 : */
2323 : ForeignPath *
2324 544 : create_foreign_upper_path(PlannerInfo *root, RelOptInfo *rel,
2325 : PathTarget *target,
2326 : double rows, Cost startup_cost, Cost total_cost,
2327 : List *pathkeys,
2328 : Path *fdw_outerpath,
2329 : List *fdw_restrictinfo,
2330 : List *fdw_private)
2331 : {
2332 544 : ForeignPath *pathnode = makeNode(ForeignPath);
2333 :
2334 : /*
2335 : * Upper relations should never have any lateral references, since joining
2336 : * is complete.
2337 : */
2338 : Assert(bms_is_empty(rel->lateral_relids));
2339 :
2340 544 : pathnode->path.pathtype = T_ForeignScan;
2341 544 : pathnode->path.parent = rel;
2342 544 : pathnode->path.pathtarget = target ? target : rel->reltarget;
2343 544 : pathnode->path.param_info = NULL;
2344 544 : pathnode->path.parallel_aware = false;
2345 544 : pathnode->path.parallel_safe = rel->consider_parallel;
2346 544 : pathnode->path.parallel_workers = 0;
2347 544 : pathnode->path.rows = rows;
2348 544 : pathnode->path.startup_cost = startup_cost;
2349 544 : pathnode->path.total_cost = total_cost;
2350 544 : pathnode->path.pathkeys = pathkeys;
2351 :
2352 544 : pathnode->fdw_outerpath = fdw_outerpath;
2353 544 : pathnode->fdw_restrictinfo = fdw_restrictinfo;
2354 544 : pathnode->fdw_private = fdw_private;
2355 :
2356 544 : return pathnode;
2357 : }
2358 :
2359 : /*
2360 : * calc_nestloop_required_outer
2361 : * Compute the required_outer set for a nestloop join path
2362 : *
2363 : * Note: result must not share storage with either input
2364 : */
2365 : Relids
2366 2188638 : calc_nestloop_required_outer(Relids outerrelids,
2367 : Relids outer_paramrels,
2368 : Relids innerrelids,
2369 : Relids inner_paramrels)
2370 : {
2371 : Relids required_outer;
2372 :
2373 : /* inner_path can require rels from outer path, but not vice versa */
2374 : Assert(!bms_overlap(outer_paramrels, innerrelids));
2375 : /* easy case if inner path is not parameterized */
2376 2188638 : if (!inner_paramrels)
2377 1474518 : return bms_copy(outer_paramrels);
2378 : /* else, form the union ... */
2379 714120 : required_outer = bms_union(outer_paramrels, inner_paramrels);
2380 : /* ... and remove any mention of now-satisfied outer rels */
2381 714120 : required_outer = bms_del_members(required_outer,
2382 : outerrelids);
2383 714120 : return required_outer;
2384 : }
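
The Relids arithmetic above is simply set union followed by removal of the rels the outer side of this nestloop now supplies. A standalone sketch with a plain unsigned bitmask standing in for Bitmapset (the relation numbers are arbitrary assumptions):

#include <stdio.h>

int
main(void)
{
    /* bit i set means "depends on relation i" */
    unsigned int outerrelids = (1u << 1) | (1u << 2);     /* outer joins rels 1,2 */
    unsigned int outer_paramrels = (1u << 5);             /* outer still needs rel 5 */
    unsigned int inner_paramrels = (1u << 2) | (1u << 6); /* inner needs rels 2,6 */

    /* union of both parameterizations ... */
    unsigned int required_outer = outer_paramrels | inner_paramrels;

    /* ... minus anything now satisfied by the outer side of the join */
    required_outer &= ~outerrelids;

    printf("required_outer bitmask = 0x%x (rels 5 and 6)\n", required_outer);
    return 0;
}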
2385 :
2386 : /*
2387 : * calc_non_nestloop_required_outer
2388 : * Compute the required_outer set for a merge or hash join path
2389 : *
2390 : * Note: result must not share storage with either input
2391 : */
2392 : Relids
2393 1488472 : calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
2394 : {
2395 1488472 : Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
2396 1488472 : Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
2397 : Relids required_outer;
2398 :
2399 : /* neither path can require rels from the other */
2400 : Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
2401 : Assert(!bms_overlap(inner_paramrels, outer_path->parent->relids));
2402 : /* form the union ... */
2403 1488472 : required_outer = bms_union(outer_paramrels, inner_paramrels);
2404 : /* we do not need an explicit test for empty; bms_union gets it right */
2405 1488472 : return required_outer;
2406 : }
2407 :
2408 : /*
2409 : * create_nestloop_path
2410 : * Creates a pathnode corresponding to a nestloop join between two
2411 : * relations.
2412 : *
2413 : * 'joinrel' is the join relation.
2414 : * 'jointype' is the type of join required
2415 : * 'workspace' is the result from initial_cost_nestloop
2416 : * 'extra' contains various information about the join
2417 : * 'outer_path' is the outer path
2418 : * 'inner_path' is the inner path
2419 : * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2420 : * 'pathkeys' are the path keys of the new join path
2421 : * 'required_outer' is the set of required outer rels
2422 : *
2423 : * Returns the resulting path node.
2424 : */
2425 : NestPath *
2426 993788 : create_nestloop_path(PlannerInfo *root,
2427 : RelOptInfo *joinrel,
2428 : JoinType jointype,
2429 : JoinCostWorkspace *workspace,
2430 : JoinPathExtraData *extra,
2431 : Path *outer_path,
2432 : Path *inner_path,
2433 : List *restrict_clauses,
2434 : List *pathkeys,
2435 : Relids required_outer)
2436 : {
2437 993788 : NestPath *pathnode = makeNode(NestPath);
2438 993788 : Relids inner_req_outer = PATH_REQ_OUTER(inner_path);
2439 :
2440 : /*
2441 : * If the inner path is parameterized by the outer, we must drop any
2442 : * restrict_clauses that are due to be moved into the inner path. We have
2443 : * to do this now, rather than postpone the work till createplan time,
2444 : * because the restrict_clauses list can affect the size and cost
2445 : * estimates for this path. We detect such clauses by checking for serial
2446 : * number match to clauses already enforced in the inner path.
2447 : */
2448 993788 : if (bms_overlap(inner_req_outer, outer_path->parent->relids))
2449 : {
2450 285532 : Bitmapset *enforced_serials = get_param_path_clause_serials(inner_path);
2451 285532 : List *jclauses = NIL;
2452 : ListCell *lc;
2453 :
2454 614506 : foreach(lc, restrict_clauses)
2455 : {
2456 328974 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
2457 :
2458 328974 : if (!bms_is_member(rinfo->rinfo_serial, enforced_serials))
2459 36134 : jclauses = lappend(jclauses, rinfo);
2460 : }
2461 285532 : restrict_clauses = jclauses;
2462 : }
2463 :
2464 993788 : pathnode->jpath.path.pathtype = T_NestLoop;
2465 993788 : pathnode->jpath.path.parent = joinrel;
2466 993788 : pathnode->jpath.path.pathtarget = joinrel->reltarget;
2467 993788 : pathnode->jpath.path.param_info =
2468 993788 : get_joinrel_parampathinfo(root,
2469 : joinrel,
2470 : outer_path,
2471 : inner_path,
2472 : extra->sjinfo,
2473 : required_outer,
2474 : &restrict_clauses);
2475 993788 : pathnode->jpath.path.parallel_aware = false;
2476 2894862 : pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2477 993788 : outer_path->parallel_safe && inner_path->parallel_safe;
2478 : /* This is a foolish way to estimate parallel_workers, but for now... */
2479 993788 : pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2480 993788 : pathnode->jpath.path.pathkeys = pathkeys;
2481 993788 : pathnode->jpath.jointype = jointype;
2482 993788 : pathnode->jpath.inner_unique = extra->inner_unique;
2483 993788 : pathnode->jpath.outerjoinpath = outer_path;
2484 993788 : pathnode->jpath.innerjoinpath = inner_path;
2485 993788 : pathnode->jpath.joinrestrictinfo = restrict_clauses;
2486 :
2487 993788 : final_cost_nestloop(root, pathnode, workspace, extra);
2488 :
2489 993788 : return pathnode;
2490 : }
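
The filtering step above keeps only those join clauses whose rinfo_serial is not already enforced inside the parameterized inner path. Below is a standalone sketch of that idea with integer serial numbers and a bitmask playing the role of get_param_path_clause_serials(); the serial values are made up for illustration.

#include <stdio.h>

int
main(void)
{
    /* serial numbers of clauses already enforced inside the inner path
     * (what the real code gets back as a Bitmapset); assumed values */
    unsigned int enforced_serials = (1u << 3) | (1u << 7);

    int join_clause_serials[] = {3, 5, 7, 9};   /* candidate restrict_clauses */
    int kept = 0;

    for (int i = 0; i < 4; i++)
    {
        int serial = join_clause_serials[i];

        /* same test as bms_is_member(rinfo->rinfo_serial, enforced_serials) */
        if ((enforced_serials & (1u << serial)) == 0)
        {
            printf("keep clause with serial %d at the join\n", serial);
            kept++;
        }
    }
    printf("%d of 4 clauses remain as joinrestrictinfo\n", kept);
    return 0;
}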
2491 :
2492 : /*
2493 : * create_mergejoin_path
2494 : * Creates a pathnode corresponding to a mergejoin join between
2495 : * two relations
2496 : *
2497 : * 'joinrel' is the join relation
2498 : * 'jointype' is the type of join required
2499 : * 'workspace' is the result from initial_cost_mergejoin
2500 : * 'extra' contains various information about the join
2501 : * 'outer_path' is the outer path
2502 : * 'inner_path' is the inner path
2503 : * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2504 : * 'pathkeys' are the path keys of the new join path
2505 : * 'required_outer' is the set of required outer rels
2506 : * 'mergeclauses' are the RestrictInfo nodes to use as merge clauses
2507 : * (this should be a subset of the restrict_clauses list)
2508 : * 'outersortkeys' are the sort varkeys for the outer relation
2509 : * 'innersortkeys' are the sort varkeys for the inner relation
2510 : */
2511 : MergePath *
2512 232568 : create_mergejoin_path(PlannerInfo *root,
2513 : RelOptInfo *joinrel,
2514 : JoinType jointype,
2515 : JoinCostWorkspace *workspace,
2516 : JoinPathExtraData *extra,
2517 : Path *outer_path,
2518 : Path *inner_path,
2519 : List *restrict_clauses,
2520 : List *pathkeys,
2521 : Relids required_outer,
2522 : List *mergeclauses,
2523 : List *outersortkeys,
2524 : List *innersortkeys)
2525 : {
2526 232568 : MergePath *pathnode = makeNode(MergePath);
2527 :
2528 232568 : pathnode->jpath.path.pathtype = T_MergeJoin;
2529 232568 : pathnode->jpath.path.parent = joinrel;
2530 232568 : pathnode->jpath.path.pathtarget = joinrel->reltarget;
2531 232568 : pathnode->jpath.path.param_info =
2532 232568 : get_joinrel_parampathinfo(root,
2533 : joinrel,
2534 : outer_path,
2535 : inner_path,
2536 : extra->sjinfo,
2537 : required_outer,
2538 : &restrict_clauses);
2539 232568 : pathnode->jpath.path.parallel_aware = false;
2540 672014 : pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2541 232568 : outer_path->parallel_safe && inner_path->parallel_safe;
2542 : /* This is a foolish way to estimate parallel_workers, but for now... */
2543 232568 : pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2544 232568 : pathnode->jpath.path.pathkeys = pathkeys;
2545 232568 : pathnode->jpath.jointype = jointype;
2546 232568 : pathnode->jpath.inner_unique = extra->inner_unique;
2547 232568 : pathnode->jpath.outerjoinpath = outer_path;
2548 232568 : pathnode->jpath.innerjoinpath = inner_path;
2549 232568 : pathnode->jpath.joinrestrictinfo = restrict_clauses;
2550 232568 : pathnode->path_mergeclauses = mergeclauses;
2551 232568 : pathnode->outersortkeys = outersortkeys;
2552 232568 : pathnode->innersortkeys = innersortkeys;
2553 : /* pathnode->skip_mark_restore will be set by final_cost_mergejoin */
2554 : /* pathnode->materialize_inner will be set by final_cost_mergejoin */
2555 :
2556 232568 : final_cost_mergejoin(root, pathnode, workspace, extra);
2557 :
2558 232568 : return pathnode;
2559 : }
2560 :
2561 : /*
2562 : * create_hashjoin_path
2563 : * Creates a pathnode corresponding to a hash join between two relations.
2564 : *
2565 : * 'joinrel' is the join relation
2566 : * 'jointype' is the type of join required
2567 : * 'workspace' is the result from initial_cost_hashjoin
2568 : * 'extra' contains various information about the join
2569 : * 'outer_path' is the cheapest outer path
2570 : * 'inner_path' is the cheapest inner path
2571 : * 'parallel_hash' to select Parallel Hash of inner path (shared hash table)
2572 : * 'restrict_clauses' are the RestrictInfo nodes to apply at the join
2573 : * 'required_outer' is the set of required outer rels
2574 : * 'hashclauses' are the RestrictInfo nodes to use as hash clauses
2575 : * (this should be a subset of the restrict_clauses list)
2576 : */
2577 : HashPath *
2578 204996 : create_hashjoin_path(PlannerInfo *root,
2579 : RelOptInfo *joinrel,
2580 : JoinType jointype,
2581 : JoinCostWorkspace *workspace,
2582 : JoinPathExtraData *extra,
2583 : Path *outer_path,
2584 : Path *inner_path,
2585 : bool parallel_hash,
2586 : List *restrict_clauses,
2587 : Relids required_outer,
2588 : List *hashclauses)
2589 : {
2590 204996 : HashPath *pathnode = makeNode(HashPath);
2591 :
2592 204996 : pathnode->jpath.path.pathtype = T_HashJoin;
2593 204996 : pathnode->jpath.path.parent = joinrel;
2594 204996 : pathnode->jpath.path.pathtarget = joinrel->reltarget;
2595 204996 : pathnode->jpath.path.param_info =
2596 204996 : get_joinrel_parampathinfo(root,
2597 : joinrel,
2598 : outer_path,
2599 : inner_path,
2600 : extra->sjinfo,
2601 : required_outer,
2602 : &restrict_clauses);
2603 204996 : pathnode->jpath.path.parallel_aware =
2604 204996 : joinrel->consider_parallel && parallel_hash;
2605 590160 : pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
2606 204996 : outer_path->parallel_safe && inner_path->parallel_safe;
2607 : /* This is a foolish way to estimate parallel_workers, but for now... */
2608 204996 : pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
2609 :
2610 : /*
2611 : * A hashjoin never has pathkeys, since its output ordering is
2612 : * unpredictable due to possible batching. XXX If the inner relation is
2613 : * small enough, we could instruct the executor that it must not batch,
2614 : * and then we could assume that the output inherits the outer relation's
2615 : * ordering, which might save a sort step. However there is considerable
2616 : * downside if our estimate of the inner relation size is badly off. For
2617 : * the moment we don't risk it. (Note also that if we wanted to take this
2618 : * seriously, joinpath.c would have to consider many more paths for the
2619 : * outer rel than it does now.)
2620 : */
2621 204996 : pathnode->jpath.path.pathkeys = NIL;
2622 204996 : pathnode->jpath.jointype = jointype;
2623 204996 : pathnode->jpath.inner_unique = extra->inner_unique;
2624 204996 : pathnode->jpath.outerjoinpath = outer_path;
2625 204996 : pathnode->jpath.innerjoinpath = inner_path;
2626 204996 : pathnode->jpath.joinrestrictinfo = restrict_clauses;
2627 204996 : pathnode->path_hashclauses = hashclauses;
2628 : /* final_cost_hashjoin will fill in pathnode->num_batches */
2629 :
2630 204996 : final_cost_hashjoin(root, pathnode, workspace, extra);
2631 :
2632 204996 : return pathnode;
2633 : }
2634 :
2635 : /*
2636 : * create_projection_path
2637 : * Creates a pathnode that represents performing a projection.
2638 : *
2639 : * 'rel' is the parent relation associated with the result
2640 : * 'subpath' is the path representing the source of data
2641 : * 'target' is the PathTarget to be computed
2642 : */
2643 : ProjectionPath *
2644 341976 : create_projection_path(PlannerInfo *root,
2645 : RelOptInfo *rel,
2646 : Path *subpath,
2647 : PathTarget *target)
2648 : {
2649 341976 : ProjectionPath *pathnode = makeNode(ProjectionPath);
2650 : PathTarget *oldtarget;
2651 :
2652 : /*
2653 : * We mustn't put a ProjectionPath directly above another; it's useless
2654 : * and will confuse create_projection_plan. Rather than making sure all
2655 : * callers handle that, let's implement it here, by stripping off any
2656 : * ProjectionPath in what we're given. Given this rule, there won't be
2657 : * more than one.
2658 : */
2659 341976 : if (IsA(subpath, ProjectionPath))
2660 : {
2661 12 : ProjectionPath *subpp = (ProjectionPath *) subpath;
2662 :
2663 : Assert(subpp->path.parent == rel);
2664 12 : subpath = subpp->subpath;
2665 : Assert(!IsA(subpath, ProjectionPath));
2666 : }
2667 :
2668 341976 : pathnode->path.pathtype = T_Result;
2669 341976 : pathnode->path.parent = rel;
2670 341976 : pathnode->path.pathtarget = target;
2671 : /* For now, assume we are above any joins, so no parameterization */
2672 341976 : pathnode->path.param_info = NULL;
2673 341976 : pathnode->path.parallel_aware = false;
2674 761552 : pathnode->path.parallel_safe = rel->consider_parallel &&
2675 418954 : subpath->parallel_safe &&
2676 76978 : is_parallel_safe(root, (Node *) target->exprs);
2677 341976 : pathnode->path.parallel_workers = subpath->parallel_workers;
2678 : /* Projection does not change the sort order */
2679 341976 : pathnode->path.pathkeys = subpath->pathkeys;
2680 :
2681 341976 : pathnode->subpath = subpath;
2682 :
2683 : /*
2684 : * We might not need a separate Result node. If the input plan node type
2685 : * can project, we can just tell it to project something else. Or, if it
2686 : * can't project but the desired target has the same expression list as
2687 : * what the input will produce anyway, we can still give it the desired
2688 : * tlist (possibly changing its ressortgroupref labels, but nothing else).
2689 : * Note: in the latter case, create_projection_plan has to recheck our
2690 : * conclusion; see comments therein.
2691 : */
2692 341976 : oldtarget = subpath->pathtarget;
2693 354228 : if (is_projection_capable_path(subpath) ||
2694 12252 : equal(oldtarget->exprs, target->exprs))
2695 : {
2696 : /* No separate Result node needed */
2697 339958 : pathnode->dummypp = true;
2698 :
2699 : /*
2700 : * Set cost of plan as subpath's cost, adjusted for tlist replacement.
2701 : */
2702 339958 : pathnode->path.rows = subpath->rows;
2703 339958 : pathnode->path.startup_cost = subpath->startup_cost +
2704 339958 : (target->cost.startup - oldtarget->cost.startup);
2705 339958 : pathnode->path.total_cost = subpath->total_cost +
2706 339958 : (target->cost.startup - oldtarget->cost.startup) +
2707 339958 : (target->cost.per_tuple - oldtarget->cost.per_tuple) * subpath->rows;
2708 : }
2709 : else
2710 : {
2711 : /* We really do need the Result node */
2712 2018 : pathnode->dummypp = false;
2713 :
2714 : /*
2715 : * The Result node's cost is cpu_tuple_cost per row, plus the cost of
2716 : * evaluating the tlist. There is no qual to worry about.
2717 : */
2718 2018 : pathnode->path.rows = subpath->rows;
2719 2018 : pathnode->path.startup_cost = subpath->startup_cost +
2720 2018 : target->cost.startup;
2721 2018 : pathnode->path.total_cost = subpath->total_cost +
2722 2018 : target->cost.startup +
2723 2018 : (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows;
2724 : }
2725 :
2726 341976 : return pathnode;
2727 : }
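
To make the dummy-projection branch above concrete: when no separate Result node is needed, the subpath's costs are carried over, adjusted only by the difference between the new and old tlist evaluation costs. A standalone version of that arithmetic with assumed example numbers:

#include <stdio.h>

int
main(void)
{
    /* assumed subpath estimates */
    double sub_rows = 1000.0;
    double sub_startup = 2.0;
    double sub_total = 52.0;

    /* assumed old vs. new PathTarget evaluation costs */
    double old_startup = 0.00, old_per_tuple = 0.010;
    double new_startup = 0.25, new_per_tuple = 0.015;

    /* dummy projection: subpath cost adjusted for the tlist replacement */
    double startup = sub_startup + (new_startup - old_startup);
    double total = sub_total + (new_startup - old_startup)
        + (new_per_tuple - old_per_tuple) * sub_rows;

    printf("rows=%.0f startup=%.2f total=%.2f\n", sub_rows, startup, total);
    return 0;
}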
2728 :
2729 : /*
2730 : * apply_projection_to_path
2731 : * Add a projection step, or just apply the target directly to given path.
2732 : *
2733 : * This has the same net effect as create_projection_path(), except that if
2734 : * a separate Result plan node isn't needed, we just replace the given path's
2735 : * pathtarget with the desired one. This must be used only when the caller
2736 : * knows that the given path isn't referenced elsewhere and so can be modified
2737 : * in-place.
2738 : *
2739 : * If the input path is a GatherPath or GatherMergePath, we try to push the
2740 : * new target down to its input as well; this is a yet more invasive
2741 : * modification of the input path, which create_projection_path() can't do.
2742 : *
2743 : * Note that we mustn't change the source path's parent link; so when it is
2744 : * add_path'd to "rel" things will be a bit inconsistent. So far that has
2745 : * not caused any trouble.
2746 : *
2747 : * 'rel' is the parent relation associated with the result
2748 : * 'path' is the path representing the source of data
2749 : * 'target' is the PathTarget to be computed
2750 : */
2751 : Path *
2752 22580 : apply_projection_to_path(PlannerInfo *root,
2753 : RelOptInfo *rel,
2754 : Path *path,
2755 : PathTarget *target)
2756 : {
2757 : QualCost oldcost;
2758 :
2759 : /*
2760 : * If given path can't project, we might need a Result node, so make a
2761 : * separate ProjectionPath.
2762 : */
2763 22580 : if (!is_projection_capable_path(path))
2764 11168 : return (Path *) create_projection_path(root, rel, path, target);
2765 :
2766 : /*
2767 : * We can just jam the desired tlist into the existing path, being sure to
2768 : * update its cost estimates appropriately.
2769 : */
2770 11412 : oldcost = path->pathtarget->cost;
2771 11412 : path->pathtarget = target;
2772 :
2773 11412 : path->startup_cost += target->cost.startup - oldcost.startup;
2774 11412 : path->total_cost += target->cost.startup - oldcost.startup +
2775 11412 : (target->cost.per_tuple - oldcost.per_tuple) * path->rows;
2776 :
2777 : /*
2778 : * If the path happens to be a Gather or GatherMerge path, we'd like to
2779 : * arrange for the subpath to return the required target list so that
2780 : * workers can help project. But if there is something that is not
2781 : * parallel-safe in the target expressions, then we can't.
2782 : */
2783 11848 : if ((IsA(path, GatherPath) || IsA(path, GatherMergePath)) &&
2784 436 : is_parallel_safe(root, (Node *) target->exprs))
2785 : {
2786 : /*
2787 : * We always use create_projection_path here, even if the subpath is
2788 : * projection-capable, so as to avoid modifying the subpath in place.
2789 : * It seems unlikely at present that there could be any other
2790 : * references to the subpath, but better safe than sorry.
2791 : *
2792 : * Note that we don't change the parallel path's cost estimates; it
2793 : * might be appropriate to do so, to reflect the fact that the bulk of
2794 : * the target evaluation will happen in workers.
2795 : */
2796 436 : if (IsA(path, GatherPath))
2797 : {
2798 0 : GatherPath *gpath = (GatherPath *) path;
2799 :
2800 0 : gpath->subpath = (Path *)
2801 0 : create_projection_path(root,
2802 0 : gpath->subpath->parent,
2803 : gpath->subpath,
2804 : target);
2805 : }
2806 : else
2807 : {
2808 436 : GatherMergePath *gmpath = (GatherMergePath *) path;
2809 :
2810 436 : gmpath->subpath = (Path *)
2811 436 : create_projection_path(root,
2812 436 : gmpath->subpath->parent,
2813 : gmpath->subpath,
2814 : target);
2815 : }
2816 : }
2817 10976 : else if (path->parallel_safe &&
2818 5352 : !is_parallel_safe(root, (Node *) target->exprs))
2819 : {
2820 : /*
2821 : * We're inserting a parallel-restricted target list into a path
2822 : * currently marked parallel-safe, so we have to mark it as no longer
2823 : * safe.
2824 : */
2825 12 : path->parallel_safe = false;
2826 : }
2827 :
2828 11412 : return path;
2829 : }
2830 :
2831 : /*
2832 : * create_set_projection_path
2833 : * Creates a pathnode that represents performing a projection that
2834 : * includes set-returning functions.
2835 : *
2836 : * 'rel' is the parent relation associated with the result
2837 : * 'subpath' is the path representing the source of data
2838 : * 'target' is the PathTarget to be computed
2839 : */
2840 : ProjectSetPath *
2841 6138 : create_set_projection_path(PlannerInfo *root,
2842 : RelOptInfo *rel,
2843 : Path *subpath,
2844 : PathTarget *target)
2845 : {
2846 6138 : ProjectSetPath *pathnode = makeNode(ProjectSetPath);
2847 : double tlist_rows;
2848 : ListCell *lc;
2849 :
2850 6138 : pathnode->path.pathtype = T_ProjectSet;
2851 6138 : pathnode->path.parent = rel;
2852 6138 : pathnode->path.pathtarget = target;
2853 : /* For now, assume we are above any joins, so no parameterization */
2854 6138 : pathnode->path.param_info = NULL;
2855 6138 : pathnode->path.parallel_aware = false;
2856 13864 : pathnode->path.parallel_safe = rel->consider_parallel &&
2857 7690 : subpath->parallel_safe &&
2858 1552 : is_parallel_safe(root, (Node *) target->exprs);
2859 6138 : pathnode->path.parallel_workers = subpath->parallel_workers;
2860 : /* Projection does not change the sort order XXX? */
2861 6138 : pathnode->path.pathkeys = subpath->pathkeys;
2862 :
2863 6138 : pathnode->subpath = subpath;
2864 :
2865 : /*
2866 : * Estimate number of rows produced by SRFs for each row of input; if
2867 : * there's more than one in this node, use the maximum.
2868 : */
2869 6138 : tlist_rows = 1;
2870 14130 : foreach(lc, target->exprs)
2871 : {
2872 7992 : Node *node = (Node *) lfirst(lc);
2873 : double itemrows;
2874 :
2875 7992 : itemrows = expression_returns_set_rows(root, node);
2876 7992 : if (tlist_rows < itemrows)
2877 5938 : tlist_rows = itemrows;
2878 : }
2879 :
2880 : /*
2881 : * In addition to the cost of evaluating the tlist, charge cpu_tuple_cost
2882 : * per input row, and half of cpu_tuple_cost for each added output row.
2883 : * This is slightly bizarre maybe, but it's what 9.6 did; we may revisit
2884 : * this estimate later.
2885 : */
2886 6138 : pathnode->path.rows = subpath->rows * tlist_rows;
2887 6138 : pathnode->path.startup_cost = subpath->startup_cost +
2888 6138 : target->cost.startup;
2889 6138 : pathnode->path.total_cost = subpath->total_cost +
2890 6138 : target->cost.startup +
2891 6138 : (cpu_tuple_cost + target->cost.per_tuple) * subpath->rows +
2892 6138 : (pathnode->path.rows - subpath->rows) * cpu_tuple_cost / 2;
2893 :
2894 6138 : return pathnode;
2895 : }
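
Here is the same ProjectSet cost arithmetic as a standalone calculation, using the default cpu_tuple_cost of 0.01 and assumed inputs in which one set-returning function is expected to emit 10 rows per input row; note the half-rate cpu_tuple_cost charge on the added output rows, exactly as the comment above describes.

#include <stdio.h>

int
main(void)
{
    const double cpu_tuple_cost = 0.01;   /* PostgreSQL default GUC value */

    /* assumed inputs */
    double sub_rows = 100.0;
    double sub_startup = 1.0;
    double sub_total = 11.0;
    double target_startup = 0.0;
    double target_per_tuple = 0.005;
    double tlist_rows = 10.0;    /* max rows-per-call over the tlist's SRFs */

    double rows = sub_rows * tlist_rows;
    double startup = sub_startup + target_startup;
    double total = sub_total + target_startup
        + (cpu_tuple_cost + target_per_tuple) * sub_rows
        + (rows - sub_rows) * cpu_tuple_cost / 2;

    printf("rows=%.0f startup=%.2f total=%.3f\n", rows, startup, total);
    return 0;
}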
2896 :
2897 : /*
2898 : * create_incremental_sort_path
2899 : * Creates a pathnode that represents performing an incremental sort.
2900 : *
2901 : * 'rel' is the parent relation associated with the result
2902 : * 'subpath' is the path representing the source of data
2903 : * 'pathkeys' represents the desired sort order
2904 : * 'presorted_keys' is the number of keys by which the input path is
2905 : * already sorted
2906 : * 'limit_tuples' is the estimated bound on the number of output tuples,
2907 : * or -1 if no LIMIT or couldn't estimate
2908 : */
2909 : IncrementalSortPath *
2910 4582 : create_incremental_sort_path(PlannerInfo *root,
2911 : RelOptInfo *rel,
2912 : Path *subpath,
2913 : List *pathkeys,
2914 : int presorted_keys,
2915 : double limit_tuples)
2916 : {
2917 4582 : IncrementalSortPath *sort = makeNode(IncrementalSortPath);
2918 4582 : SortPath *pathnode = &sort->spath;
2919 :
2920 4582 : pathnode->path.pathtype = T_IncrementalSort;
2921 4582 : pathnode->path.parent = rel;
2922 : /* Sort doesn't project, so use source path's pathtarget */
2923 4582 : pathnode->path.pathtarget = subpath->pathtarget;
2924 : /* For now, assume we are above any joins, so no parameterization */
2925 4582 : pathnode->path.param_info = NULL;
2926 4582 : pathnode->path.parallel_aware = false;
2927 7828 : pathnode->path.parallel_safe = rel->consider_parallel &&
2928 3246 : subpath->parallel_safe;
2929 4582 : pathnode->path.parallel_workers = subpath->parallel_workers;
2930 4582 : pathnode->path.pathkeys = pathkeys;
2931 :
2932 4582 : pathnode->subpath = subpath;
2933 :
2934 4582 : cost_incremental_sort(&pathnode->path,
2935 : root, pathkeys, presorted_keys,
2936 : subpath->startup_cost,
2937 : subpath->total_cost,
2938 : subpath->rows,
2939 4582 : subpath->pathtarget->width,
2940 : 0.0, /* XXX comparison_cost shouldn't be 0? */
2941 : work_mem, limit_tuples);
2942 :
2943 4582 : sort->nPresortedCols = presorted_keys;
2944 :
2945 4582 : return sort;
2946 : }
2947 :
2948 : /*
2949 : * create_sort_path
2950 : * Creates a pathnode that represents performing an explicit sort.
2951 : *
2952 : * 'rel' is the parent relation associated with the result
2953 : * 'subpath' is the path representing the source of data
2954 : * 'pathkeys' represents the desired sort order
2955 : * 'limit_tuples' is the estimated bound on the number of output tuples,
2956 : * or -1 if no LIMIT or couldn't estimate
2957 : */
2958 : SortPath *
2959 64532 : create_sort_path(PlannerInfo *root,
2960 : RelOptInfo *rel,
2961 : Path *subpath,
2962 : List *pathkeys,
2963 : double limit_tuples)
2964 : {
2965 64532 : SortPath *pathnode = makeNode(SortPath);
2966 :
2967 64532 : pathnode->path.pathtype = T_Sort;
2968 64532 : pathnode->path.parent = rel;
2969 : /* Sort doesn't project, so use source path's pathtarget */
2970 64532 : pathnode->path.pathtarget = subpath->pathtarget;
2971 : /* For now, assume we are above any joins, so no parameterization */
2972 64532 : pathnode->path.param_info = NULL;
2973 64532 : pathnode->path.parallel_aware = false;
2974 110370 : pathnode->path.parallel_safe = rel->consider_parallel &&
2975 45838 : subpath->parallel_safe;
2976 64532 : pathnode->path.parallel_workers = subpath->parallel_workers;
2977 64532 : pathnode->path.pathkeys = pathkeys;
2978 :
2979 64532 : pathnode->subpath = subpath;
2980 :
2981 64532 : cost_sort(&pathnode->path, root, pathkeys,
2982 : subpath->total_cost,
2983 : subpath->rows,
2984 64532 : subpath->pathtarget->width,
2985 : 0.0, /* XXX comparison_cost shouldn't be 0? */
2986 : work_mem, limit_tuples);
2987 :
2988 64532 : return pathnode;
2989 : }
2990 :
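/*
 * Usage sketch (hypothetical caller, not taken from this file): picking
 * between an incremental sort and a full sort to satisfy a desired ordering.
 * "root", "rel", "input_path", "pathkeys", and "limit_tuples" are assumed to
 * be supplied by the surrounding planner code; the shape follows the usual
 * pattern in the upper planner, simplified.
 */
bool		is_sorted;
int			presorted_keys;
Path	   *sorted_path;

is_sorted = pathkeys_count_contained_in(pathkeys, input_path->pathkeys,
										&presorted_keys);
if (is_sorted)
	sorted_path = input_path;	/* already ordered as required */
else if (enable_incremental_sort && presorted_keys > 0)
	sorted_path = (Path *) create_incremental_sort_path(root, rel,
														input_path, pathkeys,
														presorted_keys,
														limit_tuples);
else
	sorted_path = (Path *) create_sort_path(root, rel, input_path,
											pathkeys, limit_tuples);
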
2991 : /*
2992 : * create_group_path
2993 : * Creates a pathnode that represents performing grouping of presorted input
2994 : *
2995 : * 'rel' is the parent relation associated with the result
2996 : * 'subpath' is the path representing the source of data
2997 : * 'target' is the PathTarget to be computed
2998 : * 'groupClause' is a list of SortGroupClause's representing the grouping
2999 : * 'qual' is the HAVING quals if any
3000 : * 'numGroups' is the estimated number of groups
3001 : */
3002 : GroupPath *
3003 1074 : create_group_path(PlannerInfo *root,
3004 : RelOptInfo *rel,
3005 : Path *subpath,
3006 : List *groupClause,
3007 : List *qual,
3008 : double numGroups)
3009 : {
3010 1074 : GroupPath *pathnode = makeNode(GroupPath);
3011 1074 : PathTarget *target = rel->reltarget;
3012 :
3013 1074 : pathnode->path.pathtype = T_Group;
3014 1074 : pathnode->path.parent = rel;
3015 1074 : pathnode->path.pathtarget = target;
3016 : /* For now, assume we are above any joins, so no parameterization */
3017 1074 : pathnode->path.param_info = NULL;
3018 1074 : pathnode->path.parallel_aware = false;
3019 1780 : pathnode->path.parallel_safe = rel->consider_parallel &&
3020 706 : subpath->parallel_safe;
3021 1074 : pathnode->path.parallel_workers = subpath->parallel_workers;
3022 : /* Group doesn't change sort ordering */
3023 1074 : pathnode->path.pathkeys = subpath->pathkeys;
3024 :
3025 1074 : pathnode->subpath = subpath;
3026 :
3027 1074 : pathnode->groupClause = groupClause;
3028 1074 : pathnode->qual = qual;
3029 :
3030 1074 : cost_group(&pathnode->path, root,
3031 : list_length(groupClause),
3032 : numGroups,
3033 : qual,
3034 : subpath->startup_cost, subpath->total_cost,
3035 : subpath->rows);
3036 :
3037 : /* add tlist eval cost for each output row */
3038 1074 : pathnode->path.startup_cost += target->cost.startup;
3039 1074 : pathnode->path.total_cost += target->cost.startup +
3040 1074 : target->cost.per_tuple * pathnode->path.rows;
3041 :
3042 1074 : return pathnode;
3043 : }
3044 :
3045 : /*
3046 : * create_upper_unique_path
3047 : * Creates a pathnode that represents performing an explicit Unique step
3048 : * on presorted input.
3049 : *
3050 : * This produces a Unique plan node, but the use-case is so different from
3051 : * create_unique_path that it doesn't seem worth trying to merge the two.
3052 : *
3053 : * 'rel' is the parent relation associated with the result
3054 : * 'subpath' is the path representing the source of data
3055 : * 'numCols' is the number of grouping columns
3056 : * 'numGroups' is the estimated number of groups
3057 : *
3058 : * The input path must be sorted on the grouping columns, plus possibly
3059 : * additional columns; so the first numCols pathkeys are the grouping columns
3060 : */
3061 : UpperUniquePath *
3062 3242 : create_upper_unique_path(PlannerInfo *root,
3063 : RelOptInfo *rel,
3064 : Path *subpath,
3065 : int numCols,
3066 : double numGroups)
3067 : {
3068 3242 : UpperUniquePath *pathnode = makeNode(UpperUniquePath);
3069 :
3070 3242 : pathnode->path.pathtype = T_Unique;
3071 3242 : pathnode->path.parent = rel;
3072 : /* Unique doesn't project, so use source path's pathtarget */
3073 3242 : pathnode->path.pathtarget = subpath->pathtarget;
3074 : /* For now, assume we are above any joins, so no parameterization */
3075 3242 : pathnode->path.param_info = NULL;
3076 3242 : pathnode->path.parallel_aware = false;
3077 5988 : pathnode->path.parallel_safe = rel->consider_parallel &&
3078 2746 : subpath->parallel_safe;
3079 3242 : pathnode->path.parallel_workers = subpath->parallel_workers;
3080 : /* Unique doesn't change the input ordering */
3081 3242 : pathnode->path.pathkeys = subpath->pathkeys;
3082 :
3083 3242 : pathnode->subpath = subpath;
3084 3242 : pathnode->numkeys = numCols;
3085 :
3086 : /*
3087 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3088             : 	 * all columns get compared for most of the tuples.  (XXX probably this is
3089 : * an overestimate.)
3090 : */
3091 3242 : pathnode->path.startup_cost = subpath->startup_cost;
3092 3242 : pathnode->path.total_cost = subpath->total_cost +
3093 3242 : cpu_operator_cost * subpath->rows * numCols;
3094 3242 : pathnode->path.rows = numGroups;
3095 :
3096 3242 : return pathnode;
3097 : }
3098 :
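/*
 * Illustrative sketch, not part of pathnode.c: the Unique costing rule
 * above, worked with made-up numbers.  It assumes the default
 * cpu_operator_cost of 0.0025; the subpath cost and row count are
 * hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	double		cpu_operator_cost = 0.0025; /* PostgreSQL default GUC value */
	double		subpath_total_cost = 100.0;
	double		subpath_rows = 1000.0;
	int			numCols = 2;
	double		total_cost;

	/* one comparison per grouping column per input tuple */
	total_cost = subpath_total_cost +
		cpu_operator_cost * subpath_rows * numCols;

	printf("total_cost = %g\n", total_cost);	/* 105 */
	return 0;
}
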
3099 : /*
3100 : * create_agg_path
3101 : * Creates a pathnode that represents performing aggregation/grouping
3102 : *
3103 : * 'rel' is the parent relation associated with the result
3104 : * 'subpath' is the path representing the source of data
3105 : * 'target' is the PathTarget to be computed
3106 : * 'aggstrategy' is the Agg node's basic implementation strategy
3107 : * 'aggsplit' is the Agg node's aggregate-splitting mode
3108 : * 'groupClause' is a list of SortGroupClause's representing the grouping
3109 : * 'qual' is the HAVING quals if any
3110 : * 'aggcosts' contains cost info about the aggregate functions to be computed
3111 : * 'numGroups' is the estimated number of groups (1 if not grouping)
3112 : */
3113 : AggPath *
3114 48936 : create_agg_path(PlannerInfo *root,
3115 : RelOptInfo *rel,
3116 : Path *subpath,
3117 : PathTarget *target,
3118 : AggStrategy aggstrategy,
3119 : AggSplit aggsplit,
3120 : List *groupClause,
3121 : List *qual,
3122 : const AggClauseCosts *aggcosts,
3123 : double numGroups)
3124 : {
3125 48936 : AggPath *pathnode = makeNode(AggPath);
3126 :
3127 48936 : pathnode->path.pathtype = T_Agg;
3128 48936 : pathnode->path.parent = rel;
3129 48936 : pathnode->path.pathtarget = target;
3130 : /* For now, assume we are above any joins, so no parameterization */
3131 48936 : pathnode->path.param_info = NULL;
3132 48936 : pathnode->path.parallel_aware = false;
3133 83372 : pathnode->path.parallel_safe = rel->consider_parallel &&
3134 34436 : subpath->parallel_safe;
3135 48936 : pathnode->path.parallel_workers = subpath->parallel_workers;
3136 :
3137 48936 : if (aggstrategy == AGG_SORTED)
3138 : {
3139 : /*
3140 : * Attempt to preserve the order of the subpath. Additional pathkeys
3141 : * may have been added in adjust_group_pathkeys_for_groupagg() to
3142 : * support ORDER BY / DISTINCT aggregates. Pathkeys added there
3143 : * belong to columns within the aggregate function, so we must strip
3144 : * these additional pathkeys off as those columns are unavailable
3145 : * above the aggregate node.
3146 : */
3147 7066 : if (list_length(subpath->pathkeys) > root->num_groupby_pathkeys)
3148 284 : pathnode->path.pathkeys = list_copy_head(subpath->pathkeys,
3149 : root->num_groupby_pathkeys);
3150 : else
3151 6782 : pathnode->path.pathkeys = subpath->pathkeys; /* preserves order */
3152 : }
3153 : else
3154 41870 : pathnode->path.pathkeys = NIL; /* output is unordered */
3155 :
3156 48936 : pathnode->subpath = subpath;
3157 :
3158 48936 : pathnode->aggstrategy = aggstrategy;
3159 48936 : pathnode->aggsplit = aggsplit;
3160 48936 : pathnode->numGroups = numGroups;
3161 48936 : pathnode->transitionSpace = aggcosts ? aggcosts->transitionSpace : 0;
3162 48936 : pathnode->groupClause = groupClause;
3163 48936 : pathnode->qual = qual;
3164 :
3165 48936 : cost_agg(&pathnode->path, root,
3166 : aggstrategy, aggcosts,
3167 : list_length(groupClause), numGroups,
3168 : qual,
3169 : subpath->startup_cost, subpath->total_cost,
3170 48936 : subpath->rows, subpath->pathtarget->width);
3171 :
3172 : /* add tlist eval cost for each output row */
3173 48936 : pathnode->path.startup_cost += target->cost.startup;
3174 48936 : pathnode->path.total_cost += target->cost.startup +
3175 48936 : target->cost.per_tuple * pathnode->path.rows;
3176 :
3177 48936 : return pathnode;
3178 : }
3179 :
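/*
 * Usage sketch (hypothetical caller, not taken from this file): adding a
 * plain hashed-aggregation path to a grouped relation.  "grouped_rel",
 * "cheapest_path", "grouping_target", "groupClause", "havingQual" (the
 * HAVING quals as a List), "agg_costs", and "dNumGroups" are all assumed to
 * be supplied by the surrounding planner code.
 */
add_path(grouped_rel, (Path *)
		 create_agg_path(root,
						 grouped_rel,
						 cheapest_path,
						 grouping_target,
						 AGG_HASHED,
						 AGGSPLIT_SIMPLE,
						 groupClause,
						 havingQual,
						 agg_costs,
						 dNumGroups));
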
3180 : /*
3181 : * create_groupingsets_path
3182 : * Creates a pathnode that represents performing GROUPING SETS aggregation
3183 : *
3184 : * GroupingSetsPath represents sorted grouping with one or more grouping sets.
3185             :  * The input path's result must be sorted to match the grouping columns of
3186             :  * the first unhashed rollup in 'rollups', if there is one.
3187 : *
3188 : * 'rel' is the parent relation associated with the result
3189 : * 'subpath' is the path representing the source of data
3190 : * 'target' is the PathTarget to be computed
3191 : * 'having_qual' is the HAVING quals if any
3192 : * 'rollups' is a list of RollupData nodes
3193 : * 'agg_costs' contains cost info about the aggregate functions to be computed
3194 : */
3195 : GroupingSetsPath *
3196 1792 : create_groupingsets_path(PlannerInfo *root,
3197 : RelOptInfo *rel,
3198 : Path *subpath,
3199 : List *having_qual,
3200 : AggStrategy aggstrategy,
3201 : List *rollups,
3202 : const AggClauseCosts *agg_costs)
3203 : {
3204 1792 : GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
3205 1792 : PathTarget *target = rel->reltarget;
3206 : ListCell *lc;
3207 1792 : bool is_first = true;
3208 1792 : bool is_first_sort = true;
3209 :
3210 : /* The topmost generated Plan node will be an Agg */
3211 1792 : pathnode->path.pathtype = T_Agg;
3212 1792 : pathnode->path.parent = rel;
3213 1792 : pathnode->path.pathtarget = target;
3214 1792 : pathnode->path.param_info = subpath->param_info;
3215 1792 : pathnode->path.parallel_aware = false;
3216 2614 : pathnode->path.parallel_safe = rel->consider_parallel &&
3217 822 : subpath->parallel_safe;
3218 1792 : pathnode->path.parallel_workers = subpath->parallel_workers;
3219 1792 : pathnode->subpath = subpath;
3220 :
3221 : /*
3222 : * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
3223 : * to AGG_HASHED, here if possible.
3224 : */
3225 2556 : if (aggstrategy == AGG_SORTED &&
3226 764 : list_length(rollups) == 1 &&
3227 362 : ((RollupData *) linitial(rollups))->groupClause == NIL)
3228 42 : aggstrategy = AGG_PLAIN;
3229 :
3230 2636 : if (aggstrategy == AGG_MIXED &&
3231 844 : list_length(rollups) == 1)
3232 0 : aggstrategy = AGG_HASHED;
3233 :
3234 : /*
3235 : * Output will be in sorted order by group_pathkeys if, and only if, there
3236 : * is a single rollup operation on a non-empty list of grouping
3237 : * expressions.
3238 : */
3239 1792 : if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
3240 320 : pathnode->path.pathkeys = root->group_pathkeys;
3241 : else
3242 1472 : pathnode->path.pathkeys = NIL;
3243 :
3244 1792 : pathnode->aggstrategy = aggstrategy;
3245 1792 : pathnode->rollups = rollups;
3246 1792 : pathnode->qual = having_qual;
3247 1792 : pathnode->transitionSpace = agg_costs ? agg_costs->transitionSpace : 0;
3248 :
3249 : Assert(rollups != NIL);
3250 : Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
3251 : Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
3252 :
3253 6396 : foreach(lc, rollups)
3254 : {
3255 4604 : RollupData *rollup = lfirst(lc);
3256 4604 : List *gsets = rollup->gsets;
3257 4604 : int numGroupCols = list_length(linitial(gsets));
3258 :
3259 : /*
3260 : * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
3261 : * (already-sorted) input, and following ones do their own sort.
3262 : *
3263 : * In AGG_HASHED mode, there is one rollup for each grouping set.
3264 : *
3265 : * In AGG_MIXED mode, the first rollups are hashed, the first
3266 : * non-hashed one takes the (already-sorted) input, and following ones
3267 : * do their own sort.
3268 : */
3269 4604 : if (is_first)
3270 : {
3271 1792 : cost_agg(&pathnode->path, root,
3272 : aggstrategy,
3273 : agg_costs,
3274 : numGroupCols,
3275 : rollup->numGroups,
3276 : having_qual,
3277 : subpath->startup_cost,
3278 : subpath->total_cost,
3279 : subpath->rows,
3280 1792 : subpath->pathtarget->width);
3281 1792 : is_first = false;
3282 1792 : if (!rollup->is_hashed)
3283 764 : is_first_sort = false;
3284 : }
3285 : else
3286 : {
3287 : Path sort_path; /* dummy for result of cost_sort */
3288 : Path agg_path; /* dummy for result of cost_agg */
3289 :
3290 2812 : if (rollup->is_hashed || is_first_sort)
3291 : {
3292 : /*
3293 : * Account for cost of aggregation, but don't charge input
3294 : * cost again
3295 : */
3296 2158 : cost_agg(&agg_path, root,
3297 2158 : rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
3298 : agg_costs,
3299 : numGroupCols,
3300 : rollup->numGroups,
3301 : having_qual,
3302 : 0.0, 0.0,
3303 : subpath->rows,
3304 2158 : subpath->pathtarget->width);
3305 2158 : if (!rollup->is_hashed)
3306 844 : is_first_sort = false;
3307 : }
3308 : else
3309 : {
3310 : /* Account for cost of sort, but don't charge input cost again */
3311 654 : cost_sort(&sort_path, root, NIL,
3312 : 0.0,
3313 : subpath->rows,
3314 654 : subpath->pathtarget->width,
3315 : 0.0,
3316 : work_mem,
3317 : -1.0);
3318 :
3319 : /* Account for cost of aggregation */
3320 :
3321 654 : cost_agg(&agg_path, root,
3322 : AGG_SORTED,
3323 : agg_costs,
3324 : numGroupCols,
3325 : rollup->numGroups,
3326 : having_qual,
3327 : sort_path.startup_cost,
3328 : sort_path.total_cost,
3329 : sort_path.rows,
3330 654 : subpath->pathtarget->width);
3331 : }
3332 :
3333 2812 : pathnode->path.total_cost += agg_path.total_cost;
3334 2812 : pathnode->path.rows += agg_path.rows;
3335 : }
3336 : }
3337 :
3338 : /* add tlist eval cost for each output row */
3339 1792 : pathnode->path.startup_cost += target->cost.startup;
3340 1792 : pathnode->path.total_cost += target->cost.startup +
3341 1792 : target->cost.per_tuple * pathnode->path.rows;
3342 :
3343 1792 : return pathnode;
3344 : }
3345 :
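/*
 * Illustrative sketch, not part of pathnode.c: how the loop above
 * accumulates total_cost and rows across the rollups.  The per-rollup
 * numbers are made up; only the accumulation pattern mirrors the code.
 */
#include <stdio.h>

int
main(void)
{
	/* first rollup: cost_agg() charged together with the sorted input */
	double		total_cost = 130.0;
	double		rows = 50.0;	/* first rollup's numGroups */

	/* a hashed rollup: its own aggregation cost only, input not re-charged */
	total_cost += 25.0;
	rows += 20.0;

	/* a later sorted rollup: its own sort plus aggregation cost */
	total_cost += 40.0 + 25.0;
	rows += 10.0;

	printf("total_cost = %g, rows = %g\n", total_cost, rows);	/* 220, 80 */
	return 0;
}
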
3346 : /*
3347 : * create_minmaxagg_path
3348 : * Creates a pathnode that represents computation of MIN/MAX aggregates
3349 : *
3350 : * 'rel' is the parent relation associated with the result
3351 : * 'target' is the PathTarget to be computed
3352 : * 'mmaggregates' is a list of MinMaxAggInfo structs
3353 : * 'quals' is the HAVING quals if any
3354 : */
3355 : MinMaxAggPath *
3356 380 : create_minmaxagg_path(PlannerInfo *root,
3357 : RelOptInfo *rel,
3358 : PathTarget *target,
3359 : List *mmaggregates,
3360 : List *quals)
3361 : {
3362 380 : MinMaxAggPath *pathnode = makeNode(MinMaxAggPath);
3363 : Cost initplan_cost;
3364 : ListCell *lc;
3365 :
3366 : /* The topmost generated Plan node will be a Result */
3367 380 : pathnode->path.pathtype = T_Result;
3368 380 : pathnode->path.parent = rel;
3369 380 : pathnode->path.pathtarget = target;
3370 : /* For now, assume we are above any joins, so no parameterization */
3371 380 : pathnode->path.param_info = NULL;
3372 380 : pathnode->path.parallel_aware = false;
3373 380 : pathnode->path.parallel_safe = true; /* might change below */
3374 380 : pathnode->path.parallel_workers = 0;
3375 : /* Result is one unordered row */
3376 380 : pathnode->path.rows = 1;
3377 380 : pathnode->path.pathkeys = NIL;
3378 :
3379 380 : pathnode->mmaggregates = mmaggregates;
3380 380 : pathnode->quals = quals;
3381 :
3382 : /* Calculate cost of all the initplans, and check parallel safety */
3383 380 : initplan_cost = 0;
3384 796 : foreach(lc, mmaggregates)
3385 : {
3386 416 : MinMaxAggInfo *mminfo = (MinMaxAggInfo *) lfirst(lc);
3387 :
3388 416 : initplan_cost += mminfo->pathcost;
3389 416 : if (!mminfo->path->parallel_safe)
3390 80 : pathnode->path.parallel_safe = false;
3391 : }
3392 :
3393 : /* add tlist eval cost for each output row, plus cpu_tuple_cost */
3394 380 : pathnode->path.startup_cost = initplan_cost + target->cost.startup;
3395 380 : pathnode->path.total_cost = initplan_cost + target->cost.startup +
3396 380 : target->cost.per_tuple + cpu_tuple_cost;
3397 :
3398 : /*
3399 : * Add cost of qual, if any --- but we ignore its selectivity, since our
3400 : * rowcount estimate should be 1 no matter what the qual is.
3401 : */
3402 380 : if (quals)
3403 : {
3404 : QualCost qual_cost;
3405 :
3406 0 : cost_qual_eval(&qual_cost, quals, root);
3407 0 : pathnode->path.startup_cost += qual_cost.startup;
3408 0 : pathnode->path.total_cost += qual_cost.startup + qual_cost.per_tuple;
3409 : }
3410 :
3411 : /*
3412 : * If the initplans were all parallel-safe, also check safety of the
3413 : * target and quals. (The Result node itself isn't parallelizable, but if
3414 : * we are in a subquery then it can be useful for the outer query to know
3415 : * that this one is parallel-safe.)
3416 : */
3417 380 : if (pathnode->path.parallel_safe)
3418 300 : pathnode->path.parallel_safe =
3419 600 : is_parallel_safe(root, (Node *) target->exprs) &&
3420 300 : is_parallel_safe(root, (Node *) quals);
3421 :
3422 380 : return pathnode;
3423 : }
3424 :
3425 : /*
3426 : * create_windowagg_path
3427 : * Creates a pathnode that represents computation of window functions
3428 : *
3429 : * 'rel' is the parent relation associated with the result
3430 : * 'subpath' is the path representing the source of data
3431 : * 'target' is the PathTarget to be computed
3432 : * 'windowFuncs' is a list of WindowFunc structs
3433 : * 'winclause' is a WindowClause that is common to all the WindowFuncs
3434             :  * 'qual' is the WindowClause.runconditions from lower-level WindowAggPaths;
3435             :  *		it must always be NIL when topwindow == false
3436             :  * 'topwindow' is true only for the top-level WindowAgg; false for all
3437             :  *		intermediate WindowAggs.
3438 : *
3439 : * The input must be sorted according to the WindowClause's PARTITION keys
3440 : * plus ORDER BY keys.
3441 : */
3442 : WindowAggPath *
3443 2592 : create_windowagg_path(PlannerInfo *root,
3444 : RelOptInfo *rel,
3445 : Path *subpath,
3446 : PathTarget *target,
3447 : List *windowFuncs,
3448 : WindowClause *winclause,
3449 : List *qual,
3450 : bool topwindow)
3451 : {
3452 2592 : WindowAggPath *pathnode = makeNode(WindowAggPath);
3453 :
3454 : /* qual can only be set for the topwindow */
3455 : Assert(qual == NIL || topwindow);
3456 :
3457 2592 : pathnode->path.pathtype = T_WindowAgg;
3458 2592 : pathnode->path.parent = rel;
3459 2592 : pathnode->path.pathtarget = target;
3460 : /* For now, assume we are above any joins, so no parameterization */
3461 2592 : pathnode->path.param_info = NULL;
3462 2592 : pathnode->path.parallel_aware = false;
3463 2592 : pathnode->path.parallel_safe = rel->consider_parallel &&
3464 0 : subpath->parallel_safe;
3465 2592 : pathnode->path.parallel_workers = subpath->parallel_workers;
3466 : /* WindowAgg preserves the input sort order */
3467 2592 : pathnode->path.pathkeys = subpath->pathkeys;
3468 :
3469 2592 : pathnode->subpath = subpath;
3470 2592 : pathnode->winclause = winclause;
3471 2592 : pathnode->qual = qual;
3472 2592 : pathnode->topwindow = topwindow;
3473 :
3474 : /*
3475 : * For costing purposes, assume that there are no redundant partitioning
3476 : * or ordering columns; it's not worth the trouble to deal with that
3477 : * corner case here. So we just pass the unmodified list lengths to
3478 : * cost_windowagg.
3479 : */
3480 2592 : cost_windowagg(&pathnode->path, root,
3481 : windowFuncs,
3482 : winclause,
3483 : subpath->startup_cost,
3484 : subpath->total_cost,
3485 : subpath->rows);
3486 :
3487 : /* add tlist eval cost for each output row */
3488 2592 : pathnode->path.startup_cost += target->cost.startup;
3489 2592 : pathnode->path.total_cost += target->cost.startup +
3490 2592 : target->cost.per_tuple * pathnode->path.rows;
3491 :
3492 2592 : return pathnode;
3493 : }
3494 :
3495 : /*
3496 : * create_setop_path
3497 : * Creates a pathnode that represents computation of INTERSECT or EXCEPT
3498 : *
3499 : * 'rel' is the parent relation associated with the result
3500 : * 'subpath' is the path representing the source of data
3501 : * 'cmd' is the specific semantics (INTERSECT or EXCEPT, with/without ALL)
3502 : * 'strategy' is the implementation strategy (sorted or hashed)
3503 : * 'distinctList' is a list of SortGroupClause's representing the grouping
3504 : * 'flagColIdx' is the column number where the flag column will be, if any
3505 : * 'firstFlag' is the flag value for the first input relation when hashing;
3506 : * or -1 when sorting
3507 : * 'numGroups' is the estimated number of distinct groups
3508 : * 'outputRows' is the estimated number of output rows
3509 : */
3510 : SetOpPath *
3511 544 : create_setop_path(PlannerInfo *root,
3512 : RelOptInfo *rel,
3513 : Path *subpath,
3514 : SetOpCmd cmd,
3515 : SetOpStrategy strategy,
3516 : List *distinctList,
3517 : AttrNumber flagColIdx,
3518 : int firstFlag,
3519 : double numGroups,
3520 : double outputRows)
3521 : {
3522 544 : SetOpPath *pathnode = makeNode(SetOpPath);
3523 :
3524 544 : pathnode->path.pathtype = T_SetOp;
3525 544 : pathnode->path.parent = rel;
3526 : /* SetOp doesn't project, so use source path's pathtarget */
3527 544 : pathnode->path.pathtarget = subpath->pathtarget;
3528 : /* For now, assume we are above any joins, so no parameterization */
3529 544 : pathnode->path.param_info = NULL;
3530 544 : pathnode->path.parallel_aware = false;
3531 544 : pathnode->path.parallel_safe = rel->consider_parallel &&
3532 0 : subpath->parallel_safe;
3533 544 : pathnode->path.parallel_workers = subpath->parallel_workers;
3534 : /* SetOp preserves the input sort order if in sort mode */
3535 544 : pathnode->path.pathkeys =
3536 544 : (strategy == SETOP_SORTED) ? subpath->pathkeys : NIL;
3537 :
3538 544 : pathnode->subpath = subpath;
3539 544 : pathnode->cmd = cmd;
3540 544 : pathnode->strategy = strategy;
3541 544 : pathnode->distinctList = distinctList;
3542 544 : pathnode->flagColIdx = flagColIdx;
3543 544 : pathnode->firstFlag = firstFlag;
3544 544 : pathnode->numGroups = numGroups;
3545 :
3546 : /*
3547 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3548             : 	 * all columns get compared for most of the tuples.
3549 : */
3550 544 : pathnode->path.startup_cost = subpath->startup_cost;
3551 1088 : pathnode->path.total_cost = subpath->total_cost +
3552 544 : cpu_operator_cost * subpath->rows * list_length(distinctList);
3553 544 : pathnode->path.rows = outputRows;
3554 :
3555 544 : return pathnode;
3556 : }
3557 :
3558 : /*
3559 : * create_recursiveunion_path
3560 : * Creates a pathnode that represents a recursive UNION node
3561 : *
3562 : * 'rel' is the parent relation associated with the result
3563 : * 'leftpath' is the source of data for the non-recursive term
3564 : * 'rightpath' is the source of data for the recursive term
3565 : * 'target' is the PathTarget to be computed
3566 : * 'distinctList' is a list of SortGroupClause's representing the grouping
3567 : * 'wtParam' is the ID of Param representing work table
3568 : * 'numGroups' is the estimated number of groups
3569 : *
3570 : * For recursive UNION ALL, distinctList is empty and numGroups is zero
3571 : */
3572 : RecursiveUnionPath *
3573 778 : create_recursiveunion_path(PlannerInfo *root,
3574 : RelOptInfo *rel,
3575 : Path *leftpath,
3576 : Path *rightpath,
3577 : PathTarget *target,
3578 : List *distinctList,
3579 : int wtParam,
3580 : double numGroups)
3581 : {
3582 778 : RecursiveUnionPath *pathnode = makeNode(RecursiveUnionPath);
3583 :
3584 778 : pathnode->path.pathtype = T_RecursiveUnion;
3585 778 : pathnode->path.parent = rel;
3586 778 : pathnode->path.pathtarget = target;
3587 : /* For now, assume we are above any joins, so no parameterization */
3588 778 : pathnode->path.param_info = NULL;
3589 778 : pathnode->path.parallel_aware = false;
3590 1556 : pathnode->path.parallel_safe = rel->consider_parallel &&
3591 778 : leftpath->parallel_safe && rightpath->parallel_safe;
3592 : /* Foolish, but we'll do it like joins for now: */
3593 778 : pathnode->path.parallel_workers = leftpath->parallel_workers;
3594 : /* RecursiveUnion result is always unsorted */
3595 778 : pathnode->path.pathkeys = NIL;
3596 :
3597 778 : pathnode->leftpath = leftpath;
3598 778 : pathnode->rightpath = rightpath;
3599 778 : pathnode->distinctList = distinctList;
3600 778 : pathnode->wtParam = wtParam;
3601 778 : pathnode->numGroups = numGroups;
3602 :
3603 778 : cost_recursive_union(&pathnode->path, leftpath, rightpath);
3604 :
3605 778 : return pathnode;
3606 : }
3607 :
3608 : /*
3609 : * create_lockrows_path
3610 : * Creates a pathnode that represents acquiring row locks
3611 : *
3612 : * 'rel' is the parent relation associated with the result
3613 : * 'subpath' is the path representing the source of data
3614 : * 'rowMarks' is a list of PlanRowMark's
3615 : * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3616 : */
3617 : LockRowsPath *
3618 7114 : create_lockrows_path(PlannerInfo *root, RelOptInfo *rel,
3619 : Path *subpath, List *rowMarks, int epqParam)
3620 : {
3621 7114 : LockRowsPath *pathnode = makeNode(LockRowsPath);
3622 :
3623 7114 : pathnode->path.pathtype = T_LockRows;
3624 7114 : pathnode->path.parent = rel;
3625 : /* LockRows doesn't project, so use source path's pathtarget */
3626 7114 : pathnode->path.pathtarget = subpath->pathtarget;
3627 : /* For now, assume we are above any joins, so no parameterization */
3628 7114 : pathnode->path.param_info = NULL;
3629 7114 : pathnode->path.parallel_aware = false;
3630 7114 : pathnode->path.parallel_safe = false;
3631 7114 : pathnode->path.parallel_workers = 0;
3632 7114 : pathnode->path.rows = subpath->rows;
3633 :
3634 : /*
3635 : * The result cannot be assumed sorted, since locking might cause the sort
3636 : * key columns to be replaced with new values.
3637 : */
3638 7114 : pathnode->path.pathkeys = NIL;
3639 :
3640 7114 : pathnode->subpath = subpath;
3641 7114 : pathnode->rowMarks = rowMarks;
3642 7114 : pathnode->epqParam = epqParam;
3643 :
3644 : /*
3645 : * We should charge something extra for the costs of row locking and
3646 : * possible refetches, but it's hard to say how much. For now, use
3647 : * cpu_tuple_cost per row.
3648 : */
3649 7114 : pathnode->path.startup_cost = subpath->startup_cost;
3650 7114 : pathnode->path.total_cost = subpath->total_cost +
3651 7114 : cpu_tuple_cost * subpath->rows;
3652 :
3653 7114 : return pathnode;
3654 : }
3655 :
3656 : /*
3657 : * create_modifytable_path
3658 : * Creates a pathnode that represents performing INSERT/UPDATE/DELETE/MERGE
3659 : * mods
3660 : *
3661 : * 'rel' is the parent relation associated with the result
3662 : * 'subpath' is a Path producing source data
3663 : * 'operation' is the operation type
3664 : * 'canSetTag' is true if we set the command tag/es_processed
3665 : * 'nominalRelation' is the parent RT index for use of EXPLAIN
3666 : * 'rootRelation' is the partitioned/inherited table root RTI, or 0 if none
3667 : * 'partColsUpdated' is true if any partitioning columns are being updated,
3668             :  *		either from the target relation or a descendant partitioned table.
3669 : * 'resultRelations' is an integer list of actual RT indexes of target rel(s)
3670 : * 'updateColnosLists' is a list of UPDATE target column number lists
3671 : * (one sublist per rel); or NIL if not an UPDATE
3672 : * 'withCheckOptionLists' is a list of WCO lists (one per rel)
3673 : * 'returningLists' is a list of RETURNING tlists (one per rel)
3674 : * 'rowMarks' is a list of PlanRowMarks (non-locking only)
3675 : * 'onconflict' is the ON CONFLICT clause, or NULL
3676 : * 'epqParam' is the ID of Param for EvalPlanQual re-eval
3677 : * 'mergeActionLists' is a list of lists of MERGE actions (one per rel)
3678 : */
3679 : ModifyTablePath *
3680 74588 : create_modifytable_path(PlannerInfo *root, RelOptInfo *rel,
3681 : Path *subpath,
3682 : CmdType operation, bool canSetTag,
3683 : Index nominalRelation, Index rootRelation,
3684 : bool partColsUpdated,
3685 : List *resultRelations,
3686 : List *updateColnosLists,
3687 : List *withCheckOptionLists, List *returningLists,
3688 : List *rowMarks, OnConflictExpr *onconflict,
3689 : List *mergeActionLists, int epqParam)
3690 : {
3691 74588 : ModifyTablePath *pathnode = makeNode(ModifyTablePath);
3692 :
3693 : Assert(operation == CMD_MERGE ||
3694 : (operation == CMD_UPDATE ?
3695 : list_length(resultRelations) == list_length(updateColnosLists) :
3696 : updateColnosLists == NIL));
3697 : Assert(withCheckOptionLists == NIL ||
3698 : list_length(resultRelations) == list_length(withCheckOptionLists));
3699 : Assert(returningLists == NIL ||
3700 : list_length(resultRelations) == list_length(returningLists));
3701 :
3702 74588 : pathnode->path.pathtype = T_ModifyTable;
3703 74588 : pathnode->path.parent = rel;
3704 : /* pathtarget is not interesting, just make it minimally valid */
3705 74588 : pathnode->path.pathtarget = rel->reltarget;
3706 : /* For now, assume we are above any joins, so no parameterization */
3707 74588 : pathnode->path.param_info = NULL;
3708 74588 : pathnode->path.parallel_aware = false;
3709 74588 : pathnode->path.parallel_safe = false;
3710 74588 : pathnode->path.parallel_workers = 0;
3711 74588 : pathnode->path.pathkeys = NIL;
3712 :
3713 : /*
3714 : * Compute cost & rowcount as subpath cost & rowcount (if RETURNING)
3715 : *
3716 : * Currently, we don't charge anything extra for the actual table
3717 : * modification work, nor for the WITH CHECK OPTIONS or RETURNING
3718 : * expressions if any. It would only be window dressing, since
3719 : * ModifyTable is always a top-level node and there is no way for the
3720 : * costs to change any higher-level planning choices. But we might want
3721 : * to make it look better sometime.
3722 : */
3723 74588 : pathnode->path.startup_cost = subpath->startup_cost;
3724 74588 : pathnode->path.total_cost = subpath->total_cost;
3725 74588 : if (returningLists != NIL)
3726 : {
3727 2268 : pathnode->path.rows = subpath->rows;
3728 :
3729 : /*
3730 : * Set width to match the subpath output. XXX this is totally wrong:
3731 : * we should return an average of the RETURNING tlist widths. But
3732 : * it's what happened historically, and improving it is a task for
3733 : * another day. (Again, it's mostly window dressing.)
3734 : */
3735 2268 : pathnode->path.pathtarget->width = subpath->pathtarget->width;
3736 : }
3737 : else
3738 : {
3739 72320 : pathnode->path.rows = 0;
3740 72320 : pathnode->path.pathtarget->width = 0;
3741 : }
3742 :
3743 74588 : pathnode->subpath = subpath;
3744 74588 : pathnode->operation = operation;
3745 74588 : pathnode->canSetTag = canSetTag;
3746 74588 : pathnode->nominalRelation = nominalRelation;
3747 74588 : pathnode->rootRelation = rootRelation;
3748 74588 : pathnode->partColsUpdated = partColsUpdated;
3749 74588 : pathnode->resultRelations = resultRelations;
3750 74588 : pathnode->updateColnosLists = updateColnosLists;
3751 74588 : pathnode->withCheckOptionLists = withCheckOptionLists;
3752 74588 : pathnode->returningLists = returningLists;
3753 74588 : pathnode->rowMarks = rowMarks;
3754 74588 : pathnode->onconflict = onconflict;
3755 74588 : pathnode->epqParam = epqParam;
3756 74588 : pathnode->mergeActionLists = mergeActionLists;
3757 :
3758 74588 : return pathnode;
3759 : }
3760 :
3761 : /*
3762 : * create_limit_path
3763 : * Creates a pathnode that represents performing LIMIT/OFFSET
3764 : *
3765 : * In addition to providing the actual OFFSET and LIMIT expressions,
3766 : * the caller must provide estimates of their values for costing purposes.
3767 : * The estimates are as computed by preprocess_limit(), ie, 0 represents
3768 : * the clause not being present, and -1 means it's present but we could
3769 : * not estimate its value.
3770 : *
3771 : * 'rel' is the parent relation associated with the result
3772 : * 'subpath' is the path representing the source of data
3773 : * 'limitOffset' is the actual OFFSET expression, or NULL
3774 : * 'limitCount' is the actual LIMIT expression, or NULL
3775 : * 'offset_est' is the estimated value of the OFFSET expression
3776 : * 'count_est' is the estimated value of the LIMIT expression
3777 : */
3778 : LimitPath *
3779 5242 : create_limit_path(PlannerInfo *root, RelOptInfo *rel,
3780 : Path *subpath,
3781 : Node *limitOffset, Node *limitCount,
3782 : LimitOption limitOption,
3783 : int64 offset_est, int64 count_est)
3784 : {
3785 5242 : LimitPath *pathnode = makeNode(LimitPath);
3786 :
3787 5242 : pathnode->path.pathtype = T_Limit;
3788 5242 : pathnode->path.parent = rel;
3789 : /* Limit doesn't project, so use source path's pathtarget */
3790 5242 : pathnode->path.pathtarget = subpath->pathtarget;
3791 : /* For now, assume we are above any joins, so no parameterization */
3792 5242 : pathnode->path.param_info = NULL;
3793 5242 : pathnode->path.parallel_aware = false;
3794 7322 : pathnode->path.parallel_safe = rel->consider_parallel &&
3795 2080 : subpath->parallel_safe;
3796 5242 : pathnode->path.parallel_workers = subpath->parallel_workers;
3797 5242 : pathnode->path.rows = subpath->rows;
3798 5242 : pathnode->path.startup_cost = subpath->startup_cost;
3799 5242 : pathnode->path.total_cost = subpath->total_cost;
3800 5242 : pathnode->path.pathkeys = subpath->pathkeys;
3801 5242 : pathnode->subpath = subpath;
3802 5242 : pathnode->limitOffset = limitOffset;
3803 5242 : pathnode->limitCount = limitCount;
3804 5242 : pathnode->limitOption = limitOption;
3805 :
3806 : /*
3807 : * Adjust the output rows count and costs according to the offset/limit.
3808 : */
3809 5242 : adjust_limit_rows_costs(&pathnode->path.rows,
3810 : &pathnode->path.startup_cost,
3811 : &pathnode->path.total_cost,
3812 : offset_est, count_est);
3813 :
3814 5242 : return pathnode;
3815 : }
3816 :
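/*
 * Usage sketch (hypothetical caller, not taken from this file): wrapping
 * the final path in a Limit node.  "final_rel", "path", and the
 * "offset_est"/"count_est" estimates (as produced by preprocess_limit())
 * are assumed to be in scope; "parse" is root->parse.
 */
if (limit_needed(parse))
	path = (Path *) create_limit_path(root, final_rel, path,
									  parse->limitOffset,
									  parse->limitCount,
									  parse->limitOption,
									  offset_est, count_est);
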
3817 : /*
3818 : * adjust_limit_rows_costs
3819 : * Adjust the size and cost estimates for a LimitPath node according to the
3820 : * offset/limit.
3821 : *
3822 : * This is only a cosmetic issue if we are at top level, but if we are
3823 : * building a subquery then it's important to report correct info to the outer
3824 : * planner.
3825 : *
3826 : * When the offset or count couldn't be estimated, use 10% of the estimated
3827 : * number of rows emitted from the subpath.
3828 : *
3829 : * XXX we don't bother to add eval costs of the offset/limit expressions
3830 : * themselves to the path costs. In theory we should, but in most cases those
3831 : * expressions are trivial and it's just not worth the trouble.
3832 : */
3833 : void
3834 5424 : adjust_limit_rows_costs(double *rows, /* in/out parameter */
3835 : Cost *startup_cost, /* in/out parameter */
3836 : Cost *total_cost, /* in/out parameter */
3837 : int64 offset_est,
3838 : int64 count_est)
3839 : {
3840 5424 : double input_rows = *rows;
3841 5424 : Cost input_startup_cost = *startup_cost;
3842 5424 : Cost input_total_cost = *total_cost;
3843 :
3844 5424 : if (offset_est != 0)
3845 : {
3846 : double offset_rows;
3847 :
3848 666 : if (offset_est > 0)
3849 642 : offset_rows = (double) offset_est;
3850 : else
3851 24 : offset_rows = clamp_row_est(input_rows * 0.10);
3852 666 : if (offset_rows > *rows)
3853 34 : offset_rows = *rows;
3854 666 : if (input_rows > 0)
3855 666 : *startup_cost +=
3856 666 : (input_total_cost - input_startup_cost)
3857 666 : * offset_rows / input_rows;
3858 666 : *rows -= offset_rows;
3859 666 : if (*rows < 1)
3860 34 : *rows = 1;
3861 : }
3862 :
3863 5424 : if (count_est != 0)
3864 : {
3865 : double count_rows;
3866 :
3867 5376 : if (count_est > 0)
3868 5370 : count_rows = (double) count_est;
3869 : else
3870 6 : count_rows = clamp_row_est(input_rows * 0.10);
3871 5376 : if (count_rows > *rows)
3872 260 : count_rows = *rows;
3873 5376 : if (input_rows > 0)
3874 5376 : *total_cost = *startup_cost +
3875 5376 : (input_total_cost - input_startup_cost)
3876 5376 : * count_rows / input_rows;
3877 5376 : *rows = count_rows;
3878 5376 : if (*rows < 1)
3879 0 : *rows = 1;
3880 : }
3881 5424 : }
3882 :
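/*
 * Illustrative sketch, not part of pathnode.c: adjust_limit_rows_costs()
 * worked by hand on made-up inputs (1000 rows, startup cost 0, total cost
 * 100) with OFFSET 100 and LIMIT 10.  A real caller obtains offset_est and
 * count_est from preprocess_limit().
 */
#include <stdio.h>

int
main(void)
{
	double		rows = 1000.0;
	double		startup_cost = 0.0;
	double		total_cost = 100.0;

	/* OFFSET 100: pay 10% of the run cost up front, drop 100 rows */
	startup_cost += (100.0 - 0.0) * 100.0 / 1000.0; /* -> 10 */
	rows -= 100.0;				/* -> 900 */

	/* LIMIT 10: stop after another 1% of the original run cost */
	total_cost = startup_cost + (100.0 - 0.0) * 10.0 / 1000.0; /* -> 11 */
	rows = 10.0;

	printf("rows = %g, startup = %g, total = %g\n",
		   rows, startup_cost, total_cost);
	return 0;
}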
3883 :
3884 : /*
3885 : * reparameterize_path
3886 : * Attempt to modify a Path to have greater parameterization
3887 : *
3888 : * We use this to attempt to bring all child paths of an appendrel to the
3889 : * same parameterization level, ensuring that they all enforce the same set
3890 : * of join quals (and thus that that parameterization can be attributed to
3891 : * an append path built from such paths). Currently, only a few path types
3892 : * are supported here, though more could be added at need. We return NULL
3893 : * if we can't reparameterize the given path.
3894 : *
3895 : * Note: we intentionally do not pass created paths to add_path(); it would
3896 : * possibly try to delete them on the grounds of being cost-inferior to the
3897 : * paths they were made from, and we don't want that. Paths made here are
3898 : * not necessarily of general-purpose usefulness, but they can be useful
3899 : * as members of an append path.
3900 : */
3901 : Path *
3902 308 : reparameterize_path(PlannerInfo *root, Path *path,
3903 : Relids required_outer,
3904 : double loop_count)
3905 : {
3906 308 : RelOptInfo *rel = path->parent;
3907 :
3908 : /* Can only increase, not decrease, path's parameterization */
3909 308 : if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
3910 0 : return NULL;
3911 308 : switch (path->pathtype)
3912 : {
3913 228 : case T_SeqScan:
3914 228 : return create_seqscan_path(root, rel, required_outer, 0);
3915 0 : case T_SampleScan:
3916 0 : return (Path *) create_samplescan_path(root, rel, required_outer);
3917 0 : case T_IndexScan:
3918 : case T_IndexOnlyScan:
3919 : {
3920 0 : IndexPath *ipath = (IndexPath *) path;
3921 0 : IndexPath *newpath = makeNode(IndexPath);
3922 :
3923 : /*
3924 : * We can't use create_index_path directly, and would not want
3925 : * to because it would re-compute the indexqual conditions
3926             : 				 * to because it would re-compute the indexqual conditions,
3927 : * flat-copy the path node, revise its param_info, and redo
3928 : * the cost estimate.
3929 : */
3930 0 : memcpy(newpath, ipath, sizeof(IndexPath));
3931 0 : newpath->path.param_info =
3932 0 : get_baserel_parampathinfo(root, rel, required_outer);
3933 0 : cost_index(newpath, root, loop_count, false);
3934 0 : return (Path *) newpath;
3935 : }
3936 0 : case T_BitmapHeapScan:
3937 : {
3938 0 : BitmapHeapPath *bpath = (BitmapHeapPath *) path;
3939 :
3940 0 : return (Path *) create_bitmap_heap_path(root,
3941 : rel,
3942 : bpath->bitmapqual,
3943 : required_outer,
3944 : loop_count, 0);
3945 : }
3946 0 : case T_SubqueryScan:
3947 : {
3948 0 : SubqueryScanPath *spath = (SubqueryScanPath *) path;
3949 0 : Path *subpath = spath->subpath;
3950 : bool trivial_pathtarget;
3951 :
3952 : /*
3953 : * If existing node has zero extra cost, we must have decided
3954 : * its target is trivial. (The converse is not true, because
3955 : * it might have a trivial target but quals to enforce; but in
3956 : * that case the new node will too, so it doesn't matter
3957 : * whether we get the right answer here.)
3958 : */
3959 0 : trivial_pathtarget =
3960 0 : (subpath->total_cost == spath->path.total_cost);
3961 :
3962 0 : return (Path *) create_subqueryscan_path(root,
3963 : rel,
3964 : subpath,
3965 : trivial_pathtarget,
3966 : spath->path.pathkeys,
3967 : required_outer);
3968 : }
3969 48 : case T_Result:
3970 : /* Supported only for RTE_RESULT scan paths */
3971 48 : if (IsA(path, Path))
3972 48 : return create_resultscan_path(root, rel, required_outer);
3973 0 : break;
3974 0 : case T_Append:
3975 : {
3976 0 : AppendPath *apath = (AppendPath *) path;
3977 0 : List *childpaths = NIL;
3978 0 : List *partialpaths = NIL;
3979 : int i;
3980 : ListCell *lc;
3981 :
3982 : /* Reparameterize the children */
3983 0 : i = 0;
3984 0 : foreach(lc, apath->subpaths)
3985 : {
3986 0 : Path *spath = (Path *) lfirst(lc);
3987 :
3988 0 : spath = reparameterize_path(root, spath,
3989 : required_outer,
3990 : loop_count);
3991 0 : if (spath == NULL)
3992 0 : return NULL;
3993 : /* We have to re-split the regular and partial paths */
3994 0 : if (i < apath->first_partial_path)
3995 0 : childpaths = lappend(childpaths, spath);
3996 : else
3997 0 : partialpaths = lappend(partialpaths, spath);
3998 0 : i++;
3999 : }
4000 0 : return (Path *)
4001 0 : create_append_path(root, rel, childpaths, partialpaths,
4002 : apath->path.pathkeys, required_outer,
4003 : apath->path.parallel_workers,
4004 0 : apath->path.parallel_aware,
4005 : -1);
4006 : }
4007 0 : case T_Material:
4008 : {
4009 0 : MaterialPath *mpath = (MaterialPath *) path;
4010 0 : Path *spath = mpath->subpath;
4011 :
4012 0 : spath = reparameterize_path(root, spath,
4013 : required_outer,
4014 : loop_count);
4015 0 : if (spath == NULL)
4016 0 : return NULL;
4017 0 : return (Path *) create_material_path(rel, spath);
4018 : }
4019 0 : case T_Memoize:
4020 : {
4021 0 : MemoizePath *mpath = (MemoizePath *) path;
4022 0 : Path *spath = mpath->subpath;
4023 :
4024 0 : spath = reparameterize_path(root, spath,
4025 : required_outer,
4026 : loop_count);
4027 0 : if (spath == NULL)
4028 0 : return NULL;
4029 0 : return (Path *) create_memoize_path(root, rel,
4030 : spath,
4031 : mpath->param_exprs,
4032 : mpath->hash_operators,
4033 0 : mpath->singlerow,
4034 0 : mpath->binary_mode,
4035 : mpath->calls);
4036 : }
4037 32 : default:
4038 32 : break;
4039 : }
4040 32 : return NULL;
4041 : }
4042 :
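/*
 * Usage sketch (hypothetical, not taken from this file): pushing a child
 * path up to the parameterization required for a parameterized append path.
 * "child_path", "required_outer", and "loop_count" are assumed to come from
 * the appendrel-planning caller; a NULL result means this child path cannot
 * be used and another one must be chosen.
 */
if (!bms_equal(PATH_REQ_OUTER(child_path), required_outer))
	child_path = reparameterize_path(root, child_path,
									 required_outer, loop_count);
if (child_path == NULL)
	return NULL;				/* give up on this child path */
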
4043 : /*
4044 : * reparameterize_path_by_child
4045 : * Given a path parameterized by the parent of the given child relation,
4046 : * translate the path to be parameterized by the given child relation.
4047 : *
4048 : * The function creates a new path of the same type as the given path, but
4049 : * parameterized by the given child relation. Most fields from the original
4050 : * path can simply be flat-copied, but any expressions must be adjusted to
4051 : * refer to the correct varnos, and any paths must be recursively
4052 : * reparameterized. Other fields that refer to specific relids also need
4053 : * adjustment.
4054 : *
4055 : * The cost, number of rows, width and parallel path properties depend upon
4056 : * path->parent, which does not change during the translation. Hence those
4057 : * members are copied as they are.
4058 : *
4059 : * Currently, only a few path types are supported here, though more could be
4060 : * added at need. We return NULL if we can't reparameterize the given path.
4061 : */
4062 : Path *
4063 8512 : reparameterize_path_by_child(PlannerInfo *root, Path *path,
4064 : RelOptInfo *child_rel)
4065 : {
4066 :
4067 : #define FLAT_COPY_PATH(newnode, node, nodetype) \
4068 : ( (newnode) = makeNode(nodetype), \
4069 : memcpy((newnode), (node), sizeof(nodetype)) )
4070 :
4071 : #define ADJUST_CHILD_ATTRS(node) \
4072 : ((node) = \
4073 : (List *) adjust_appendrel_attrs_multilevel(root, (Node *) (node), \
4074 : child_rel, \
4075 : child_rel->top_parent))
4076 :
4077 : #define REPARAMETERIZE_CHILD_PATH(path) \
4078 : do { \
4079 : (path) = reparameterize_path_by_child(root, (path), child_rel); \
4080 : if ((path) == NULL) \
4081 : return NULL; \
4082 : } while(0)
4083 :
4084 : #define REPARAMETERIZE_CHILD_PATH_LIST(pathlist) \
4085 : do { \
4086 : if ((pathlist) != NIL) \
4087 : { \
4088 : (pathlist) = reparameterize_pathlist_by_child(root, (pathlist), \
4089 : child_rel); \
4090 : if ((pathlist) == NIL) \
4091 : return NULL; \
4092 : } \
4093 : } while(0)
4094 :
4095 : Path *new_path;
4096 : ParamPathInfo *new_ppi;
4097 : ParamPathInfo *old_ppi;
4098 : Relids required_outer;
4099 :
4100 : /*
4101             : 	 * If the path is not parameterized by the parent of the given relation, it
4102 : * doesn't need reparameterization.
4103 : */
4104 8512 : if (!path->param_info ||
4105 8428 : !bms_overlap(PATH_REQ_OUTER(path), child_rel->top_parent_relids))
4106 240 : return path;
4107 :
4108 : /*
4109 : * If possible, reparameterize the given path, making a copy.
4110 : *
4111 : * This function is currently only applied to the inner side of a nestloop
4112 : * join that is being partitioned by the partitionwise-join code. Hence,
4113 : * we need only support path types that plausibly arise in that context.
4114 : * (In particular, supporting sorted path types would be a waste of code
4115 : * and cycles: even if we translated them here, they'd just lose in
4116 : * subsequent cost comparisons.) If we do see an unsupported path type,
4117 : * that just means we won't be able to generate a partitionwise-join plan
4118 : * using that path type.
4119 : */
4120 8272 : switch (nodeTag(path))
4121 : {
4122 432 : case T_Path:
4123 432 : FLAT_COPY_PATH(new_path, path, Path);
4124 432 : break;
4125 :
4126 5112 : case T_IndexPath:
4127 : {
4128 : IndexPath *ipath;
4129 :
4130 5112 : FLAT_COPY_PATH(ipath, path, IndexPath);
4131 5112 : ADJUST_CHILD_ATTRS(ipath->indexclauses);
4132 5112 : new_path = (Path *) ipath;
4133 : }
4134 5112 : break;
4135 :
4136 48 : case T_BitmapHeapPath:
4137 : {
4138 : BitmapHeapPath *bhpath;
4139 :
4140 48 : FLAT_COPY_PATH(bhpath, path, BitmapHeapPath);
4141 48 : REPARAMETERIZE_CHILD_PATH(bhpath->bitmapqual);
4142 48 : new_path = (Path *) bhpath;
4143 : }
4144 48 : break;
4145 :
4146 24 : case T_BitmapAndPath:
4147 : {
4148 : BitmapAndPath *bapath;
4149 :
4150 24 : FLAT_COPY_PATH(bapath, path, BitmapAndPath);
4151 24 : REPARAMETERIZE_CHILD_PATH_LIST(bapath->bitmapquals);
4152 24 : new_path = (Path *) bapath;
4153 : }
4154 24 : break;
4155 :
4156 24 : case T_BitmapOrPath:
4157 : {
4158 : BitmapOrPath *bopath;
4159 :
4160 24 : FLAT_COPY_PATH(bopath, path, BitmapOrPath);
4161 24 : REPARAMETERIZE_CHILD_PATH_LIST(bopath->bitmapquals);
4162 24 : new_path = (Path *) bopath;
4163 : }
4164 24 : break;
4165 :
4166 52 : case T_ForeignPath:
4167 : {
4168 : ForeignPath *fpath;
4169 : ReparameterizeForeignPathByChild_function rfpc_func;
4170 :
4171 52 : FLAT_COPY_PATH(fpath, path, ForeignPath);
4172 52 : if (fpath->fdw_outerpath)
4173 0 : REPARAMETERIZE_CHILD_PATH(fpath->fdw_outerpath);
4174 52 : if (fpath->fdw_restrictinfo)
4175 0 : ADJUST_CHILD_ATTRS(fpath->fdw_restrictinfo);
4176 :
4177 : /* Hand over to FDW if needed. */
4178 52 : rfpc_func =
4179 52 : path->parent->fdwroutine->ReparameterizeForeignPathByChild;
4180 52 : if (rfpc_func)
4181 0 : fpath->fdw_private = rfpc_func(root, fpath->fdw_private,
4182 : child_rel);
4183 52 : new_path = (Path *) fpath;
4184 : }
4185 52 : break;
4186 :
4187 0 : case T_CustomPath:
4188 : {
4189 : CustomPath *cpath;
4190 :
4191 0 : FLAT_COPY_PATH(cpath, path, CustomPath);
4192 0 : REPARAMETERIZE_CHILD_PATH_LIST(cpath->custom_paths);
4193 0 : if (cpath->custom_restrictinfo)
4194 0 : ADJUST_CHILD_ATTRS(cpath->custom_restrictinfo);
4195 0 : if (cpath->methods &&
4196 0 : cpath->methods->ReparameterizeCustomPathByChild)
4197 0 : cpath->custom_private =
4198 0 : cpath->methods->ReparameterizeCustomPathByChild(root,
4199 : cpath->custom_private,
4200 : child_rel);
4201 0 : new_path = (Path *) cpath;
4202 : }
4203 0 : break;
4204 :
4205 300 : case T_NestPath:
4206 : {
4207 : JoinPath *jpath;
4208 : NestPath *npath;
4209 :
4210 300 : FLAT_COPY_PATH(npath, path, NestPath);
4211 :
4212 300 : jpath = (JoinPath *) npath;
4213 300 : REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4214 300 : REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4215 300 : ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4216 300 : new_path = (Path *) npath;
4217 : }
4218 300 : break;
4219 :
4220 36 : case T_MergePath:
4221 : {
4222 : JoinPath *jpath;
4223 : MergePath *mpath;
4224 :
4225 36 : FLAT_COPY_PATH(mpath, path, MergePath);
4226 :
4227 36 : jpath = (JoinPath *) mpath;
4228 36 : REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4229 36 : REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4230 36 : ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4231 36 : ADJUST_CHILD_ATTRS(mpath->path_mergeclauses);
4232 36 : new_path = (Path *) mpath;
4233 : }
4234 36 : break;
4235 :
4236 168 : case T_HashPath:
4237 : {
4238 : JoinPath *jpath;
4239 : HashPath *hpath;
4240 :
4241 168 : FLAT_COPY_PATH(hpath, path, HashPath);
4242 :
4243 168 : jpath = (JoinPath *) hpath;
4244 168 : REPARAMETERIZE_CHILD_PATH(jpath->outerjoinpath);
4245 168 : REPARAMETERIZE_CHILD_PATH(jpath->innerjoinpath);
4246 168 : ADJUST_CHILD_ATTRS(jpath->joinrestrictinfo);
4247 168 : ADJUST_CHILD_ATTRS(hpath->path_hashclauses);
4248 168 : new_path = (Path *) hpath;
4249 : }
4250 168 : break;
4251 :
4252 120 : case T_AppendPath:
4253 : {
4254 : AppendPath *apath;
4255 :
4256 120 : FLAT_COPY_PATH(apath, path, AppendPath);
4257 120 : REPARAMETERIZE_CHILD_PATH_LIST(apath->subpaths);
4258 120 : new_path = (Path *) apath;
4259 : }
4260 120 : break;
4261 :
4262 0 : case T_MaterialPath:
4263 : {
4264 : MaterialPath *mpath;
4265 :
4266 0 : FLAT_COPY_PATH(mpath, path, MaterialPath);
4267 0 : REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4268 0 : new_path = (Path *) mpath;
4269 : }
4270 0 : break;
4271 :
4272 1956 : case T_MemoizePath:
4273 : {
4274 : MemoizePath *mpath;
4275 :
4276 1956 : FLAT_COPY_PATH(mpath, path, MemoizePath);
4277 1956 : REPARAMETERIZE_CHILD_PATH(mpath->subpath);
4278 1956 : ADJUST_CHILD_ATTRS(mpath->param_exprs);
4279 1956 : new_path = (Path *) mpath;
4280 : }
4281 1956 : break;
4282 :
4283 0 : case T_GatherPath:
4284 : {
4285 : GatherPath *gpath;
4286 :
4287 0 : FLAT_COPY_PATH(gpath, path, GatherPath);
4288 0 : REPARAMETERIZE_CHILD_PATH(gpath->subpath);
4289 0 : new_path = (Path *) gpath;
4290 : }
4291 0 : break;
4292 :
4293 0 : default:
4294 :
4295 : /* We don't know how to reparameterize this path. */
4296 0 : return NULL;
4297 : }
4298 :
4299 : /*
4300 : * Adjust the parameterization information, which refers to the topmost
4301 : * parent. The topmost parent can be multiple levels away from the given
4302 : * child, hence use multi-level expression adjustment routines.
4303 : */
4304 8272 : old_ppi = new_path->param_info;
4305 : required_outer =
4306 8272 : adjust_child_relids_multilevel(root, old_ppi->ppi_req_outer,
4307 : child_rel,
4308 8272 : child_rel->top_parent);
4309 :
4310 : /* If we already have a PPI for this parameterization, just return it */
4311 8272 : new_ppi = find_param_path_info(new_path->parent, required_outer);
4312 :
4313 : /*
4314 : * If not, build a new one and link it to the list of PPIs. For the same
4315             : 	 * reason as explained in mark_dummy_rel(), allocate the new PPI in the same
4316 : * context the given RelOptInfo is in.
4317 : */
4318 8272 : if (new_ppi == NULL)
4319 : {
4320 : MemoryContext oldcontext;
4321 2046 : RelOptInfo *rel = path->parent;
4322 :
4323 2046 : oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
4324 :
4325 2046 : new_ppi = makeNode(ParamPathInfo);
4326 2046 : new_ppi->ppi_req_outer = bms_copy(required_outer);
4327 2046 : new_ppi->ppi_rows = old_ppi->ppi_rows;
4328 2046 : new_ppi->ppi_clauses = old_ppi->ppi_clauses;
4329 2046 : ADJUST_CHILD_ATTRS(new_ppi->ppi_clauses);
4330 2046 : new_ppi->ppi_serials = bms_copy(old_ppi->ppi_serials);
4331 2046 : rel->ppilist = lappend(rel->ppilist, new_ppi);
4332 :
4333 2046 : MemoryContextSwitchTo(oldcontext);
4334 : }
4335 8272 : bms_free(required_outer);
4336 :
4337 8272 : new_path->param_info = new_ppi;
4338 :
4339 : /*
4340 : * Adjust the path target if the parent of the outer relation is
4341             : 	 * referenced in the targetlist.  This can happen when only the parent of the
4342 : * outer relation is laterally referenced in this relation.
4343 : */
4344 8272 : if (bms_overlap(path->parent->lateral_relids,
4345 8272 : child_rel->top_parent_relids))
4346 : {
4347 1152 : new_path->pathtarget = copy_pathtarget(new_path->pathtarget);
4348 1152 : ADJUST_CHILD_ATTRS(new_path->pathtarget->exprs);
4349 : }
4350 :
4351 8272 : return new_path;
4352 : }
4353 :
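/*
 * Usage sketch (hypothetical, not taken from this file): the
 * partitionwise-join situation this function exists for.  While building a
 * child nestloop join, the inner path may still be parameterized by the
 * *parent* of the outer child rel and must be translated first.
 * "inner_path" and "outer_path" are assumed to come from the join-path
 * caller.
 */
if (inner_path->param_info &&
	bms_overlap(PATH_REQ_OUTER(inner_path),
				outer_path->parent->top_parent_relids))
{
	inner_path = reparameterize_path_by_child(root, inner_path,
											  outer_path->parent);
	if (inner_path == NULL)
		return;					/* can't build this child join */
}
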
4354 : /*
4355 : * reparameterize_pathlist_by_child
4356 : * Helper function to reparameterize a list of paths by given child rel.
4357 : */
4358 : static List *
4359 168 : reparameterize_pathlist_by_child(PlannerInfo *root,
4360 : List *pathlist,
4361 : RelOptInfo *child_rel)
4362 : {
4363 : ListCell *lc;
4364 168 : List *result = NIL;
4365 :
4366 504 : foreach(lc, pathlist)
4367 : {
4368 336 : Path *path = reparameterize_path_by_child(root, lfirst(lc),
4369 : child_rel);
4370 :
4371 336 : if (path == NULL)
4372 : {
4373 0 : list_free(result);
4374 0 : return NIL;
4375 : }
4376 :
4377 336 : result = lappend(result, path);
4378 : }
4379 :
4380 168 : return result;
4381 : }