Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * costsize.c
4 : * Routines to compute (and set) relation sizes and path costs
5 : *
6 : * Path costs are measured in arbitrary units established by these basic
7 : * parameters:
8 : *
9 : * seq_page_cost Cost of a sequential page fetch
10 : * random_page_cost Cost of a non-sequential (random) page fetch
11 : * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 : * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 : * cpu_operator_cost Cost of CPU time to execute an operator or function
14 : * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
15 : * parallel_setup_cost Cost of setting up shared memory for parallelism
16 : *
17 : * We expect that the kernel will typically do some amount of read-ahead
18 : * optimization; this in conjunction with seek costs means that seq_page_cost
19 : * is normally considerably less than random_page_cost. (However, if the
20 : * database is fully cached in RAM, it is reasonable to set them equal.)
21 : *
22 : * We also use a rough estimate "effective_cache_size" of the number of
23 : * disk pages in Postgres + OS-level disk cache. (We can't simply use
24 : * NBuffers for this purpose because that would ignore the effects of
25 : * the kernel's disk cache.)
26 : *
27 : * Obviously, taking constants for these values is an oversimplification,
28 : * but it's tough enough to get any useful estimates even at this level of
29 : * detail. Note that all of these parameters are user-settable, in case
30 : * the default values are drastically off for a particular platform.
31 : *
32 : * seq_page_cost and random_page_cost can also be overridden for an individual
33 : * tablespace, in case some data is on a fast disk and other data is on a slow
34 : * disk. Per-tablespace overrides never apply to temporary work files such as
35 : * an external sort or a materialize node that overflows work_mem.
36 : *
37 : * We compute two separate costs for each path:
38 : * total_cost: total estimated cost to fetch all tuples
39 : * startup_cost: cost that is expended before first tuple is fetched
40 : * In some scenarios, such as when there is a LIMIT or we are implementing
41 : * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42 : * path's result. A caller can estimate the cost of fetching a partial
43 : * result by interpolating between startup_cost and total_cost. In detail:
44 : * actual_cost = startup_cost +
45 : * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46 : * Note that a base relation's rows count (and, by extension, plan_rows for
47 : * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
48 : * that this equation works properly. (Note: while path->rows is never zero
49 : * for ordinary relations, it is zero for paths for provably-empty relations,
50 : * so beware of division-by-zero.) The LIMIT is applied as a top-level
51 : * plan node.
52 : *
53 : * For largely historical reasons, most of the routines in this module use
54 : * the passed result Path only to store their results (rows, startup_cost and
55 : * total_cost) into. All the input data they need is passed as separate
56 : * parameters, even though much of it could be extracted from the Path.
57 : * An exception is made for the cost_XXXjoin() routines, which expect all
58 : * the other fields of the passed XXXPath to be filled in, and similarly
59 : * cost_index() assumes the passed IndexPath is valid except for its output
60 : * values.
61 : *
62 : *
63 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
64 : * Portions Copyright (c) 1994, Regents of the University of California
65 : *
66 : * IDENTIFICATION
67 : * src/backend/optimizer/path/costsize.c
68 : *
69 : *-------------------------------------------------------------------------
70 : */
71 :
72 : #include "postgres.h"
73 :
74 : #include <limits.h>
75 : #include <math.h>
76 :
77 : #include "access/amapi.h"
78 : #include "access/htup_details.h"
79 : #include "access/tsmapi.h"
80 : #include "executor/executor.h"
81 : #include "executor/nodeAgg.h"
82 : #include "executor/nodeHash.h"
83 : #include "executor/nodeMemoize.h"
84 : #include "miscadmin.h"
85 : #include "nodes/makefuncs.h"
86 : #include "nodes/nodeFuncs.h"
87 : #include "optimizer/clauses.h"
88 : #include "optimizer/cost.h"
89 : #include "optimizer/optimizer.h"
90 : #include "optimizer/pathnode.h"
91 : #include "optimizer/paths.h"
92 : #include "optimizer/placeholder.h"
93 : #include "optimizer/plancat.h"
94 : #include "optimizer/planmain.h"
95 : #include "optimizer/restrictinfo.h"
96 : #include "parser/parsetree.h"
97 : #include "utils/lsyscache.h"
98 : #include "utils/selfuncs.h"
99 : #include "utils/spccache.h"
100 : #include "utils/tuplesort.h"
101 :
102 :
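/*
 * Editorial sketch (not part of PostgreSQL): how a caller can apply the
 * partial-fetch interpolation described in the file header comment above.
 * The function name and the zero-row guard are illustrative only; compare
 * the LIMIT costing in pathnode.c for real usage.
 */
static double
partial_fetch_cost(double startup_cost, double total_cost,
				   double tuples_to_fetch, double path_rows)
{
	/* path->rows can be zero for provably-empty relations; avoid x/0 */
	if (path_rows <= 0)
		return startup_cost;
	return startup_cost +
		(total_cost - startup_cost) * tuples_to_fetch / path_rows;
}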
103 : #define LOG2(x) (log(x) / 0.693147180559945)
104 :
105 : /*
106 : * Append and MergeAppend nodes are less expensive than some other operations
107 : * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
108 : * per-tuple cost as cpu_tuple_cost multiplied by this value.
109 : */
110 : #define APPEND_CPU_COST_MULTIPLIER 0.5
111 :
112 : /*
113 : * Maximum value for row estimates. We cap row estimates to this to help
114 : * ensure that costs based on these estimates remain within the range of what
115 : * double can represent. add_path() wouldn't act sanely given infinite or NaN
116 : * cost values.
117 : */
118 : #define MAXIMUM_ROWCOUNT 1e100
119 :
120 : double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
121 : double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
122 : double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
123 : double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
124 : double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
125 : double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
126 : double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
127 : double recursive_worktable_factor = DEFAULT_RECURSIVE_WORKTABLE_FACTOR;
128 :
129 : int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
130 :
131 : Cost disable_cost = 1.0e10;
132 :
133 : int max_parallel_workers_per_gather = 2;
134 :
135 : bool enable_seqscan = true;
136 : bool enable_indexscan = true;
137 : bool enable_indexonlyscan = true;
138 : bool enable_bitmapscan = true;
139 : bool enable_tidscan = true;
140 : bool enable_sort = true;
141 : bool enable_incremental_sort = true;
142 : bool enable_hashagg = true;
143 : bool enable_nestloop = true;
144 : bool enable_material = true;
145 : bool enable_memoize = true;
146 : bool enable_mergejoin = true;
147 : bool enable_hashjoin = true;
148 : bool enable_gathermerge = true;
149 : bool enable_partitionwise_join = false;
150 : bool enable_partitionwise_aggregate = false;
151 : bool enable_parallel_append = true;
152 : bool enable_parallel_hash = true;
153 : bool enable_partition_pruning = true;
154 : bool enable_presorted_aggregate = true;
155 : bool enable_async_append = true;
156 :
157 : typedef struct
158 : {
159 : PlannerInfo *root;
160 : QualCost total;
161 : } cost_qual_eval_context;
162 :
163 : static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
164 : static MergeScanSelCache *cached_scansel(PlannerInfo *root,
165 : RestrictInfo *rinfo,
166 : PathKey *pathkey);
167 : static void cost_rescan(PlannerInfo *root, Path *path,
168 : Cost *rescan_startup_cost, Cost *rescan_total_cost);
169 : static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
170 : static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
171 : ParamPathInfo *param_info,
172 : QualCost *qpqual_cost);
173 : static bool has_indexed_join_quals(NestPath *path);
174 : static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
175 : List *quals);
176 : static double calc_joinrel_size_estimate(PlannerInfo *root,
177 : RelOptInfo *joinrel,
178 : RelOptInfo *outer_rel,
179 : RelOptInfo *inner_rel,
180 : double outer_rows,
181 : double inner_rows,
182 : SpecialJoinInfo *sjinfo,
183 : List *restrictlist);
184 : static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
185 : Relids outer_relids,
186 : Relids inner_relids,
187 : SpecialJoinInfo *sjinfo,
188 : List **restrictlist);
189 : static Cost append_nonpartial_cost(List *subpaths, int numpaths,
190 : int parallel_workers);
191 : static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
192 : static int32 get_expr_width(PlannerInfo *root, const Node *expr);
193 : static double relation_byte_size(double tuples, int width);
194 : static double page_size(double tuples, int width);
195 : static double get_parallel_divisor(Path *path);
196 :
197 :
198 : /*
199 : * clamp_row_est
200 : * Force a row-count estimate to a sane value.
201 : */
202 : double
203 6265402 : clamp_row_est(double nrows)
204 : {
205 : /*
206 : * Avoid infinite and NaN row estimates. Costs derived from such values
207 : * are going to be useless. Also force the estimate to be at least one
208 : * row, to make explain output look better and to avoid possible
209 : * divide-by-zero when interpolating costs. Make it an integer, too.
210 : */
211 6265402 : if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
212 0 : nrows = MAXIMUM_ROWCOUNT;
213 6265402 : else if (nrows <= 1.0)
214 2387448 : nrows = 1.0;
215 : else
216 3877954 : nrows = rint(nrows);
217 :
218 6265402 : return nrows;
219 : }
220 :
221 : /*
222 : * clamp_cardinality_to_long
223 : * Cast a Cardinality value to a sane long value.
224 : */
225 : long
226 40788 : clamp_cardinality_to_long(Cardinality x)
227 : {
228 : /*
229 : * Just for paranoia's sake, ensure we do something sane with negative or
230 : * NaN values.
231 : */
232 40788 : if (isnan(x))
233 0 : return LONG_MAX;
234 40788 : if (x <= 0)
235 480 : return 0;
236 :
237 : /*
238 : * If "long" is 64 bits, then LONG_MAX cannot be represented exactly as a
239 : * double. Casting it to double and back may well result in overflow due
240 : * to rounding, so avoid doing that. We trust that any double value that
241 : * compares strictly less than "(double) LONG_MAX" will cast to a
242 : * representable "long" value.
243 : */
244 40308 : return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
245 : }
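/*
 * Editorial note (not part of PostgreSQL): on LP64 platforms LONG_MAX is
 * 2^63 - 1, which has no exact double representation; the nearest double
 * is 2^63 exactly, so the naive "(long) (double) LONG_MAX" overflows
 * (undefined behavior in C).  The largest double strictly below
 * (double) LONG_MAX is 2^63 - 1024, which converts safely; that is what
 * justifies the '<' comparison above.
 */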
246 :
247 :
248 : /*
249 : * cost_seqscan
250 : * Determines and returns the cost of scanning a relation sequentially.
251 : *
252 : * 'baserel' is the relation to be scanned
253 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
254 : */
255 : void
256 352704 : cost_seqscan(Path *path, PlannerInfo *root,
257 : RelOptInfo *baserel, ParamPathInfo *param_info)
258 : {
259 352704 : Cost startup_cost = 0;
260 : Cost cpu_run_cost;
261 : Cost disk_run_cost;
262 : double spc_seq_page_cost;
263 : QualCost qpqual_cost;
264 : Cost cpu_per_tuple;
265 :
266 : /* Should only be applied to base relations */
267 : Assert(baserel->relid > 0);
268 : Assert(baserel->rtekind == RTE_RELATION);
269 :
270 : /* Mark the path with the correct row estimate */
271 352704 : if (param_info)
272 606 : path->rows = param_info->ppi_rows;
273 : else
274 352098 : path->rows = baserel->rows;
275 :
276 352704 : if (!enable_seqscan)
277 15174 : startup_cost += disable_cost;
278 :
279 : /* fetch estimated page cost for tablespace containing table */
280 352704 : get_tablespace_page_costs(baserel->reltablespace,
281 : NULL,
282 : &spc_seq_page_cost);
283 :
284 : /*
285 : * disk costs
286 : */
287 352704 : disk_run_cost = spc_seq_page_cost * baserel->pages;
288 :
289 : /* CPU costs */
290 352704 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
291 :
292 352704 : startup_cost += qpqual_cost.startup;
293 352704 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
294 352704 : cpu_run_cost = cpu_per_tuple * baserel->tuples;
295 : /* tlist eval costs are paid per output row, not per tuple scanned */
296 352704 : startup_cost += path->pathtarget->cost.startup;
297 352704 : cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
298 :
299 : /* Adjust costing for parallelism, if used. */
300 352704 : if (path->parallel_workers > 0)
301 : {
302 24698 : double parallel_divisor = get_parallel_divisor(path);
303 :
304 : /* The CPU cost is divided among all the workers. */
305 24698 : cpu_run_cost /= parallel_divisor;
306 :
307 : /*
308 : * It may be possible to amortize some of the I/O cost, but probably
309 : * not very much, because most operating systems already do aggressive
310 : * prefetching. For now, we assume that the disk run cost can't be
311 : * amortized at all.
312 : */
313 :
314 : /*
315 : * In the case of a parallel plan, the row count needs to represent
316 : * the number of tuples processed per worker.
317 : */
318 24698 : path->rows = clamp_row_est(path->rows / parallel_divisor);
319 : }
320 :
321 352704 : path->startup_cost = startup_cost;
322 352704 : path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
323 352704 : }
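/*
 * Editorial worked example (assumed table, default GUCs): a sequential
 * scan of a 10000-page, 1,000,000-tuple heap with one simple operator
 * qual costs roughly
 *
 *     disk_run_cost = 1.0 * 10000                 = 10000
 *     cpu_run_cost  = (0.01 + 0.0025) * 1000000   = 12500
 *
 * with seq_page_cost = 1.0, cpu_tuple_cost = 0.01 and cpu_operator_cost =
 * 0.0025, i.e. about 22500 plus any tlist evaluation cost.  Note that in
 * the parallel case only the CPU term is divided among workers; the disk
 * term is charged in full.
 */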
324 :
325 : /*
326 : * cost_samplescan
327 : * Determines and returns the cost of scanning a relation using sampling.
328 : *
329 : * 'baserel' is the relation to be scanned
330 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
331 : */
332 : void
333 252 : cost_samplescan(Path *path, PlannerInfo *root,
334 : RelOptInfo *baserel, ParamPathInfo *param_info)
335 : {
336 252 : Cost startup_cost = 0;
337 252 : Cost run_cost = 0;
338 : RangeTblEntry *rte;
339 : TableSampleClause *tsc;
340 : TsmRoutine *tsm;
341 : double spc_seq_page_cost,
342 : spc_random_page_cost,
343 : spc_page_cost;
344 : QualCost qpqual_cost;
345 : Cost cpu_per_tuple;
346 :
347 : /* Should only be applied to base relations with tablesample clauses */
348 : Assert(baserel->relid > 0);
349 252 : rte = planner_rt_fetch(baserel->relid, root);
350 : Assert(rte->rtekind == RTE_RELATION);
351 252 : tsc = rte->tablesample;
352 : Assert(tsc != NULL);
353 252 : tsm = GetTsmRoutine(tsc->tsmhandler);
354 :
355 : /* Mark the path with the correct row estimate */
356 252 : if (param_info)
357 18 : path->rows = param_info->ppi_rows;
358 : else
359 234 : path->rows = baserel->rows;
360 :
361 : /* fetch estimated page cost for tablespace containing table */
362 252 : get_tablespace_page_costs(baserel->reltablespace,
363 : &spc_random_page_cost,
364 : &spc_seq_page_cost);
365 :
366 : /* if NextSampleBlock is used, assume random access, else sequential */
367 504 : spc_page_cost = (tsm->NextSampleBlock != NULL) ?
368 252 : spc_random_page_cost : spc_seq_page_cost;
369 :
370 : /*
371 : * disk costs (recall that baserel->pages has already been set to the
372 : * number of pages the sampling method will visit)
373 : */
374 252 : run_cost += spc_page_cost * baserel->pages;
375 :
376 : /*
377 : * CPU costs (recall that baserel->tuples has already been set to the
378 : * number of tuples the sampling method will select). Note that we ignore
379 : * execution cost of the TABLESAMPLE parameter expressions; they will be
380 : * evaluated only once per scan, and in most usages they'll likely be
381 : * simple constants anyway. We also don't charge anything for the
382 : * calculations the sampling method might do internally.
383 : */
384 252 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
385 :
386 252 : startup_cost += qpqual_cost.startup;
387 252 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
388 252 : run_cost += cpu_per_tuple * baserel->tuples;
389 : /* tlist eval costs are paid per output row, not per tuple scanned */
390 252 : startup_cost += path->pathtarget->cost.startup;
391 252 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
392 :
393 252 : path->startup_cost = startup_cost;
394 252 : path->total_cost = startup_cost + run_cost;
395 252 : }
396 :
397 : /*
398 : * cost_gather
399 : * Determines and returns the cost of gather path.
400 : *
401 : * 'rel' is the relation to be operated upon
402 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
403 : * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
404 : * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
405 : * correspond to any particular RelOptInfo.
406 : */
407 : void
408 15354 : cost_gather(GatherPath *path, PlannerInfo *root,
409 : RelOptInfo *rel, ParamPathInfo *param_info,
410 : double *rows)
411 : {
412 15354 : Cost startup_cost = 0;
413 15354 : Cost run_cost = 0;
414 :
415 : /* Mark the path with the correct row estimate */
416 15354 : if (rows)
417 1672 : path->path.rows = *rows;
418 13682 : else if (param_info)
419 0 : path->path.rows = param_info->ppi_rows;
420 : else
421 13682 : path->path.rows = rel->rows;
422 :
423 15354 : startup_cost = path->subpath->startup_cost;
424 :
425 15354 : run_cost = path->subpath->total_cost - path->subpath->startup_cost;
426 :
427 : /* Parallel setup and communication cost. */
428 15354 : startup_cost += parallel_setup_cost;
429 15354 : run_cost += parallel_tuple_cost * path->path.rows;
430 :
431 15354 : path->path.startup_cost = startup_cost;
432 15354 : path->path.total_cost = (startup_cost + run_cost);
433 15354 : }
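/*
 * Editorial worked example (default GUCs): gathering 100000 rows from a
 * subpath with startup_cost = 0 and total_cost = 15000 gives
 *
 *     startup_cost = 0 + 1000 (parallel_setup_cost)
 *     run_cost     = 15000 + 0.1 * 100000 = 25000
 *
 * for a total of 26000.  The parallel_tuple_cost charge per row is what
 * makes plans that funnel many rows through Gather unattractive.
 */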
434 :
435 : /*
436 : * cost_gather_merge
437 : * Determines and returns the cost of gather merge path.
438 : *
439 : * GatherMerge merges several pre-sorted input streams, using a heap that at
440 : * any given instant holds the next tuple from each stream. If there are N
441 : * streams, we need about N*log2(N) tuple comparisons to construct the heap at
442 : * startup, and then for each output tuple, about log2(N) comparisons to
443 : * replace the top heap entry with the next tuple from the same stream.
444 : */
445 : void
446 9446 : cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
447 : RelOptInfo *rel, ParamPathInfo *param_info,
448 : Cost input_startup_cost, Cost input_total_cost,
449 : double *rows)
450 : {
451 9446 : Cost startup_cost = 0;
452 9446 : Cost run_cost = 0;
453 : Cost comparison_cost;
454 : double N;
455 : double logN;
456 :
457 : /* Mark the path with the correct row estimate */
458 9446 : if (rows)
459 4330 : path->path.rows = *rows;
460 5116 : else if (param_info)
461 0 : path->path.rows = param_info->ppi_rows;
462 : else
463 5116 : path->path.rows = rel->rows;
464 :
465 9446 : if (!enable_gathermerge)
466 0 : startup_cost += disable_cost;
467 :
468 : /*
469 : * Add one to the number of workers to account for the leader. This might
470 : * be overgenerous since the leader will do less work than other workers
471 : * in typical cases, but we'll go with it for now.
472 : */
473 : Assert(path->num_workers > 0);
474 9446 : N = (double) path->num_workers + 1;
475 9446 : logN = LOG2(N);
476 :
477 : /* Assumed cost per tuple comparison */
478 9446 : comparison_cost = 2.0 * cpu_operator_cost;
479 :
480 : /* Heap creation cost */
481 9446 : startup_cost += comparison_cost * N * logN;
482 :
483 : /* Per-tuple heap maintenance cost */
484 9446 : run_cost += path->path.rows * comparison_cost * logN;
485 :
486 : /* small cost for heap management, like cost_merge_append */
487 9446 : run_cost += cpu_operator_cost * path->path.rows;
488 :
489 : /*
490 : * Parallel setup and communication cost. Since Gather Merge, unlike
491 : * Gather, requires us to block until a tuple is available from every
492 : * worker, we bump the IPC cost up a little bit as compared with Gather.
493 : * For lack of a better idea, charge an extra 5%.
494 : */
495 9446 : startup_cost += parallel_setup_cost;
496 9446 : run_cost += parallel_tuple_cost * path->path.rows * 1.05;
497 :
498 9446 : path->path.startup_cost = startup_cost + input_startup_cost;
499 9446 : path->path.total_cost = (startup_cost + run_cost + input_total_cost);
500 9446 : }
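/*
 * Editorial worked example (default GUCs, num_workers = 2, so N = 3,
 * logN = log2(3) ~= 1.585, and comparison_cost = 2 * 0.0025 = 0.005).
 * For 100000 output rows the charges added on top of the input path are
 * about
 *
 *     heap creation:     0.005 * 3 * 1.585      ~=     0.02
 *     heap maintenance:  100000 * 0.005 * 1.585 ~=   792.5
 *     heap management:   100000 * 0.0025         =   250
 *     IPC:               100000 * 0.1 * 1.05     = 10500
 *     setup:             parallel_setup_cost     =  1000
 */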
501 :
502 : /*
503 : * cost_index
504 : * Determines and returns the cost of scanning a relation using an index.
505 : *
506 : * 'path' describes the indexscan under consideration, and is complete
507 : * except for the fields to be set by this routine
508 : * 'loop_count' is the number of repetitions of the indexscan to factor into
509 : * estimates of caching behavior
510 : *
511 : * In addition to rows, startup_cost and total_cost, cost_index() sets the
512 : * path's indextotalcost and indexselectivity fields. These values will be
513 : * needed if the IndexPath is used in a BitmapIndexScan.
514 : *
515 : * NOTE: path->indexquals must contain only clauses usable as index
516 : * restrictions. Any additional quals evaluated as qpquals may reduce the
517 : * number of returned tuples, but they won't reduce the number of tuples
518 : * we have to fetch from the table, so they don't reduce the scan cost.
519 : */
520 : void
521 592926 : cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
522 : bool partial_path)
523 : {
524 592926 : IndexOptInfo *index = path->indexinfo;
525 592926 : RelOptInfo *baserel = index->rel;
526 592926 : bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
527 : amcostestimate_function amcostestimate;
528 : List *qpquals;
529 592926 : Cost startup_cost = 0;
530 592926 : Cost run_cost = 0;
531 592926 : Cost cpu_run_cost = 0;
532 : Cost indexStartupCost;
533 : Cost indexTotalCost;
534 : Selectivity indexSelectivity;
535 : double indexCorrelation,
536 : csquared;
537 : double spc_seq_page_cost,
538 : spc_random_page_cost;
539 : Cost min_IO_cost,
540 : max_IO_cost;
541 : QualCost qpqual_cost;
542 : Cost cpu_per_tuple;
543 : double tuples_fetched;
544 : double pages_fetched;
545 : double rand_heap_pages;
546 : double index_pages;
547 :
548 : /* Should only be applied to base relations */
549 : Assert(IsA(baserel, RelOptInfo) &&
550 : IsA(index, IndexOptInfo));
551 : Assert(baserel->relid > 0);
552 : Assert(baserel->rtekind == RTE_RELATION);
553 :
554 : /*
555 : * Mark the path with the correct row estimate, and identify which quals
556 : * will need to be enforced as qpquals. We need not check any quals that
557 : * are implied by the index's predicate, so we can use indrestrictinfo not
558 : * baserestrictinfo as the list of relevant restriction clauses for the
559 : * rel.
560 : */
561 592926 : if (path->path.param_info)
562 : {
563 108680 : path->path.rows = path->path.param_info->ppi_rows;
564 : /* qpquals come from the rel's restriction clauses and ppi_clauses */
565 108680 : qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
566 : path->indexclauses),
567 108680 : extract_nonindex_conditions(path->path.param_info->ppi_clauses,
568 : path->indexclauses));
569 : }
570 : else
571 : {
572 484246 : path->path.rows = baserel->rows;
573 : /* qpquals come from just the rel's restriction clauses */
574 484246 : qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
575 : path->indexclauses);
576 : }
577 :
578 592926 : if (!enable_indexscan)
579 3872 : startup_cost += disable_cost;
580 : /* we don't need to check enable_indexonlyscan; indxpath.c does that */
581 :
582 : /*
583 : * Call index-access-method-specific code to estimate the processing cost
584 : * for scanning the index, as well as the selectivity of the index (ie,
585 : * the fraction of main-table tuples we will have to retrieve) and its
586 : * correlation to the main-table tuple order. We need a cast here because
587 : * pathnodes.h uses a weak function type to avoid including amapi.h.
588 : */
589 592926 : amcostestimate = (amcostestimate_function) index->amcostestimate;
590 592926 : amcostestimate(root, path, loop_count,
591 : &indexStartupCost, &indexTotalCost,
592 : &indexSelectivity, &indexCorrelation,
593 : &index_pages);
594 :
595 : /*
596 : * Save amcostestimate's results for possible use in bitmap scan planning.
597 : * We don't bother to save indexStartupCost or indexCorrelation, because a
598 : * bitmap scan doesn't care about either.
599 : */
600 592926 : path->indextotalcost = indexTotalCost;
601 592926 : path->indexselectivity = indexSelectivity;
602 :
603 : /* all costs for touching index itself included here */
604 592926 : startup_cost += indexStartupCost;
605 592926 : run_cost += indexTotalCost - indexStartupCost;
606 :
607 : /* estimate number of main-table tuples fetched */
608 592926 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
609 :
610 : /* fetch estimated page costs for tablespace containing table */
611 592926 : get_tablespace_page_costs(baserel->reltablespace,
612 : &spc_random_page_cost,
613 : &spc_seq_page_cost);
614 :
615 : /*----------
616 : * Estimate number of main-table pages fetched, and compute I/O cost.
617 : *
618 : * When the index ordering is uncorrelated with the table ordering,
619 : * we use an approximation proposed by Mackert and Lohman (see
620 : * index_pages_fetched() for details) to compute the number of pages
621 : * fetched, and then charge spc_random_page_cost per page fetched.
622 : *
623 : * When the index ordering is exactly correlated with the table ordering
624 : * (just after a CLUSTER, for example), the number of pages fetched should
625 : * be exactly selectivity * table_size. What's more, all but the first
626 : * will be sequential fetches, not the random fetches that occur in the
627 : * uncorrelated case. So if the number of pages is more than 1, we
628 : * ought to charge
629 : * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
630 : * For partially-correlated indexes, we ought to charge somewhere between
631 : * these two estimates. We currently interpolate linearly between the
632 : * estimates based on the correlation squared (XXX is that appropriate?).
633 : *
634 : * If it's an index-only scan, then we will not need to fetch any heap
635 : * pages for which the visibility map shows all tuples are visible.
636 : * Hence, reduce the estimated number of heap fetches accordingly.
637 : * We use the measured fraction of the entire heap that is all-visible,
638 : * which might not be particularly relevant to the subset of the heap
639 : * that this query will fetch; but it's not clear how to do better.
640 : *----------
641 : */
642 592926 : if (loop_count > 1)
643 : {
644 : /*
645 : * For repeated indexscans, the appropriate estimate for the
646 : * uncorrelated case is to scale up the number of tuples fetched in
647 : * the Mackert and Lohman formula by the number of scans, so that we
648 : * estimate the number of pages fetched by all the scans; then
649 : * pro-rate the costs for one scan. In this case we assume all the
650 : * fetches are random accesses.
651 : */
652 59534 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
653 : baserel->pages,
654 59534 : (double) index->pages,
655 : root);
656 :
657 59534 : if (indexonly)
658 7730 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
659 :
660 59534 : rand_heap_pages = pages_fetched;
661 :
662 59534 : max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
663 :
664 : /*
665 : * In the perfectly correlated case, the number of pages touched by
666 : * each scan is selectivity * table_size, and we can use the Mackert
667 : * and Lohman formula at the page level to estimate how much work is
668 : * saved by caching across scans. We still assume all the fetches are
669 : * random, though, which is an overestimate that's hard to correct for
670 : * without double-counting the cache effects. (But in most cases
671 : * where such a plan is actually interesting, only one page would get
672 : * fetched per scan anyway, so it shouldn't matter much.)
673 : */
674 59534 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
675 :
676 59534 : pages_fetched = index_pages_fetched(pages_fetched * loop_count,
677 : baserel->pages,
678 59534 : (double) index->pages,
679 : root);
680 :
681 59534 : if (indexonly)
682 7730 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
683 :
684 59534 : min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
685 : }
686 : else
687 : {
688 : /*
689 : * Normal case: apply the Mackert and Lohman formula, and then
690 : * interpolate between that and the correlation-derived result.
691 : */
692 533392 : pages_fetched = index_pages_fetched(tuples_fetched,
693 : baserel->pages,
694 533392 : (double) index->pages,
695 : root);
696 :
697 533392 : if (indexonly)
698 54406 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
699 :
700 533392 : rand_heap_pages = pages_fetched;
701 :
702 : /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
703 533392 : max_IO_cost = pages_fetched * spc_random_page_cost;
704 :
705 : /* min_IO_cost is for the perfectly correlated case (csquared=1) */
706 533392 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
707 :
708 533392 : if (indexonly)
709 54406 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
710 :
711 533392 : if (pages_fetched > 0)
712 : {
713 487402 : min_IO_cost = spc_random_page_cost;
714 487402 : if (pages_fetched > 1)
715 132572 : min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
716 : }
717 : else
718 45990 : min_IO_cost = 0;
719 : }
720 :
721 592926 : if (partial_path)
722 : {
723 : /*
724 : * For index only scans compute workers based on number of index pages
725 : * fetched; the number of heap pages we fetch might be so small as to
726 : * effectively rule out parallelism, which we don't want to do.
727 : */
728 202044 : if (indexonly)
729 19580 : rand_heap_pages = -1;
730 :
731 : /*
732 : * Estimate the number of parallel workers required to scan the index.
733 : * Use the number of heap pages computed on the assumption that heap
734 : * fetches won't be sequential, since for parallel scans the pages are
735 : * accessed in random order.
736 : */
737 202044 : path->path.parallel_workers = compute_parallel_worker(baserel,
738 : rand_heap_pages,
739 : index_pages,
740 : max_parallel_workers_per_gather);
741 :
742 : /*
743 : * Fall out if workers can't be assigned for parallel scan, because in
744 : * such a case this path will be rejected. So there is no benefit in
745 : * doing extra computation.
746 : */
747 202044 : if (path->path.parallel_workers <= 0)
748 192444 : return;
749 :
750 9600 : path->path.parallel_aware = true;
751 : }
752 :
753 : /*
754 : * Now interpolate based on estimated index order correlation to get total
755 : * disk I/O cost for main table accesses.
756 : */
757 400482 : csquared = indexCorrelation * indexCorrelation;
758 :
759 400482 : run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
760 :
761 : /*
762 : * Estimate CPU costs per tuple.
763 : *
764 : * What we want here is cpu_tuple_cost plus the evaluation costs of any
765 : * qual clauses that we have to evaluate as qpquals.
766 : */
767 400482 : cost_qual_eval(&qpqual_cost, qpquals, root);
768 :
769 400482 : startup_cost += qpqual_cost.startup;
770 400482 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
771 :
772 400482 : cpu_run_cost += cpu_per_tuple * tuples_fetched;
773 :
774 : /* tlist eval costs are paid per output row, not per tuple scanned */
775 400482 : startup_cost += path->path.pathtarget->cost.startup;
776 400482 : cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
777 :
778 : /* Adjust costing for parallelism, if used. */
779 400482 : if (path->path.parallel_workers > 0)
780 : {
781 9600 : double parallel_divisor = get_parallel_divisor(&path->path);
782 :
783 9600 : path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
784 :
785 : /* The CPU cost is divided among all the workers. */
786 9600 : cpu_run_cost /= parallel_divisor;
787 : }
788 :
789 400482 : run_cost += cpu_run_cost;
790 :
791 400482 : path->path.startup_cost = startup_cost;
792 400482 : path->path.total_cost = startup_cost + run_cost;
793 : }
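/*
 * Editorial worked example (assumed numbers): suppose spc_random_page_cost
 * = 4.0, spc_seq_page_cost = 1.0, an uncorrelated (Mackert and Lohman)
 * estimate of 1000 pages and a correlated estimate of 100 pages:
 *
 *     max_IO_cost = 1000 * 4.0              = 4000
 *     min_IO_cost = 4.0 + (100 - 1) * 1.0   =  103
 *
 * An index correlation of 0.9 gives csquared = 0.81, so the charged I/O
 * is 4000 + 0.81 * (103 - 4000) ~= 843.4; a perfectly correlated index
 * (csquared = 1) would be charged just 103.
 */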
794 :
795 : /*
796 : * extract_nonindex_conditions
797 : *
798 : * Given a list of quals to be enforced in an indexscan, extract the ones that
799 : * will have to be applied as qpquals (ie, the index machinery won't handle
800 : * them). Here we detect only whether a qual clause is directly redundant
801 : * with some indexclause. If the index path is chosen for use, createplan.c
802 : * will try a bit harder to get rid of redundant qual conditions; specifically
803 : * it will see if quals can be proven to be implied by the indexquals. But
804 : * it does not seem worth the cycles to try to factor that in at this stage,
805 : * since we're only trying to estimate qual eval costs. Otherwise this must
806 : * match the logic in create_indexscan_plan().
807 : *
808 : * qual_clauses, and the result, are lists of RestrictInfos.
809 : * indexclauses is a list of IndexClauses.
810 : */
811 : static List *
812 701606 : extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
813 : {
814 701606 : List *result = NIL;
815 : ListCell *lc;
816 :
817 1453340 : foreach(lc, qual_clauses)
818 : {
819 751734 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
820 :
821 751734 : if (rinfo->pseudoconstant)
822 2946 : continue; /* we may drop pseudoconstants here */
823 748788 : if (is_redundant_with_indexclauses(rinfo, indexclauses))
824 445706 : continue; /* dup or derived from same EquivalenceClass */
825 : /* ... skip the predicate proof attempt createplan.c will try ... */
826 303082 : result = lappend(result, rinfo);
827 : }
828 701606 : return result;
829 : }
830 :
831 : /*
832 : * index_pages_fetched
833 : * Estimate the number of pages actually fetched after accounting for
834 : * cache effects.
835 : *
836 : * We use an approximation proposed by Mackert and Lohman, "Index Scans
837 : * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
838 : * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
839 : * The Mackert and Lohman approximation is that the number of pages
840 : * fetched is
841 : * PF =
842 : * min(2TNs/(2T+Ns), T) when T <= b
843 : * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
844 : * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
845 : * where
846 : * T = # pages in table
847 : * N = # tuples in table
848 : * s = selectivity = fraction of table to be scanned
849 : * b = # buffer pages available (we include kernel space here)
850 : *
851 : * We assume that effective_cache_size is the total number of buffer pages
852 : * available for the whole query, and pro-rate that space across all the
853 : * tables in the query and the index currently under consideration. (This
854 : * ignores space needed for other indexes used by the query, but since we
855 : * don't know which indexes will get used, we can't estimate that very well;
856 : * and in any case counting all the tables may well be an overestimate, since
857 : * depending on the join plan not all the tables may be scanned concurrently.)
858 : *
859 : * The product Ns is the number of tuples fetched; we pass in that
860 : * product rather than calculating it here. "pages" is the number of pages
861 : * in the object under consideration (either an index or a table).
862 : * "index_pages" is the amount to add to the total table space, which was
863 : * computed for us by make_one_rel.
864 : *
865 : * Caller is expected to have ensured that tuples_fetched is greater than zero
866 : * and rounded to integer (see clamp_row_est). The result will likewise be
867 : * greater than zero and integral.
868 : */
869 : double
870 817426 : index_pages_fetched(double tuples_fetched, BlockNumber pages,
871 : double index_pages, PlannerInfo *root)
872 : {
873 : double pages_fetched;
874 : double total_pages;
875 : double T,
876 : b;
877 :
878 : /* T is # pages in table, but don't allow it to be zero */
879 817426 : T = (pages > 1) ? (double) pages : 1.0;
880 :
881 : /* Compute number of pages assumed to be competing for cache space */
882 817426 : total_pages = root->total_table_pages + index_pages;
883 817426 : total_pages = Max(total_pages, 1.0);
884 : Assert(T <= total_pages);
885 :
886 : /* b is pro-rated share of effective_cache_size */
887 817426 : b = (double) effective_cache_size * T / total_pages;
888 :
889 : /* force it positive and integral */
890 817426 : if (b <= 1.0)
891 0 : b = 1.0;
892 : else
893 817426 : b = ceil(b);
894 :
895 : /* This part is the Mackert and Lohman formula */
896 817426 : if (T <= b)
897 : {
898 817426 : pages_fetched =
899 817426 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
900 817426 : if (pages_fetched >= T)
901 461574 : pages_fetched = T;
902 : else
903 355852 : pages_fetched = ceil(pages_fetched);
904 : }
905 : else
906 : {
907 : double lim;
908 :
909 0 : lim = (2.0 * T * b) / (2.0 * T - b);
910 0 : if (tuples_fetched <= lim)
911 : {
912 0 : pages_fetched =
913 0 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
914 : }
915 : else
916 : {
917 0 : pages_fetched =
918 0 : b + (tuples_fetched - lim) * (T - b) / T;
919 : }
920 0 : pages_fetched = ceil(pages_fetched);
921 : }
922 817426 : return pages_fetched;
923 : }
924 :
925 : /*
926 : * get_indexpath_pages
927 : * Determine the total size of the indexes used in a bitmap index path.
928 : *
929 : * Note: if the same index is used more than once in a bitmap tree, we will
930 : * count it multiple times, which perhaps is the wrong thing ... but it's
931 : * not completely clear, and detecting duplicates is difficult, so ignore it
932 : * for now.
933 : */
934 : static double
935 130342 : get_indexpath_pages(Path *bitmapqual)
936 : {
937 130342 : double result = 0;
938 : ListCell *l;
939 :
940 130342 : if (IsA(bitmapqual, BitmapAndPath))
941 : {
942 15692 : BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
943 :
944 47076 : foreach(l, apath->bitmapquals)
945 : {
946 31384 : result += get_indexpath_pages((Path *) lfirst(l));
947 : }
948 : }
949 114650 : else if (IsA(bitmapqual, BitmapOrPath))
950 : {
951 60 : BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
952 :
953 180 : foreach(l, opath->bitmapquals)
954 : {
955 120 : result += get_indexpath_pages((Path *) lfirst(l));
956 : }
957 : }
958 114590 : else if (IsA(bitmapqual, IndexPath))
959 : {
960 114590 : IndexPath *ipath = (IndexPath *) bitmapqual;
961 :
962 114590 : result = (double) ipath->indexinfo->pages;
963 : }
964 : else
965 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
966 :
967 130342 : return result;
968 : }
969 :
970 : /*
971 : * cost_bitmap_heap_scan
972 : * Determines and returns the cost of scanning a relation using a bitmap
973 : * index-then-heap plan.
974 : *
975 : * 'baserel' is the relation to be scanned
976 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
977 : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
978 : * 'loop_count' is the number of repetitions of the indexscan to factor into
979 : * estimates of caching behavior
980 : *
981 : * Note: the component IndexPaths in bitmapqual should have been costed
982 : * using the same loop_count.
983 : */
984 : void
985 403588 : cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
986 : ParamPathInfo *param_info,
987 : Path *bitmapqual, double loop_count)
988 : {
989 403588 : Cost startup_cost = 0;
990 403588 : Cost run_cost = 0;
991 : Cost indexTotalCost;
992 : QualCost qpqual_cost;
993 : Cost cpu_per_tuple;
994 : Cost cost_per_page;
995 : Cost cpu_run_cost;
996 : double tuples_fetched;
997 : double pages_fetched;
998 : double spc_seq_page_cost,
999 : spc_random_page_cost;
1000 : double T;
1001 :
1002 : /* Should only be applied to base relations */
1003 : Assert(IsA(baserel, RelOptInfo));
1004 : Assert(baserel->relid > 0);
1005 : Assert(baserel->rtekind == RTE_RELATION);
1006 :
1007 : /* Mark the path with the correct row estimate */
1008 403588 : if (param_info)
1009 164004 : path->rows = param_info->ppi_rows;
1010 : else
1011 239584 : path->rows = baserel->rows;
1012 :
1013 403588 : if (!enable_bitmapscan)
1014 9230 : startup_cost += disable_cost;
1015 :
1016 403588 : pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
1017 : loop_count, &indexTotalCost,
1018 : &tuples_fetched);
1019 :
1020 403588 : startup_cost += indexTotalCost;
1021 403588 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
1022 :
1023 : /* Fetch estimated page costs for tablespace containing table. */
1024 403588 : get_tablespace_page_costs(baserel->reltablespace,
1025 : &spc_random_page_cost,
1026 : &spc_seq_page_cost);
1027 :
1028 : /*
1029 : * For small numbers of pages we should charge spc_random_page_cost
1030 : * apiece, while if nearly all the table's pages are being read, it's more
1031 : * appropriate to charge spc_seq_page_cost apiece. The effect is
1032 : * nonlinear, too. For lack of a better idea, interpolate like this to
1033 : * determine the cost per page.
1034 : */
1035 403588 : if (pages_fetched >= 2.0)
1036 77432 : cost_per_page = spc_random_page_cost -
1037 77432 : (spc_random_page_cost - spc_seq_page_cost)
1038 77432 : * sqrt(pages_fetched / T);
1039 : else
1040 326156 : cost_per_page = spc_random_page_cost;
1041 :
1042 403588 : run_cost += pages_fetched * cost_per_page;
1043 :
1044 : /*
1045 : * Estimate CPU costs per tuple.
1046 : *
1047 : * Often the indexquals don't need to be rechecked at each tuple ... but
1048 : * not always, especially not if there are enough tuples involved that the
1049 : * bitmaps become lossy. For the moment, just assume they will be
1050 : * rechecked always. This means we charge the full freight for all the
1051 : * scan clauses.
1052 : */
1053 403588 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1054 :
1055 403588 : startup_cost += qpqual_cost.startup;
1056 403588 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1057 403588 : cpu_run_cost = cpu_per_tuple * tuples_fetched;
1058 :
1059 : /* Adjust costing for parallelism, if used. */
1060 403588 : if (path->parallel_workers > 0)
1061 : {
1062 4056 : double parallel_divisor = get_parallel_divisor(path);
1063 :
1064 : /* The CPU cost is divided among all the workers. */
1065 4056 : cpu_run_cost /= parallel_divisor;
1066 :
1067 4056 : path->rows = clamp_row_est(path->rows / parallel_divisor);
1068 : }
1069 :
1070 :
1071 403588 : run_cost += cpu_run_cost;
1072 :
1073 : /* tlist eval costs are paid per output row, not per tuple scanned */
1074 403588 : startup_cost += path->pathtarget->cost.startup;
1075 403588 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1076 :
1077 403588 : path->startup_cost = startup_cost;
1078 403588 : path->total_cost = startup_cost + run_cost;
1079 403588 : }
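/*
 * Editorial worked example (assumed numbers): with spc_random_page_cost =
 * 4.0, spc_seq_page_cost = 1.0, T = 10000 table pages and pages_fetched =
 * 2500, the interpolation above yields
 *
 *     cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(2500 / 10000) = 2.5
 *
 * so the heap I/O charge is 2500 * 2.5 = 6250, between the pure-random
 * (10000) and pure-sequential (2500) extremes.
 */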
1080 :
1081 : /*
1082 : * cost_bitmap_tree_node
1083 : * Extract cost and selectivity from a bitmap tree node (index/and/or)
1084 : */
1085 : void
1086 740516 : cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
1087 : {
1088 740516 : if (IsA(path, IndexPath))
1089 : {
1090 705770 : *cost = ((IndexPath *) path)->indextotalcost;
1091 705770 : *selec = ((IndexPath *) path)->indexselectivity;
1092 :
1093 : /*
1094 : * Charge a small amount per retrieved tuple to reflect the costs of
1095 : * manipulating the bitmap. This is mostly to make sure that a bitmap
1096 : * scan doesn't look to be the same cost as an indexscan to retrieve a
1097 : * single tuple.
1098 : */
1099 705770 : *cost += 0.1 * cpu_operator_cost * path->rows;
1100 : }
1101 34746 : else if (IsA(path, BitmapAndPath))
1102 : {
1103 31862 : *cost = path->total_cost;
1104 31862 : *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1105 : }
1106 2884 : else if (IsA(path, BitmapOrPath))
1107 : {
1108 2884 : *cost = path->total_cost;
1109 2884 : *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1110 : }
1111 : else
1112 : {
1113 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1114 : *cost = *selec = 0; /* keep compiler quiet */
1115 : }
1116 740516 : }
1117 :
1118 : /*
1119 : * cost_bitmap_and_node
1120 : * Estimate the cost of a BitmapAnd node
1121 : *
1122 : * Note that this considers only the costs of index scanning and bitmap
1123 : * creation, not the eventual heap access. In that sense the object isn't
1124 : * truly a Path, but it has enough path-like properties (costs in particular)
1125 : * to warrant treating it as one. We don't bother to set the path rows field,
1126 : * however.
1127 : */
1128 : void
1129 31794 : cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
1130 : {
1131 : Cost totalCost;
1132 : Selectivity selec;
1133 : ListCell *l;
1134 :
1135 : /*
1136 : * We estimate AND selectivity on the assumption that the inputs are
1137 : * independent. This is probably often wrong, but we don't have the info
1138 : * to do better.
1139 : *
1140 : * The runtime cost of the BitmapAnd itself is estimated at 100x
1141 : * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1142 : * definitely too simplistic?
1143 : */
1144 31794 : totalCost = 0.0;
1145 31794 : selec = 1.0;
1146 95382 : foreach(l, path->bitmapquals)
1147 : {
1148 63588 : Path *subpath = (Path *) lfirst(l);
1149 : Cost subCost;
1150 : Selectivity subselec;
1151 :
1152 63588 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1153 :
1154 63588 : selec *= subselec;
1155 :
1156 63588 : totalCost += subCost;
1157 63588 : if (l != list_head(path->bitmapquals))
1158 31794 : totalCost += 100.0 * cpu_operator_cost;
1159 : }
1160 31794 : path->bitmapselectivity = selec;
1161 31794 : path->path.rows = 0; /* per above, not used */
1162 31794 : path->path.startup_cost = totalCost;
1163 31794 : path->path.total_cost = totalCost;
1164 31794 : }
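/*
 * Editorial worked example: ANDing two bitmaps with selectivities 0.1 and
 * 0.2 gives bitmapselectivity = 0.1 * 0.2 = 0.02 under the independence
 * assumption, plus one tbm_intersect charge of 100 * cpu_operator_cost =
 * 0.25 at the default GUC setting.
 */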
1165 :
1166 : /*
1167 : * cost_bitmap_or_node
1168 : * Estimate the cost of a BitmapOr node
1169 : *
1170 : * See comments for cost_bitmap_and_node.
1171 : */
1172 : void
1173 864 : cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1174 : {
1175 : Cost totalCost;
1176 : Selectivity selec;
1177 : ListCell *l;
1178 :
1179 : /*
1180 : * We estimate OR selectivity on the assumption that the inputs are
1181 : * non-overlapping, since that's often the case in "x IN (list)" type
1182 : * situations. Of course, we clamp to 1.0 at the end.
1183 : *
1184 : * The runtime cost of the BitmapOr itself is estimated at 100x
1185 : * cpu_operator_cost for each tbm_union needed. Probably too small,
1186 : * definitely too simplistic? We are aware that the tbm_unions are
1187 : * optimized out when the inputs are BitmapIndexScans.
1188 : */
1189 864 : totalCost = 0.0;
1190 864 : selec = 0.0;
1191 2652 : foreach(l, path->bitmapquals)
1192 : {
1193 1788 : Path *subpath = (Path *) lfirst(l);
1194 : Cost subCost;
1195 : Selectivity subselec;
1196 :
1197 1788 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1198 :
1199 1788 : selec += subselec;
1200 :
1201 1788 : totalCost += subCost;
1202 1788 : if (l != list_head(path->bitmapquals) &&
1203 924 : !IsA(subpath, IndexPath))
1204 30 : totalCost += 100.0 * cpu_operator_cost;
1205 : }
1206 864 : path->bitmapselectivity = Min(selec, 1.0);
1207 864 : path->path.rows = 0; /* per above, not used */
1208 864 : path->path.startup_cost = totalCost;
1209 864 : path->path.total_cost = totalCost;
1210 864 : }
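/*
 * Editorial worked example: ORing two bitmaps with selectivities 0.6 and
 * 0.7 gives 1.3 under the non-overlap assumption, which is then clamped
 * to 1.0.  No tbm_union charge is added when the subsequent inputs are
 * plain index paths, matching the optimization noted above.
 */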
1211 :
1212 : /*
1213 : * cost_tidscan
1214 : * Determines and returns the cost of scanning a relation using TIDs.
1215 : *
1216 : * 'baserel' is the relation to be scanned
1217 : * 'tidquals' is the list of TID-checkable quals
1218 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1219 : */
1220 : void
1221 756 : cost_tidscan(Path *path, PlannerInfo *root,
1222 : RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1223 : {
1224 756 : Cost startup_cost = 0;
1225 756 : Cost run_cost = 0;
1226 756 : bool isCurrentOf = false;
1227 : QualCost qpqual_cost;
1228 : Cost cpu_per_tuple;
1229 : QualCost tid_qual_cost;
1230 : int ntuples;
1231 : ListCell *l;
1232 : double spc_random_page_cost;
1233 :
1234 : /* Should only be applied to base relations */
1235 : Assert(baserel->relid > 0);
1236 : Assert(baserel->rtekind == RTE_RELATION);
1237 :
1238 : /* Mark the path with the correct row estimate */
1239 756 : if (param_info)
1240 144 : path->rows = param_info->ppi_rows;
1241 : else
1242 612 : path->rows = baserel->rows;
1243 :
1244 : /* Count how many tuples we expect to retrieve */
1245 756 : ntuples = 0;
1246 1536 : foreach(l, tidquals)
1247 : {
1248 780 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
1249 780 : Expr *qual = rinfo->clause;
1250 :
1251 780 : if (IsA(qual, ScalarArrayOpExpr))
1252 : {
1253 : /* Each element of the array yields 1 tuple */
1254 30 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
1255 30 : Node *arraynode = (Node *) lsecond(saop->args);
1256 :
1257 30 : ntuples += estimate_array_length(arraynode);
1258 : }
1259 750 : else if (IsA(qual, CurrentOfExpr))
1260 : {
1261 : /* CURRENT OF yields 1 tuple */
1262 392 : isCurrentOf = true;
1263 392 : ntuples++;
1264 : }
1265 : else
1266 : {
1267 : /* It's just CTID = something, count 1 tuple */
1268 358 : ntuples++;
1269 : }
1270 : }
1271 :
1272 : /*
1273 : * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1274 : * understands how to do it correctly. Therefore, honor enable_tidscan
1275 : * only when CURRENT OF isn't present. Also note that cost_qual_eval
1276 : * counts a CurrentOfExpr as having startup cost disable_cost, which we
1277 : * subtract off here; that's to prevent other plan types such as seqscan
1278 : * from winning.
1279 : */
1280 756 : if (isCurrentOf)
1281 : {
1282 : Assert(baserel->baserestrictcost.startup >= disable_cost);
1283 392 : startup_cost -= disable_cost;
1284 : }
1285 364 : else if (!enable_tidscan)
1286 0 : startup_cost += disable_cost;
1287 :
1288 : /*
1289 : * The TID qual expressions will be computed once, any other baserestrict
1290 : * quals once per retrieved tuple.
1291 : */
1292 756 : cost_qual_eval(&tid_qual_cost, tidquals, root);
1293 :
1294 : /* fetch estimated page cost for tablespace containing table */
1295 756 : get_tablespace_page_costs(baserel->reltablespace,
1296 : &spc_random_page_cost,
1297 : NULL);
1298 :
1299 : /* disk costs --- assume each tuple on a different page */
1300 756 : run_cost += spc_random_page_cost * ntuples;
1301 :
1302 : /* Add scanning CPU costs */
1303 756 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1304 :
1305 : /* XXX currently we assume TID quals are a subset of qpquals */
1306 756 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1307 756 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1308 756 : tid_qual_cost.per_tuple;
1309 756 : run_cost += cpu_per_tuple * ntuples;
1310 :
1311 : /* tlist eval costs are paid per output row, not per tuple scanned */
1312 756 : startup_cost += path->pathtarget->cost.startup;
1313 756 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1314 :
1315 756 : path->startup_cost = startup_cost;
1316 756 : path->total_cost = startup_cost + run_cost;
1317 756 : }
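/*
 * Editorial worked example: for WHERE ctid = ANY ('{...}') with a
 * five-element array constant, ntuples = 5, and with spc_random_page_cost
 * = 4.0 the disk charge is 5 * 4.0 = 20, since each TID is pessimistically
 * assumed to fall on a different heap page.
 */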
1318 :
1319 : /*
1320 : * cost_tidrangescan
1321 : * Determines and sets the costs of scanning a relation using a range of
1322 : * TIDs for 'path'
1323 : *
1324 : * 'baserel' is the relation to be scanned
1325 : * 'tidrangequals' is the list of TID-checkable range quals
1326 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1327 : */
1328 : void
1329 202 : cost_tidrangescan(Path *path, PlannerInfo *root,
1330 : RelOptInfo *baserel, List *tidrangequals,
1331 : ParamPathInfo *param_info)
1332 : {
1333 : Selectivity selectivity;
1334 : double pages;
1335 202 : Cost startup_cost = 0;
1336 202 : Cost run_cost = 0;
1337 : QualCost qpqual_cost;
1338 : Cost cpu_per_tuple;
1339 : QualCost tid_qual_cost;
1340 : double ntuples;
1341 : double nseqpages;
1342 : double spc_random_page_cost;
1343 : double spc_seq_page_cost;
1344 :
1345 : /* Should only be applied to base relations */
1346 : Assert(baserel->relid > 0);
1347 : Assert(baserel->rtekind == RTE_RELATION);
1348 :
1349 : /* Mark the path with the correct row estimate */
1350 202 : if (param_info)
1351 0 : path->rows = param_info->ppi_rows;
1352 : else
1353 202 : path->rows = baserel->rows;
1354 :
1355 : /* Count how many tuples and pages we expect to scan */
1356 202 : selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
1357 : JOIN_INNER, NULL);
1358 202 : pages = ceil(selectivity * baserel->pages);
1359 :
1360 202 : if (pages <= 0.0)
1361 54 : pages = 1.0;
1362 :
1363 : /*
1364 : * The first page in a range requires a random seek, but each subsequent
1365 : * page is just a normal sequential page read. NOTE: it's desirable for
1366 : * TID Range Scans to cost more than the equivalent Sequential Scans,
1367 : * because Seq Scans have some performance advantages such as scan
1368 : * synchronization and parallelizability, and we'd prefer one of them to
1369 : * be picked unless a TID Range Scan really is better.
1370 : */
1371 202 : ntuples = selectivity * baserel->tuples;
1372 202 : nseqpages = pages - 1.0;
1373 :
1374 202 : if (!enable_tidscan)
1375 0 : startup_cost += disable_cost;
1376 :
1377 : /*
1378 : * The TID qual expressions will be computed once, any other baserestrict
1379 : * quals once per retrieved tuple.
1380 : */
1381 202 : cost_qual_eval(&tid_qual_cost, tidrangequals, root);
1382 :
1383 : /* fetch estimated page cost for tablespace containing table */
1384 202 : get_tablespace_page_costs(baserel->reltablespace,
1385 : &spc_random_page_cost,
1386 : &spc_seq_page_cost);
1387 :
1388 : /* disk costs; 1 random page and the remainder as seq pages */
1389 202 : run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
1390 :
1391 : /* Add scanning CPU costs */
1392 202 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1393 :
1394 : /*
1395 : * XXX currently we assume TID quals are a subset of qpquals at this
1396 : * point; they will be removed (if possible) when we create the plan, so
1397 : * we subtract their cost from the total qpqual cost. (If the TID quals
1398 : * can't be removed, this is a mistake and we're going to underestimate
1399 : * the CPU cost a bit.)
1400 : */
1401 202 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1402 202 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1403 202 : tid_qual_cost.per_tuple;
1404 202 : run_cost += cpu_per_tuple * ntuples;
1405 :
1406 : /* tlist eval costs are paid per output row, not per tuple scanned */
1407 202 : startup_cost += path->pathtarget->cost.startup;
1408 202 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1409 :
1410 202 : path->startup_cost = startup_cost;
1411 202 : path->total_cost = startup_cost + run_cost;
1412 202 : }
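/*
 * Editorial worked example: a TID range covering 10% of a 1000-page table
 * scans ceil(0.1 * 1000) = 100 pages; with spc_random_page_cost = 4.0 and
 * spc_seq_page_cost = 1.0 the disk charge is 4.0 + 99 * 1.0 = 103, versus
 * 1000 for a full sequential scan of the same table.
 */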
1413 :
1414 : /*
1415 : * cost_subqueryscan
1416 : * Determines and returns the cost of scanning a subquery RTE.
1417 : *
1418 : * 'baserel' is the relation to be scanned
1419 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1420 : * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1421 : */
1422 : void
1423 19718 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1424 : RelOptInfo *baserel, ParamPathInfo *param_info,
1425 : bool trivial_pathtarget)
1426 : {
1427 : Cost startup_cost;
1428 : Cost run_cost;
1429 : List *qpquals;
1430 : QualCost qpqual_cost;
1431 : Cost cpu_per_tuple;
1432 :
1433 : /* Should only be applied to base relations that are subqueries */
1434 : Assert(baserel->relid > 0);
1435 : Assert(baserel->rtekind == RTE_SUBQUERY);
1436 :
1437 : /*
1438 : * We compute the rowcount estimate as the subplan's estimate times the
1439 : * selectivity of relevant restriction clauses. In simple cases this will
1440 : * come out the same as baserel->rows; but when dealing with parallelized
1441 : * paths we must do it like this to get the right answer.
1442 : */
1443 19718 : if (param_info)
1444 450 : qpquals = list_concat_copy(param_info->ppi_clauses,
1445 450 : baserel->baserestrictinfo);
1446 : else
1447 19268 : qpquals = baserel->baserestrictinfo;
1448 :
1449 19718 : path->path.rows = clamp_row_est(path->subpath->rows *
1450 19718 : clauselist_selectivity(root,
1451 : qpquals,
1452 : 0,
1453 : JOIN_INNER,
1454 : NULL));
1455 :
1456 : /*
1457 : * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1458 : * any restriction clauses and tlist that will be attached to the
1459 : * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1460 : * projection overhead.
1461 : */
1462 19718 : path->path.startup_cost = path->subpath->startup_cost;
1463 19718 : path->path.total_cost = path->subpath->total_cost;
1464 :
1465 : /*
1466 : * However, if there are no relevant restriction clauses and the
1467 : * pathtarget is trivial, then we expect that setrefs.c will optimize away
1468 : * the SubqueryScan plan node altogether, so we should just make its cost
1469 : * and rowcount equal to the input path's.
1470 : *
1471 : * Note: there are some edge cases where createplan.c will apply a
1472 : * different targetlist to the SubqueryScan node, thus falsifying our
1473 : * current estimate of whether the target is trivial, and making the cost
1474 : * estimate (though not the rowcount) wrong. It does not seem worth the
1475 : * extra complication to try to account for that exactly, especially since
1476 : * that behavior falsifies other cost estimates as well.
1477 : */
1478 19718 : if (qpquals == NIL && trivial_pathtarget)
1479 9898 : return;
1480 :
1481 9820 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1482 :
1483 9820 : startup_cost = qpqual_cost.startup;
1484 9820 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1485 9820 : run_cost = cpu_per_tuple * path->subpath->rows;
1486 :
1487 : /* tlist eval costs are paid per output row, not per tuple scanned */
1488 9820 : startup_cost += path->path.pathtarget->cost.startup;
1489 9820 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1490 :
1491 9820 : path->path.startup_cost += startup_cost;
1492 9820 : path->path.total_cost += startup_cost + run_cost;
1493 : }
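/*
 * A small worked example of the estimates above (illustrative numbers, not
 * from any particular query): if the subplan is expected to return 1000
 * rows and the attached quals have selectivity 0.1, the path is marked with
 * 100 rows, while the added CPU charge is (cpu_tuple_cost + per-tuple qual
 * cost) * 1000 --- paid per subplan row scanned, not per row returned.  In
 * the no-quals, trivial-pathtarget case we fall out early and the
 * SubqueryScan simply inherits the subpath's costs, matching setrefs.c's
 * later removal of the node.
 */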
1494 :
1495 : /*
1496 : * cost_functionscan
1497 : * Determines and returns the cost of scanning a function RTE.
1498 : *
1499 : * 'baserel' is the relation to be scanned
1500 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1501 : */
1502 : void
1503 38142 : cost_functionscan(Path *path, PlannerInfo *root,
1504 : RelOptInfo *baserel, ParamPathInfo *param_info)
1505 : {
1506 38142 : Cost startup_cost = 0;
1507 38142 : Cost run_cost = 0;
1508 : QualCost qpqual_cost;
1509 : Cost cpu_per_tuple;
1510 : RangeTblEntry *rte;
1511 : QualCost exprcost;
1512 :
1513 : /* Should only be applied to base relations that are functions */
1514 : Assert(baserel->relid > 0);
1515 38142 : rte = planner_rt_fetch(baserel->relid, root);
1516 : Assert(rte->rtekind == RTE_FUNCTION);
1517 :
1518 : /* Mark the path with the correct row estimate */
1519 38142 : if (param_info)
1520 7916 : path->rows = param_info->ppi_rows;
1521 : else
1522 30226 : path->rows = baserel->rows;
1523 :
1524 : /*
1525 : * Estimate costs of executing the function expression(s).
1526 : *
1527 : * Currently, nodeFunctionscan.c always executes the functions to
1528 : * completion before returning any rows, and caches the results in a
1529 : * tuplestore. So the function eval cost is all startup cost, and per-row
1530 : * costs are minimal.
1531 : *
1532 : * XXX in principle we ought to charge tuplestore spill costs if the
1533 : * number of rows is large. However, given how phony our rowcount
1534 : * estimates for functions tend to be, there's not a lot of point in that
1535 : * refinement right now.
1536 : */
1537 38142 : cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1538 :
1539 38142 : startup_cost += exprcost.startup + exprcost.per_tuple;
1540 :
1541 : /* Add scanning CPU costs */
1542 38142 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1543 :
1544 38142 : startup_cost += qpqual_cost.startup;
1545 38142 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1546 38142 : run_cost += cpu_per_tuple * baserel->tuples;
1547 :
1548 : /* tlist eval costs are paid per output row, not per tuple scanned */
1549 38142 : startup_cost += path->pathtarget->cost.startup;
1550 38142 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1551 :
1552 38142 : path->startup_cost = startup_cost;
1553 38142 : path->total_cost = startup_cost + run_cost;
1554 38142 : }
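/*
 * To make the startup-vs-run split above concrete (a sketch with made-up
 * numbers): if evaluating the function expressions is estimated at 250
 * cost units and the scan is expected to yield 10000 tuples, all 250 units
 * land in startup_cost, since the tuplestore is filled before the first
 * row is returned; the run cost is then about cpu_tuple_cost * 10000 = 100
 * plus any qual and tlist evaluation costs.
 */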
1555 :
1556 : /*
1557 : * cost_tablefuncscan
1558 : * Determines and returns the cost of scanning a table function.
1559 : *
1560 : * 'baserel' is the relation to be scanned
1561 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1562 : */
1563 : void
1564 216 : cost_tablefuncscan(Path *path, PlannerInfo *root,
1565 : RelOptInfo *baserel, ParamPathInfo *param_info)
1566 : {
1567 216 : Cost startup_cost = 0;
1568 216 : Cost run_cost = 0;
1569 : QualCost qpqual_cost;
1570 : Cost cpu_per_tuple;
1571 : RangeTblEntry *rte;
1572 : QualCost exprcost;
1573 :
1574 : /* Should only be applied to base relations that are functions */
1575 : /* Should only be applied to base relations that are table functions */
1576 216 : rte = planner_rt_fetch(baserel->relid, root);
1577 : Assert(rte->rtekind == RTE_TABLEFUNC);
1578 :
1579 : /* Mark the path with the correct row estimate */
1580 216 : if (param_info)
1581 144 : path->rows = param_info->ppi_rows;
1582 : else
1583 72 : path->rows = baserel->rows;
1584 :
1585 : /*
1586 : * Estimate costs of executing the table func expression(s).
1587 : *
1588 : * XXX in principle we ought to charge tuplestore spill costs if the
1589 : * number of rows is large. However, given how phony our rowcount
1590 : * estimates for tablefuncs tend to be, there's not a lot of point in that
1591 : * refinement right now.
1592 : */
1593 216 : cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1594 :
1595 216 : startup_cost += exprcost.startup + exprcost.per_tuple;
1596 :
1597 : /* Add scanning CPU costs */
1598 216 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1599 :
1600 216 : startup_cost += qpqual_cost.startup;
1601 216 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1602 216 : run_cost += cpu_per_tuple * baserel->tuples;
1603 :
1604 : /* tlist eval costs are paid per output row, not per tuple scanned */
1605 216 : startup_cost += path->pathtarget->cost.startup;
1606 216 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1607 :
1608 216 : path->startup_cost = startup_cost;
1609 216 : path->total_cost = startup_cost + run_cost;
1610 216 : }
1611 :
1612 : /*
1613 : * cost_valuesscan
1614 : * Determines and returns the cost of scanning a VALUES RTE.
1615 : *
1616 : * 'baserel' is the relation to be scanned
1617 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1618 : */
1619 : void
1620 7322 : cost_valuesscan(Path *path, PlannerInfo *root,
1621 : RelOptInfo *baserel, ParamPathInfo *param_info)
1622 : {
1623 7322 : Cost startup_cost = 0;
1624 7322 : Cost run_cost = 0;
1625 : QualCost qpqual_cost;
1626 : Cost cpu_per_tuple;
1627 :
1628 : /* Should only be applied to base relations that are values lists */
1629 : Assert(baserel->relid > 0);
1630 : Assert(baserel->rtekind == RTE_VALUES);
1631 :
1632 : /* Mark the path with the correct row estimate */
1633 7322 : if (param_info)
1634 48 : path->rows = param_info->ppi_rows;
1635 : else
1636 7274 : path->rows = baserel->rows;
1637 :
1638 : /*
1639 : * For now, estimate list evaluation cost at one operator eval per list
1640 : * (probably pretty bogus, but is it worth being smarter?)
1641 : */
1642 7322 : cpu_per_tuple = cpu_operator_cost;
1643 :
1644 : /* Add scanning CPU costs */
1645 7322 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1646 :
1647 7322 : startup_cost += qpqual_cost.startup;
1648 7322 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1649 7322 : run_cost += cpu_per_tuple * baserel->tuples;
1650 :
1651 : /* tlist eval costs are paid per output row, not per tuple scanned */
1652 7322 : startup_cost += path->pathtarget->cost.startup;
1653 7322 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1654 :
1655 7322 : path->startup_cost = startup_cost;
1656 7322 : path->total_cost = startup_cost + run_cost;
1657 7322 : }
1658 :
1659 : /*
1660 : * cost_ctescan
1661 : * Determines and returns the cost of scanning a CTE RTE.
1662 : *
1663 : * Note: this is used for both self-reference and regular CTEs; the
1664 : * possible cost differences are below the threshold of what we could
1665 : * estimate accurately anyway. Note that the costs of evaluating the
1666 : * referenced CTE query are added into the final plan as initplan costs,
1667 : * and should NOT be counted here.
1668 : */
1669 : void
1670 3776 : cost_ctescan(Path *path, PlannerInfo *root,
1671 : RelOptInfo *baserel, ParamPathInfo *param_info)
1672 : {
1673 3776 : Cost startup_cost = 0;
1674 3776 : Cost run_cost = 0;
1675 : QualCost qpqual_cost;
1676 : Cost cpu_per_tuple;
1677 :
1678 : /* Should only be applied to base relations that are CTEs */
1679 : Assert(baserel->relid > 0);
1680 : Assert(baserel->rtekind == RTE_CTE);
1681 :
1682 : /* Mark the path with the correct row estimate */
1683 3776 : if (param_info)
1684 0 : path->rows = param_info->ppi_rows;
1685 : else
1686 3776 : path->rows = baserel->rows;
1687 :
1688 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1689 3776 : cpu_per_tuple = cpu_tuple_cost;
1690 :
1691 : /* Add scanning CPU costs */
1692 3776 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1693 :
1694 3776 : startup_cost += qpqual_cost.startup;
1695 3776 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1696 3776 : run_cost += cpu_per_tuple * baserel->tuples;
1697 :
1698 : /* tlist eval costs are paid per output row, not per tuple scanned */
1699 3776 : startup_cost += path->pathtarget->cost.startup;
1700 3776 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1701 :
1702 3776 : path->startup_cost = startup_cost;
1703 3776 : path->total_cost = startup_cost + run_cost;
1704 3776 : }
1705 :
1706 : /*
1707 : * cost_namedtuplestorescan
1708 : * Determines and returns the cost of scanning a named tuplestore.
1709 : */
1710 : void
1711 446 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
1712 : RelOptInfo *baserel, ParamPathInfo *param_info)
1713 : {
1714 446 : Cost startup_cost = 0;
1715 446 : Cost run_cost = 0;
1716 : QualCost qpqual_cost;
1717 : Cost cpu_per_tuple;
1718 :
1719 : /* Should only be applied to base relations that are Tuplestores */
1720 : Assert(baserel->relid > 0);
1721 : Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1722 :
1723 : /* Mark the path with the correct row estimate */
1724 446 : if (param_info)
1725 0 : path->rows = param_info->ppi_rows;
1726 : else
1727 446 : path->rows = baserel->rows;
1728 :
1729 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1730 446 : cpu_per_tuple = cpu_tuple_cost;
1731 :
1732 : /* Add scanning CPU costs */
1733 446 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1734 :
1735 446 : startup_cost += qpqual_cost.startup;
1736 446 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1737 446 : run_cost += cpu_per_tuple * baserel->tuples;
1738 :
1739 446 : path->startup_cost = startup_cost;
1740 446 : path->total_cost = startup_cost + run_cost;
1741 446 : }
1742 :
1743 : /*
1744 : * cost_resultscan
1745 : * Determines and returns the cost of scanning an RTE_RESULT relation.
1746 : */
1747 : void
1748 1402 : cost_resultscan(Path *path, PlannerInfo *root,
1749 : RelOptInfo *baserel, ParamPathInfo *param_info)
1750 : {
1751 1402 : Cost startup_cost = 0;
1752 1402 : Cost run_cost = 0;
1753 : QualCost qpqual_cost;
1754 : Cost cpu_per_tuple;
1755 :
1756 : /* Should only be applied to RTE_RESULT base relations */
1757 : Assert(baserel->relid > 0);
1758 : Assert(baserel->rtekind == RTE_RESULT);
1759 :
1760 : /* Mark the path with the correct row estimate */
1761 1402 : if (param_info)
1762 132 : path->rows = param_info->ppi_rows;
1763 : else
1764 1270 : path->rows = baserel->rows;
1765 :
1766 : /* We charge qual cost plus cpu_tuple_cost */
1767 1402 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1768 :
1769 1402 : startup_cost += qpqual_cost.startup;
1770 1402 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1771 1402 : run_cost += cpu_per_tuple * baserel->tuples;
1772 :
1773 1402 : path->startup_cost = startup_cost;
1774 1402 : path->total_cost = startup_cost + run_cost;
1775 1402 : }
1776 :
1777 : /*
1778 : * cost_recursive_union
1779 : * Determines and returns the cost of performing a recursive union,
1780 : * and also the estimated output size.
1781 : *
1782 : * We are given Paths for the nonrecursive and recursive terms.
1783 : */
1784 : void
1785 778 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1786 : {
1787 : Cost startup_cost;
1788 : Cost total_cost;
1789 : double total_rows;
1790 :
1791 : /* We probably have decent estimates for the non-recursive term */
1792 778 : startup_cost = nrterm->startup_cost;
1793 778 : total_cost = nrterm->total_cost;
1794 778 : total_rows = nrterm->rows;
1795 :
1796 : /*
1797 : * We arbitrarily assume that about 10 recursive iterations will be
1798 : * needed, and that we've managed to get a good fix on the cost and output
1799 : * size of each one of them. These are mighty shaky assumptions but it's
1800 : * hard to see how to do better.
1801 : */
1802 778 : total_cost += 10 * rterm->total_cost;
1803 778 : total_rows += 10 * rterm->rows;
1804 :
1805 : /*
1806 : * Also charge cpu_tuple_cost per row to account for the costs of
1807 : * manipulating the tuplestores. (We don't worry about possible
1808 : * spill-to-disk costs.)
1809 : */
1810 778 : total_cost += cpu_tuple_cost * total_rows;
1811 :
1812 778 : runion->startup_cost = startup_cost;
1813 778 : runion->total_cost = total_cost;
1814 778 : runion->rows = total_rows;
1815 778 : runion->pathtarget->width = Max(nrterm->pathtarget->width,
1816 : rterm->pathtarget->width);
1817 778 : }
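/*
 * Putting the pieces above together (illustrative numbers only): with a
 * nonrecursive term of 10 rows at total cost 1.0 and a recursive term of
 * 10 rows at total cost 2.0 per iteration, we get
 *		total_rows = 10 + 10 * 10 = 110
 *		total_cost = 1.0 + 10 * 2.0 + cpu_tuple_cost * 110
 * which comes to 22.1 with the default cpu_tuple_cost of 0.01.
 */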
1818 :
1819 : /*
1820 : * cost_tuplesort
1821 : * Determines and returns the cost of sorting a relation using tuplesort,
1822 : * not including the cost of reading the input data.
1823 : *
1824 : * If the total volume of data to sort is less than sort_mem, we will do
1825 : * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1826 : * comparisons for t tuples.
1827 : *
1828 : * If the total volume exceeds sort_mem, we switch to a tape-style merge
1829 : * algorithm. There will still be about t*log2(t) tuple comparisons in
1830 : * total, but we will also need to write and read each tuple once per
1831 : * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1832 : * number of initial runs formed and M is the merge order used by tuplesort.c.
1833 : * Since the average initial run should be about sort_mem, we have
1834 : * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1835 : * cpu = comparison_cost * t * log2(t)
1836 : *
1837 : * If the sort is bounded (i.e., only the first k result tuples are needed)
1838 : * and k tuples can fit into sort_mem, we use a heap method that keeps only
1839 : * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1840 : *
1841 : * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1842 : * accesses (XXX can't we refine that guess?)
1843 : *
1844 : * By default, we charge two operator evals per tuple comparison, which should
1845 : * be in the right ballpark in most cases. The caller can tweak this by
1846 : * specifying nonzero comparison_cost; typically that's used for any extra
1847 : * work that has to be done to prepare the inputs to the comparison operators.
1848 : *
1849 : * 'tuples' is the number of tuples in the relation
1850 : * 'width' is the average tuple width in bytes
1851 : * 'comparison_cost' is the extra cost per comparison, if any
1852 : * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1853 : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1854 : */
1855 : static void
1856 1278984 : cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1857 : double tuples, int width,
1858 : Cost comparison_cost, int sort_mem,
1859 : double limit_tuples)
1860 : {
1861 1278984 : double input_bytes = relation_byte_size(tuples, width);
1862 : double output_bytes;
1863 : double output_tuples;
1864 1278984 : long sort_mem_bytes = sort_mem * 1024L;
1865 :
1866 : /*
1867 : * We want to be sure the cost of a sort is never estimated as zero, even
1868 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1869 : */
1870 1278984 : if (tuples < 2.0)
1871 398644 : tuples = 2.0;
1872 :
1873 : /* Include the default cost-per-comparison */
1874 1278984 : comparison_cost += 2.0 * cpu_operator_cost;
1875 :
1876 : /* Do we have a useful LIMIT? */
1877 1278984 : if (limit_tuples > 0 && limit_tuples < tuples)
1878 : {
1879 1670 : output_tuples = limit_tuples;
1880 1670 : output_bytes = relation_byte_size(output_tuples, width);
1881 : }
1882 : else
1883 : {
1884 1277314 : output_tuples = tuples;
1885 1277314 : output_bytes = input_bytes;
1886 : }
1887 :
1888 1278984 : if (output_bytes > sort_mem_bytes)
1889 : {
1890 : /*
1891 : * We'll have to use a disk-based sort of all the tuples
1892 : */
1893 15922 : double npages = ceil(input_bytes / BLCKSZ);
1894 15922 : double nruns = input_bytes / sort_mem_bytes;
1895 15922 : double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1896 : double log_runs;
1897 : double npageaccesses;
1898 :
1899 : /*
1900 : * CPU costs
1901 : *
1902 : * Assume about N log2 N comparisons
1903 : */
1904 15922 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1905 :
1906 : /* Disk costs */
1907 :
1908 : /* Compute logM(r) as log(r) / log(M) */
1909 15922 : if (nruns > mergeorder)
1910 4884 : log_runs = ceil(log(nruns) / log(mergeorder));
1911 : else
1912 11038 : log_runs = 1.0;
1913 15922 : npageaccesses = 2.0 * npages * log_runs;
1914 : /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1915 15922 : *startup_cost += npageaccesses *
1916 15922 : (seq_page_cost * 0.75 + random_page_cost * 0.25);
1917 : }
1918 1263062 : else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1919 : {
1920 : /*
1921 : * We'll use a bounded heap-sort keeping just K tuples in memory, for
1922 : * a total number of tuple comparisons of N log2 K; but the constant
1923 : * factor is a bit higher than for quicksort. Tweak it so that the
1924 : * cost curve is continuous at the crossover point.
1925 : */
1926 1300 : *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
1927 : }
1928 : else
1929 : {
1930 : /* We'll use plain quicksort on all the input tuples */
1931 1261762 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1932 : }
1933 :
1934 : /*
1935 : * Also charge a small amount (arbitrarily set equal to operator cost) per
1936 : * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1937 : * doesn't do qual-checking or projection, so it has less overhead than
1938 : * most plan nodes. Note it's correct to use tuples not output_tuples
1939 : * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1940 : * counting the LIMIT otherwise.
1941 : */
1942 1278984 : *run_cost = cpu_operator_cost * tuples;
1943 1278984 : }
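/*
 * Rough worked example of the external-sort branch above (illustrative,
 * assuming default cost parameters, BLCKSZ = 8192, and a merge order of,
 * say, M = 8): sorting ~400MB of tuples with sort_mem = 4MB forms about
 * nruns = 100 initial runs, needing ceil(log(100) / log(8)) = 3 merge
 * passes, so
 *		npageaccesses = 2 * 51200 * 3 = 307200
 *		disk cost ~= 307200 * (0.75 * 1.0 + 0.25 * 4.0) = 537600
 * which typically dwarfs the comparison_cost * t * log2(t) CPU charge.
 * (This ignores the per-tuple overhead added by relation_byte_size, so
 * treat the figures as ballpark only.)
 */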
1944 :
1945 : /*
1946 : * cost_incremental_sort
1947 : * Determines and returns the cost of sorting a relation incrementally, when
1948 : * the input path is presorted by a prefix of the pathkeys.
1949 : *
1950 : * 'presorted_keys' is the number of leading pathkeys by which the input path
1951 : * is sorted.
1952 : *
1953 : * We estimate the number of groups into which the relation is divided by the
1954 : * leading pathkeys, and then calculate the cost of sorting a single group
1955 : * with tuplesort using cost_tuplesort().
1956 : */
1957 : void
1958 4582 : cost_incremental_sort(Path *path,
1959 : PlannerInfo *root, List *pathkeys, int presorted_keys,
1960 : Cost input_startup_cost, Cost input_total_cost,
1961 : double input_tuples, int width, Cost comparison_cost, int sort_mem,
1962 : double limit_tuples)
1963 : {
1964 : Cost startup_cost,
1965 : run_cost,
1966 4582 : input_run_cost = input_total_cost - input_startup_cost;
1967 : double group_tuples,
1968 : input_groups;
1969 : Cost group_startup_cost,
1970 : group_run_cost,
1971 : group_input_run_cost;
1972 4582 : List *presortedExprs = NIL;
1973 : ListCell *l;
1974 4582 : bool unknown_varno = false;
1975 :
1976 : Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
1977 :
1978 : /*
1979 : * We want to be sure the cost of a sort is never estimated as zero, even
1980 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1981 : */
1982 4582 : if (input_tuples < 2.0)
1983 1924 : input_tuples = 2.0;
1984 :
1985 : /* Default estimate of number of groups, capped to one group per row. */
1986 4582 : input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
1987 :
1988 : /*
1989 : * Extract presorted keys as list of expressions.
1990 : *
1991 : * We need to be careful about Vars containing "varno 0" which might have
1992 : * been introduced by generate_append_tlist, which would confuse
1993 : * estimate_num_groups (in fact it'd fail for such expressions). See
1994 : * recurse_set_operations which has to deal with the same issue.
1995 : *
1996 : * Unlike recurse_set_operations we can't access the original target list
1997 : * here, and even if we could, it's not very clear how useful that would be
1998 : * for a set operation combining multiple tables. So we simply detect if
1999 : * there are any expressions with "varno 0" and use the default
2000 : * DEFAULT_NUM_DISTINCT in that case.
2001 : *
2002 : * We might also use either 1.0 (a single group) or input_tuples (each row
2003 : * being a separate group), pretty much the worst and best case for
2004 : * incremental sort. But those are extreme cases and using something in
2005 : * between seems reasonable. Furthermore, generate_append_tlist is used
2006 : * for set operations, which are likely to produce mostly unique output
2007 : * anyway --- from that standpoint the DEFAULT_NUM_DISTINCT is defensive
2008 : * while maintaining lower startup cost.
2009 : */
2010 4636 : foreach(l, pathkeys)
2011 : {
2012 4636 : PathKey *key = (PathKey *) lfirst(l);
2013 4636 : EquivalenceMember *member = (EquivalenceMember *)
2014 4636 : linitial(key->pk_eclass->ec_members);
2015 :
2016 : /*
2017 : * Check if the expression contains Var with "varno 0" so that we
2018 : * don't call estimate_num_groups in that case.
2019 : */
2020 4636 : if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2021 : {
2022 6 : unknown_varno = true;
2023 6 : break;
2024 : }
2025 :
2026 : /* expression not containing any Vars with "varno 0" */
2027 4630 : presortedExprs = lappend(presortedExprs, member->em_expr);
2028 :
2029 4630 : if (foreach_current_index(l) + 1 >= presorted_keys)
2030 4576 : break;
2031 : }
2032 :
2033 : /* Estimate the number of groups with equal presorted keys. */
2034 4582 : if (!unknown_varno)
2035 4576 : input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
2036 : NULL, NULL);
2037 :
2038 4582 : group_tuples = input_tuples / input_groups;
2039 4582 : group_input_run_cost = input_run_cost / input_groups;
2040 :
2041 : /*
2042 : * Estimate the average cost of sorting of one group where presorted keys
2043 : * are equal.
2044 : */
2045 4582 : cost_tuplesort(&group_startup_cost, &group_run_cost,
2046 : group_tuples, width, comparison_cost, sort_mem,
2047 : limit_tuples);
2048 :
2049 : /*
2050 : * Startup cost of incremental sort is the cost of sorting its first group,
2051 : * plus the input's startup cost and its run cost for that first group.
2052 : */
2053 4582 : startup_cost = group_startup_cost + input_startup_cost +
2054 : group_input_run_cost;
2055 :
2056 : /*
2057 : * After we started producing tuples from the first group, the cost of
2058 : * producing all the tuples is given by the cost to finish processing this
2059 : * group, plus the total cost to process the remaining groups, plus the
2060 : * remaining cost of input.
2061 : */
2062 4582 : run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
2063 4582 : (input_groups - 1) + group_input_run_cost * (input_groups - 1);
2064 :
2065 : /*
2066 : * Incremental sort adds some overhead by itself. Firstly, it has to
2067 : * detect the sort groups. This is roughly equal to one extra copy and
2068 : * comparison per tuple.
2069 : */
2070 4582 : run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2071 :
2072 : /*
2073 : * Additionally, we charge double cpu_tuple_cost for each input group to
2074 : * account for the tuplesort_reset that's performed after each group.
2075 : */
2076 4582 : run_cost += 2.0 * cpu_tuple_cost * input_groups;
2077 :
2078 4582 : path->rows = input_tuples;
2079 4582 : path->startup_cost = startup_cost;
2080 4582 : path->total_cost = startup_cost + run_cost;
2081 4582 : }
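/*
 * Illustrative recap of the accounting above: with input_tuples = 10000
 * split into input_groups = 100 groups, each tuplesort handles about 100
 * tuples.  Startup pays for the input's startup cost, 1/100th of its run
 * cost, and one 100-tuple sort; the other 99 groups' sort and input costs
 * land in run_cost, together with the per-tuple group-detection overhead
 * and the 2 * cpu_tuple_cost * 100 reset charge.  This is why incremental
 * sort can have a far lower startup cost than a full sort of the same
 * input.
 */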
2082 :
2083 : /*
2084 : * cost_sort
2085 : * Determines and returns the cost of sorting a relation, including
2086 : * the cost of reading the input data.
2087 : *
2088 : * NOTE: some callers currently pass NIL for pathkeys because they
2089 : * can't conveniently supply the sort keys. Since this routine doesn't
2090 : * currently do anything with pathkeys anyway, that doesn't matter...
2091 : * but if it ever does, it should react gracefully to lack of key data.
2092 : * (Actually, the thing we'd most likely be interested in is just the number
2093 : * of sort keys, which all callers *could* supply.)
2094 : */
2095 : void
2096 1274402 : cost_sort(Path *path, PlannerInfo *root,
2097 : List *pathkeys, Cost input_cost, double tuples, int width,
2098 : Cost comparison_cost, int sort_mem,
2099 : double limit_tuples)
2100 :
2101 : {
2102 : Cost startup_cost;
2103 : Cost run_cost;
2104 :
2105 1274402 : cost_tuplesort(&startup_cost, &run_cost,
2106 : tuples, width,
2107 : comparison_cost, sort_mem,
2108 : limit_tuples);
2109 :
2110 1274402 : if (!enable_sort)
2111 1224 : startup_cost += disable_cost;
2112 :
2113 1274402 : startup_cost += input_cost;
2114 :
2115 1274402 : path->rows = tuples;
2116 1274402 : path->startup_cost = startup_cost;
2117 1274402 : path->total_cost = startup_cost + run_cost;
2118 1274402 : }
2119 :
2120 : /*
2121 : * append_nonpartial_cost
2122 : * Estimate the cost of the non-partial paths in a Parallel Append.
2123 : * The non-partial paths are assumed to be the first "numpaths" paths
2124 : * from the subpaths list, and to be in order of decreasing cost.
2125 : */
2126 : static Cost
2127 14596 : append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
2128 : {
2129 : Cost *costarr;
2130 : int arrlen;
2131 : ListCell *l;
2132 : ListCell *cell;
2133 : int path_index;
2134 : int min_index;
2135 : int max_index;
2136 :
2137 14596 : if (numpaths == 0)
2138 13172 : return 0;
2139 :
2140 : /*
2141 : * Array length is number of workers or number of relevant paths,
2142 : * whichever is less.
2143 : */
2144 1424 : arrlen = Min(parallel_workers, numpaths);
2145 1424 : costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
2146 :
2147 : /* The first few paths will each be claimed by a different worker. */
2148 1424 : path_index = 0;
2149 3834 : foreach(cell, subpaths)
2150 : {
2151 3224 : Path *subpath = (Path *) lfirst(cell);
2152 :
2153 3224 : if (path_index == arrlen)
2154 814 : break;
2155 2410 : costarr[path_index++] = subpath->total_cost;
2156 : }
2157 :
2158 : /*
2159 : * Since subpaths are sorted by decreasing cost, the last one will have
2160 : * the minimum cost.
2161 : */
2162 1424 : min_index = arrlen - 1;
2163 :
2164 : /*
2165 : * For each of the remaining subpaths, add its cost to the array element
2166 : * with minimum cost.
2167 : */
2168 1906 : for_each_cell(l, subpaths, cell)
2169 : {
2170 998 : Path *subpath = (Path *) lfirst(l);
2171 :
2172 : /* Consider only the non-partial paths */
2173 998 : if (path_index++ == numpaths)
2174 516 : break;
2175 :
2176 482 : costarr[min_index] += subpath->total_cost;
2177 :
2178 : /* Update the new min cost array index */
2179 482 : min_index = 0;
2180 1482 : for (int i = 0; i < arrlen; i++)
2181 : {
2182 1000 : if (costarr[i] < costarr[min_index])
2183 202 : min_index = i;
2184 : }
2185 : }
2186 :
2187 : /* Return the highest cost from the array */
2188 1424 : max_index = 0;
2189 3834 : for (int i = 0; i < arrlen; i++)
2190 : {
2191 2410 : if (costarr[i] > costarr[max_index])
2192 186 : max_index = i;
2193 : }
2194 :
2195 1424 : return costarr[max_index];
2196 : }
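/*
 * Worked example of the scheduling simulation above (illustrative): with
 * five nonpartial subpaths costing [10, 8, 6, 4, 2] and 2 workers, the
 * first two paths are claimed outright (costarr = [10, 8]); the rest are
 * assigned greedily to the currently cheapest slot:
 *		+6 -> [10, 14], +4 -> [14, 14], +2 -> [16, 14]
 * and the function returns the busiest worker's total, 16.
 */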
2197 :
2198 : /*
2199 : * cost_append
2200 : * Determines and returns the cost of an Append node.
2201 : */
2202 : void
2203 44156 : cost_append(AppendPath *apath)
2204 : {
2205 : ListCell *l;
2206 :
2207 44156 : apath->path.startup_cost = 0;
2208 44156 : apath->path.total_cost = 0;
2209 44156 : apath->path.rows = 0;
2210 :
2211 44156 : if (apath->subpaths == NIL)
2212 1428 : return;
2213 :
2214 42728 : if (!apath->path.parallel_aware)
2215 : {
2216 28132 : List *pathkeys = apath->path.pathkeys;
2217 :
2218 28132 : if (pathkeys == NIL)
2219 : {
2220 26162 : Path *firstsubpath = (Path *) linitial(apath->subpaths);
2221 :
2222 : /*
2223 : * For an unordered, non-parallel-aware Append we take the startup
2224 : * cost as the startup cost of the first subpath.
2225 : */
2226 26162 : apath->path.startup_cost = firstsubpath->startup_cost;
2227 :
2228 : /* Compute rows and costs as sums of subplan rows and costs. */
2229 103616 : foreach(l, apath->subpaths)
2230 : {
2231 77454 : Path *subpath = (Path *) lfirst(l);
2232 :
2233 77454 : apath->path.rows += subpath->rows;
2234 77454 : apath->path.total_cost += subpath->total_cost;
2235 : }
2236 : }
2237 : else
2238 : {
2239 : /*
2240 : * For an ordered, non-parallel-aware Append we take the startup
2241 : * cost as the sum of the subpath startup costs. This ensures
2242 : * that we don't underestimate the startup cost when a query's
2243 : * LIMIT is such that several of the children have to be run to
2244 : * satisfy it. This might be overkill --- another plausible hack
2245 : * would be to take the Append's startup cost as the maximum of
2246 : * the child startup costs. But we don't want to risk believing
2247 : * that an ORDER BY LIMIT query can be satisfied at small cost
2248 : * when the first child has small startup cost but later ones
2249 : * don't. (If we had the ability to deal with nonlinear cost
2250 : * interpolation for partial retrievals, we would not need to be
2251 : * so conservative about this.)
2252 : *
2253 : * This case is also different from the above in that we have to
2254 : * account for possibly injecting sorts into subpaths that aren't
2255 : * natively ordered.
2256 : */
2257 7692 : foreach(l, apath->subpaths)
2258 : {
2259 5722 : Path *subpath = (Path *) lfirst(l);
2260 : Path sort_path; /* dummy for result of cost_sort */
2261 :
2262 5722 : if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
2263 : {
2264 : /*
2265 : * We'll need to insert a Sort node, so include costs for
2266 : * that. We can use the parent's LIMIT if any, since we
2267 : * certainly won't pull more than that many tuples from
2268 : * any child.
2269 : */
2270 44 : cost_sort(&sort_path,
2271 : NULL, /* doesn't currently need root */
2272 : pathkeys,
2273 : subpath->total_cost,
2274 : subpath->rows,
2275 44 : subpath->pathtarget->width,
2276 : 0.0,
2277 : work_mem,
2278 : apath->limit_tuples);
2279 44 : subpath = &sort_path;
2280 : }
2281 :
2282 5722 : apath->path.rows += subpath->rows;
2283 5722 : apath->path.startup_cost += subpath->startup_cost;
2284 5722 : apath->path.total_cost += subpath->total_cost;
2285 : }
2286 : }
2287 : }
2288 : else /* parallel-aware */
2289 : {
2290 14596 : int i = 0;
2291 14596 : double parallel_divisor = get_parallel_divisor(&apath->path);
2292 :
2293 : /* Parallel-aware Append never produces ordered output. */
2294 : Assert(apath->path.pathkeys == NIL);
2295 :
2296 : /* Calculate startup cost. */
2297 59564 : foreach(l, apath->subpaths)
2298 : {
2299 44968 : Path *subpath = (Path *) lfirst(l);
2300 :
2301 : /*
2302 : * Append will start returning tuples when the child node with the
2303 : * lowest startup cost is done setting up. We consider only the
2304 : * first few subplans that immediately get a worker assigned.
2305 : */
2306 44968 : if (i == 0)
2307 14596 : apath->path.startup_cost = subpath->startup_cost;
2308 30372 : else if (i < apath->path.parallel_workers)
2309 14098 : apath->path.startup_cost = Min(apath->path.startup_cost,
2310 : subpath->startup_cost);
2311 :
2312 : /*
2313 : * Apply parallel divisor to subpaths. Scale the number of rows
2314 : * for each partial subpath based on the ratio of the parallel
2315 : * divisor originally used for the subpath to the one we adopted.
2316 : * Also add the cost of partial paths to the total cost, but
2317 : * ignore non-partial paths for now.
2318 : */
2319 44968 : if (i < apath->first_partial_path)
2320 2892 : apath->path.rows += subpath->rows / parallel_divisor;
2321 : else
2322 : {
2323 : double subpath_parallel_divisor;
2324 :
2325 42076 : subpath_parallel_divisor = get_parallel_divisor(subpath);
2326 42076 : apath->path.rows += subpath->rows * (subpath_parallel_divisor /
2327 : parallel_divisor);
2328 42076 : apath->path.total_cost += subpath->total_cost;
2329 : }
2330 :
2331 44968 : apath->path.rows = clamp_row_est(apath->path.rows);
2332 :
2333 44968 : i++;
2334 : }
2335 :
2336 : /* Add cost for non-partial subpaths. */
2337 14596 : apath->path.total_cost +=
2338 14596 : append_nonpartial_cost(apath->subpaths,
2339 : apath->first_partial_path,
2340 : apath->path.parallel_workers);
2341 : }
2342 :
2343 : /*
2344 : * Although Append does not do any selection or projection, it's not free;
2345 : * add a small per-tuple overhead.
2346 : */
2347 42728 : apath->path.total_cost +=
2348 42728 : cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
2349 : }
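/*
 * Example of the row rescaling above (a sketch, assuming
 * parallel_leader_participation, in which case get_parallel_divisor(w)
 * works out to w + Max(0, 1.0 - 0.3 * w)): a partial subpath planned for
 * 2 workers has divisor 2.4, so its row estimate was already divided by
 * 2.4.  If the parallel-aware Append instead runs with 3 workers (divisor
 * 3.1), that subpath contributes subpath->rows * (2.4 / 3.1) rows, i.e.
 * its original undivided estimate spread over the Append's own divisor.
 */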
2350 :
2351 : /*
2352 : * cost_merge_append
2353 : * Determines and returns the cost of a MergeAppend node.
2354 : *
2355 : * MergeAppend merges several pre-sorted input streams, using a heap that
2356 : * at any given instant holds the next tuple from each stream. If there
2357 : * are N streams, we need about N*log2(N) tuple comparisons to construct
2358 : * the heap at startup, and then for each output tuple, about log2(N)
2359 : * comparisons to replace the top entry.
2360 : *
2361 : * (The effective value of N will drop once some of the input streams are
2362 : * exhausted, but it seems unlikely to be worth trying to account for that.)
2363 : *
2364 : * The heap is never spilled to disk, since we assume N is not very large.
2365 : * So this is much simpler than cost_sort.
2366 : *
2367 : * As in cost_sort, we charge two operator evals per tuple comparison.
2368 : *
2369 : * 'pathkeys' is a list of sort keys
2370 : * 'n_streams' is the number of input streams
2371 : * 'input_startup_cost' is the sum of the input streams' startup costs
2372 : * 'input_total_cost' is the sum of the input streams' total costs
2373 : * 'tuples' is the number of tuples in all the streams
2374 : */
2375 : void
2376 3582 : cost_merge_append(Path *path, PlannerInfo *root,
2377 : List *pathkeys, int n_streams,
2378 : Cost input_startup_cost, Cost input_total_cost,
2379 : double tuples)
2380 : {
2381 3582 : Cost startup_cost = 0;
2382 3582 : Cost run_cost = 0;
2383 : Cost comparison_cost;
2384 : double N;
2385 : double logN;
2386 :
2387 : /*
2388 : * Avoid log(0)...
2389 : */
2390 3582 : N = (n_streams < 2) ? 2.0 : (double) n_streams;
2391 3582 : logN = LOG2(N);
2392 :
2393 : /* Assumed cost per tuple comparison */
2394 3582 : comparison_cost = 2.0 * cpu_operator_cost;
2395 :
2396 : /* Heap creation cost */
2397 3582 : startup_cost += comparison_cost * N * logN;
2398 :
2399 : /* Per-tuple heap maintenance cost */
2400 3582 : run_cost += tuples * comparison_cost * logN;
2401 :
2402 : /*
2403 : * Although MergeAppend does not do any selection or projection, it's not
2404 : * free; add a small per-tuple overhead.
2405 : */
2406 3582 : run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2407 :
2408 3582 : path->startup_cost = startup_cost + input_startup_cost;
2409 3582 : path->total_cost = startup_cost + run_cost + input_total_cost;
2410 3582 : }
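/*
 * Plugging numbers into the formulas above (illustrative, with the default
 * cpu_operator_cost = 0.0025, so comparison_cost = 0.005): for N = 4 input
 * streams, logN = 2, heap creation adds 0.005 * 4 * 2 = 0.04 to startup,
 * and each output tuple pays 0.005 * 2 = 0.01 of heap maintenance plus the
 * small per-tuple Append overhead.
 */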
2411 :
2412 : /*
2413 : * cost_material
2414 : * Determines and returns the cost of materializing a relation, including
2415 : * the cost of reading the input data.
2416 : *
2417 : * If the total volume of data to materialize exceeds work_mem, we will need
2418 : * to write it to disk, so the cost is much higher in that case.
2419 : *
2420 : * Note that here we are estimating the costs for the first scan of the
2421 : * relation, so the materialization is all overhead --- any savings will
2422 : * occur only on rescan, which is estimated in cost_rescan.
2423 : */
2424 : void
2425 395584 : cost_material(Path *path,
2426 : Cost input_startup_cost, Cost input_total_cost,
2427 : double tuples, int width)
2428 : {
2429 395584 : Cost startup_cost = input_startup_cost;
2430 395584 : Cost run_cost = input_total_cost - input_startup_cost;
2431 395584 : double nbytes = relation_byte_size(tuples, width);
2432 395584 : long work_mem_bytes = work_mem * 1024L;
2433 :
2434 395584 : path->rows = tuples;
2435 :
2436 : /*
2437 : * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2438 : * reflect bookkeeping overhead. (This rate must be more than what
2439 : * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2440 : * if it is exactly the same then there will be a cost tie between
2441 : * nestloop with A outer, materialized B inner and nestloop with B outer,
2442 : * materialized A inner. The extra cost ensures we'll prefer
2443 : * materializing the smaller rel.) Note that this is normally a good deal
2444 : * less than cpu_tuple_cost; which is OK because a Material plan node
2445 : * doesn't do qual-checking or projection, so it's got less overhead than
2446 : * most plan nodes.
2447 : */
2448 395584 : run_cost += 2 * cpu_operator_cost * tuples;
2449 :
2450 : /*
2451 : * If we will spill to disk, charge at the rate of seq_page_cost per page.
2452 : * This cost is assumed to be evenly spread through the plan run phase,
2453 : * which isn't exactly accurate but our cost model doesn't allow for
2454 : * nonuniform costs within the run phase.
2455 : */
2456 395584 : if (nbytes > work_mem_bytes)
2457 : {
2458 4206 : double npages = ceil(nbytes / BLCKSZ);
2459 :
2460 4206 : run_cost += seq_page_cost * npages;
2461 : }
2462 :
2463 395584 : path->startup_cost = startup_cost;
2464 395584 : path->total_cost = startup_cost + run_cost;
2465 395584 : }
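/*
 * For example (illustrative numbers): materializing 1 million 100-byte
 * rows is upwards of 100MB once tuple overhead is added, so with
 * work_mem = 64MB we expect to spill; at BLCKSZ = 8192 that is over 12800
 * pages, adding seq_page_cost * npages to run_cost on top of the
 * 2 * cpu_operator_cost * 1e6 = 5000 bookkeeping charge.
 */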
2466 :
2467 : /*
2468 : * cost_memoize_rescan
2469 : * Determines the estimated cost of rescanning a Memoize node.
2470 : *
2471 : * In order to estimate this, we must gain knowledge of how often we expect to
2472 : * be called and how many distinct sets of parameters we are likely to be
2473 : * called with. If we expect a good cache hit ratio, then we can set our
2474 : * costs to account for that hit ratio, plus a little bit of cost for the
2475 : * caching itself. Caching will not work out well if we expect to be called
2476 : * with too many distinct parameter values. The worst-case here is that we
2477 : * never see any parameter value twice, in which case we'd never get a cache
2478 : * hit and caching would be a complete waste of effort.
2479 : */
2480 : static void
2481 209386 : cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
2482 : Cost *rescan_startup_cost, Cost *rescan_total_cost)
2483 : {
2484 : EstimationInfo estinfo;
2485 : ListCell *lc;
2486 209386 : Cost input_startup_cost = mpath->subpath->startup_cost;
2487 209386 : Cost input_total_cost = mpath->subpath->total_cost;
2488 209386 : double tuples = mpath->subpath->rows;
2489 209386 : double calls = mpath->calls;
2490 209386 : int width = mpath->subpath->pathtarget->width;
2491 :
2492 : double hash_mem_bytes;
2493 : double est_entry_bytes;
2494 : double est_cache_entries;
2495 : double ndistinct;
2496 : double evict_ratio;
2497 : double hit_ratio;
2498 : Cost startup_cost;
2499 : Cost total_cost;
2500 :
2501 : /* available cache space */
2502 209386 : hash_mem_bytes = get_hash_memory_limit();
2503 :
2504 : /*
2505 : * Set the number of bytes each cache entry should consume in the cache.
2506 : * To get a better estimate of how many cache entries we can store at
2507 : * once, we make a call to the executor here to ask it what memory
2508 : * overheads there are for a single cache entry.
2509 : */
2510 209386 : est_entry_bytes = relation_byte_size(tuples, width) +
2511 209386 : ExecEstimateCacheEntryOverheadBytes(tuples);
2512 :
2513 : /* include the estimated width for the cache keys */
2514 442498 : foreach(lc, mpath->param_exprs)
2515 233112 : est_entry_bytes += get_expr_width(root, (Node *) lfirst(lc));
2516 :
2517 : /* estimate on the upper limit of cache entries we can hold at once */
2518 209386 : est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
2519 :
2520 : /* estimate on the distinct number of parameter values */
2521 209386 : ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
2522 : &estinfo);
2523 :
2524 : /*
2525 : * When the estimation fell back on using a default value, it's a bit too
2526 : * risky to assume that it's ok to use a Memoize node. The use of a
2527 : * default could cause us to use a Memoize node when it's really
2528 : * inappropriate to do so. If we see that this has been done, then we'll
2529 : * assume that every call will have unique parameters, which will almost
2530 : * certainly mean a MemoizePath will never survive add_path().
2531 : */
2532 209386 : if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
2533 10490 : ndistinct = calls;
2534 :
2535 : /*
2536 : * Since we've already estimated the maximum number of entries we can
2537 : * store at once and know the estimated number of distinct values we'll be
2538 : * called with, we'll take this opportunity to set the path's est_entries.
2539 : * This will ultimately determine the hash table size that the executor
2540 : * will use. If we leave this at zero, the executor will just choose the
2541 : * size itself. Really this is not the right place to do this, but it's
2542 : * convenient since everything is already calculated.
2543 : */
2544 209386 : mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
2545 : PG_UINT32_MAX);
2546 :
2547 : /*
2548 : * When the number of distinct parameter values is above the amount we can
2549 : * store in the cache, then we'll have to evict some entries from the
2550 : * cache. This is not free. Here we estimate how often we'll incur the
2551 : * cost of that eviction.
2552 : */
2553 209386 : evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
2554 :
2555 : /*
2556 : * In order to estimate how costly a single scan will be, we need to
2557 : * attempt to estimate what the cache hit ratio will be. To do that we
2558 : * must look at how many scans are estimated in total for this node and
2559 : * how many of those scans we expect to get a cache hit.
2560 : */
2561 418772 : hit_ratio = ((calls - ndistinct) / calls) *
2562 209386 : (est_cache_entries / Max(ndistinct, est_cache_entries));
2563 :
2564 : Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2565 :
2566 : /*
2567 : * Set the total_cost accounting for the expected cache hit ratio. We
2568 : * also add on a cpu_operator_cost to account for a cache lookup. This
2569 : * will happen regardless of whether it's a cache hit or not.
2570 : */
2571 209386 : total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2572 :
2573 : /* Now adjust the total cost to account for cache evictions */
2574 :
2575 : /* Charge a cpu_tuple_cost for evicting the actual cache entry */
2576 209386 : total_cost += cpu_tuple_cost * evict_ratio;
2577 :
2578 : /*
2579 : * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2580 : * The per-tuple eviction is really just a pfree, so charging a whole
2581 : * cpu_operator_cost seems a little excessive.
2582 : */
2583 209386 : total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
2584 :
2585 : /*
2586 : * Now adjust for storing things in the cache, since that's not free
2587 : * either. Everything must go in the cache. We don't proportion this
2588 : * over any ratio, just apply it once for the scan. We charge a
2589 : * cpu_tuple_cost for the creation of the cache entry and also a
2590 : * cpu_operator_cost for each tuple we expect to cache.
2591 : */
2592 209386 : total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2593 :
2594 : /*
2595 : * Getting the first row must also be proportioned according to the
2596 : * expected cache hit ratio.
2597 : */
2598 209386 : startup_cost = input_startup_cost * (1.0 - hit_ratio);
2599 :
2600 : /*
2601 : * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2602 : * which we'll do regardless of whether it was a cache hit or not.
2603 : */
2604 209386 : startup_cost += cpu_tuple_cost;
2605 :
2606 209386 : *rescan_startup_cost = startup_cost;
2607 209386 : *rescan_total_cost = total_cost;
2608 209386 : }
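/*
 * Worked example of the hit-ratio math above (illustrative): with
 * calls = 1000, ndistinct = 100, and est_cache_entries = 50,
 *		evict_ratio = 1.0 - 50/100 = 0.5
 *		hit_ratio = ((1000 - 100) / 1000) * (50 / 100) = 0.45
 * so a rescan is charged 55% of the subpath's total cost plus the lookup,
 * eviction and caching charges.  If the cache could instead hold all 100
 * entries, evict_ratio would drop to 0 and hit_ratio would rise to 0.9.
 */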
2609 :
2610 : /*
2611 : * cost_agg
2612 : * Determines and returns the cost of performing an Agg plan node,
2613 : * including the cost of its input.
2614 : *
2615 : * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2616 : * we are using a hashed Agg node just to do grouping).
2617 : *
2618 : * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2619 : * are for appropriately-sorted input.
2620 : */
2621 : void
2622 60182 : cost_agg(Path *path, PlannerInfo *root,
2623 : AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
2624 : int numGroupCols, double numGroups,
2625 : List *quals,
2626 : Cost input_startup_cost, Cost input_total_cost,
2627 : double input_tuples, double input_width)
2628 : {
2629 : double output_tuples;
2630 : Cost startup_cost;
2631 : Cost total_cost;
2632 : AggClauseCosts dummy_aggcosts;
2633 :
2634 : /* Use all-zero per-aggregate costs if NULL is passed */
2635 60182 : if (aggcosts == NULL)
2636 : {
2637 : Assert(aggstrategy == AGG_HASHED);
2638 68340 : MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
2639 11390 : aggcosts = &dummy_aggcosts;
2640 : }
2641 :
2642 : /*
2643 : * The transCost.per_tuple component of aggcosts should be charged once
2644 : * per input tuple, corresponding to the costs of evaluating the aggregate
2645 : * transfns and their input expressions. The finalCost.per_tuple component
2646 : * is charged once per output tuple, corresponding to the costs of
2647 : * evaluating the finalfns. Startup costs are of course charged but once.
2648 : *
2649 : * If we are grouping, we charge an additional cpu_operator_cost per
2650 : * grouping column per input tuple for grouping comparisons.
2651 : *
2652 : * We will produce a single output tuple if not grouping, and a tuple per
2653 : * group otherwise. We charge cpu_tuple_cost for each output tuple.
2654 : *
2655 : * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2656 : * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2657 : * input path is already sorted appropriately, AGG_SORTED should be
2658 : * preferred (since it has no risk of memory overflow). This will happen
2659 : * as long as the computed total costs are indeed exactly equal --- but if
2660 : * there's roundoff error we might do the wrong thing. So be sure that
2661 : * the computations below form the same intermediate values in the same
2662 : * order.
2663 : */
2664 60182 : if (aggstrategy == AGG_PLAIN)
2665 : {
2666 31042 : startup_cost = input_total_cost;
2667 31042 : startup_cost += aggcosts->transCost.startup;
2668 31042 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2669 31042 : startup_cost += aggcosts->finalCost.startup;
2670 31042 : startup_cost += aggcosts->finalCost.per_tuple;
2671 : /* we aren't grouping */
2672 31042 : total_cost = startup_cost + cpu_tuple_cost;
2673 31042 : output_tuples = 1;
2674 : }
2675 29140 : else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2676 : {
2677 : /* Here we are able to deliver output on-the-fly */
2678 10130 : startup_cost = input_startup_cost;
2679 10130 : total_cost = input_total_cost;
2680 10130 : if (aggstrategy == AGG_MIXED && !enable_hashagg)
2681 : {
2682 456 : startup_cost += disable_cost;
2683 456 : total_cost += disable_cost;
2684 : }
2685 : /* calcs phrased this way to match HASHED case, see note above */
2686 10130 : total_cost += aggcosts->transCost.startup;
2687 10130 : total_cost += aggcosts->transCost.per_tuple * input_tuples;
2688 10130 : total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2689 10130 : total_cost += aggcosts->finalCost.startup;
2690 10130 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2691 10130 : total_cost += cpu_tuple_cost * numGroups;
2692 10130 : output_tuples = numGroups;
2693 : }
2694 : else
2695 : {
2696 : /* must be AGG_HASHED */
2697 19010 : startup_cost = input_total_cost;
2698 19010 : if (!enable_hashagg)
2699 1374 : startup_cost += disable_cost;
2700 19010 : startup_cost += aggcosts->transCost.startup;
2701 19010 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2702 : /* cost of computing hash value */
2703 19010 : startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2704 19010 : startup_cost += aggcosts->finalCost.startup;
2705 :
2706 19010 : total_cost = startup_cost;
2707 19010 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2708 : /* cost of retrieving from hash table */
2709 19010 : total_cost += cpu_tuple_cost * numGroups;
2710 19010 : output_tuples = numGroups;
2711 : }
2712 :
2713 : /*
2714 : * Add the disk costs of hash aggregation that spills to disk.
2715 : *
2716 : * Groups that go into the hash table stay in memory until finalized, so
2717 : * spilling and reprocessing tuples doesn't incur additional invocations
2718 : * of transCost or finalCost. Furthermore, the computed hash value is
2719 : * stored with the spilled tuples, so we don't incur extra invocations of
2720 : * the hash function.
2721 : *
2722 : * Hash Agg begins returning tuples after the first batch is complete.
2723 : * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2724 : * accrue reads only to total_cost.
2725 : */
2726 60182 : if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
2727 : {
2728 : double pages;
2729 19854 : double pages_written = 0.0;
2730 19854 : double pages_read = 0.0;
2731 : double spill_cost;
2732 : double hashentrysize;
2733 : double nbatches;
2734 : Size mem_limit;
2735 : uint64 ngroups_limit;
2736 : int num_partitions;
2737 : int depth;
2738 :
2739 : /*
2740 : * Estimate number of batches based on the computed limits. If less
2741 : * than or equal to one, all groups are expected to fit in memory;
2742 : * otherwise we expect to spill.
2743 : */
2744 19854 : hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2745 : input_width,
2746 : aggcosts->transitionSpace);
2747 19854 : hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2748 : &ngroups_limit, &num_partitions);
2749 :
2750 19854 : nbatches = Max((numGroups * hashentrysize) / mem_limit,
2751 : numGroups / ngroups_limit);
2752 :
2753 19854 : nbatches = Max(ceil(nbatches), 1.0);
2754 19854 : num_partitions = Max(num_partitions, 2);
2755 :
2756 : /*
2757 : * The number of partitions can change at different levels of
2758 : * recursion; but for the purposes of this calculation assume it stays
2759 : * constant.
2760 : */
2761 19854 : depth = ceil(log(nbatches) / log(num_partitions));
2762 :
2763 : /*
2764 : * Estimate number of pages read and written. For each level of
2765 : * recursion, a tuple must be written and then later read.
2766 : */
2767 19854 : pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
2768 19854 : pages_written = pages_read = pages * depth;
2769 :
2770 : /*
2771 : * HashAgg has somewhat worse IO behavior than Sort on typical
2772 : * hardware/OS combinations. Account for this with a generic penalty.
2773 : */
2774 19854 : pages_read *= 2.0;
2775 19854 : pages_written *= 2.0;
2776 :
2777 19854 : startup_cost += pages_written * random_page_cost;
2778 19854 : total_cost += pages_written * random_page_cost;
2779 19854 : total_cost += pages_read * seq_page_cost;
2780 :
2781 : /* account for CPU cost of spilling a tuple and reading it back */
2782 19854 : spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2783 19854 : startup_cost += spill_cost;
2784 19854 : total_cost += spill_cost;
2785 : }
2786 :
2787 : /*
2788 : * If there are quals (HAVING quals), account for their cost and
2789 : * selectivity.
2790 : */
2791 60182 : if (quals)
2792 : {
2793 : QualCost qual_cost;
2794 :
2795 3704 : cost_qual_eval(&qual_cost, quals, root);
2796 3704 : startup_cost += qual_cost.startup;
2797 3704 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2798 :
2799 3704 : output_tuples = clamp_row_est(output_tuples *
2800 3704 : clauselist_selectivity(root,
2801 : quals,
2802 : 0,
2803 : JOIN_INNER,
2804 : NULL));
2805 : }
2806 :
2807 60182 : path->rows = output_tuples;
2808 60182 : path->startup_cost = startup_cost;
2809 60182 : path->total_cost = total_cost;
2810 60182 : }
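/*
 * To illustrate the spill accounting above (made-up numbers): if
 * numGroups * hashentrysize is 16 times mem_limit, then nbatches = 16, and
 * with num_partitions = 4 the recursion depth is ceil(log(16)/log(4)) = 2,
 * so every input tuple is expected to be written and read twice.  Those
 * page counts are then doubled as the generic HashAgg I/O penalty before
 * being charged at random_page_cost (writes) and seq_page_cost (reads).
 */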
2811 :
2812 : /*
2813 : * get_windowclause_startup_tuples
2814 : * Estimate how many tuples we'll need to fetch from a WindowAgg's
2815 : * subnode before we can output the first WindowAgg tuple.
2816 : *
2817 : * How many tuples need to be read depends on the WindowClause. For example,
2818 : * a WindowClause with no PARTITION BY and no ORDER BY requires that all
2819 : * subnode tuples are read and aggregated before the WindowAgg can output
2820 : * anything. If there's a PARTITION BY, then we only need to look at tuples
2821 : * in the first partition. Here we attempt to estimate just how many
2822 : * 'input_tuples' the WindowAgg will need to read for the given WindowClause
2823 : * before the first tuple can be output.
2824 : */
2825 : static double
2826 2592 : get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc,
2827 : double input_tuples)
2828 : {
2829 2592 : int frameOptions = wc->frameOptions;
2830 : double partition_tuples;
2831 : double return_tuples;
2832 : double peer_tuples;
2833 :
2834 : /*
2835 : * First, figure out how many partitions there are likely to be and set
2836 : * partition_tuples according to that estimate.
2837 : */
2838 2592 : if (wc->partitionClause != NIL)
2839 : {
2840 : double num_partitions;
2841 636 : List *partexprs = get_sortgrouplist_exprs(wc->partitionClause,
2842 636 : root->parse->targetList);
2843 :
2844 636 : num_partitions = estimate_num_groups(root, partexprs, input_tuples,
2845 : NULL, NULL);
2846 636 : list_free(partexprs);
2847 :
2848 636 : partition_tuples = input_tuples / num_partitions;
2849 : }
2850 : else
2851 : {
2852 : /* all tuples belong to the same partition */
2853 1956 : partition_tuples = input_tuples;
2854 : }
2855 :
2856 : /* estimate the number of tuples in each peer group */
2857 2592 : if (wc->orderClause != NIL)
2858 : {
2859 : double num_groups;
2860 : List *orderexprs;
2861 :
2862 2198 : orderexprs = get_sortgrouplist_exprs(wc->orderClause,
2863 2198 : root->parse->targetList);
2864 :
2865 : /* estimate how many peer groups there are in the partition */
2866 2198 : num_groups = estimate_num_groups(root, orderexprs,
2867 : partition_tuples, NULL,
2868 : NULL);
2869 2198 : list_free(orderexprs);
2870 2198 : peer_tuples = partition_tuples / num_groups;
2871 : }
2872 : else
2873 : {
2874 : /* no ORDER BY so only 1 tuple belongs in each peer group */
2875 394 : peer_tuples = 1.0;
2876 : }
2877 :
2878 2592 : if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
2879 : {
2880 : /* include all partition rows */
2881 346 : return_tuples = partition_tuples;
2882 : }
2883 2246 : else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW)
2884 : {
2885 1256 : if (frameOptions & FRAMEOPTION_ROWS)
2886 : {
2887 : /* just count the current row */
2888 540 : return_tuples = 1.0;
2889 : }
2890 716 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
2891 : {
2892 : /*
2893 : * When in RANGE/GROUPS mode, it's more complex. If there's no
2894 : * ORDER BY, then all rows in the partition are peers; otherwise
2895 : * we'll need to read the first group of peers.
2896 : */
2897 716 : if (wc->orderClause == NIL)
2898 256 : return_tuples = partition_tuples;
2899 : else
2900 460 : return_tuples = peer_tuples;
2901 : }
2902 : else
2903 : {
2904 : /*
2905 : * Something new we don't support yet? This needs attention.
2906 : * We'll just return 1.0 in the meantime.
2907 : */
2908 : Assert(false);
2909 0 : return_tuples = 1.0;
2910 : }
2911 : }
2912 990 : else if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
2913 : {
2914 : /*
2915 : * BETWEEN ... AND N PRECEDING will only need to read the WindowAgg's
2916 : * subnode after N ROWS/RANGES/GROUPS. N can be 0, but not negative,
2917 : * so we'll just assume only the current row needs to be read to fetch
2918 : * the first WindowAgg row.
2919 : */
2920 108 : return_tuples = 1.0;
2921 : }
2922 882 : else if (frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
2923 : {
2924 882 : Const *endOffset = (Const *) wc->endOffset;
2925 : double end_offset_value;
2926 :
2927 : /* try to figure out the value specified in the endOffset. */
2928 882 : if (IsA(endOffset, Const))
2929 : {
2930 870 : if (endOffset->constisnull)
2931 : {
2932 : /*
2933 : * NULLs are not allowed, but currently, there's no code to
2934 : * error out if there's a NULL Const. We'll only discover
2935 : * this during execution. For now, just pretend everything is
2936 : * fine and assume that just the first row/range/group will be
2937 : * needed.
2938 : */
2939 0 : end_offset_value = 1.0;
2940 : }
2941 : else
2942 : {
2943 870 : switch (endOffset->consttype)
2944 : {
2945 24 : case INT2OID:
2946 24 : end_offset_value =
2947 24 : (double) DatumGetInt16(endOffset->constvalue);
2948 24 : break;
2949 132 : case INT4OID:
2950 132 : end_offset_value =
2951 132 : (double) DatumGetInt32(endOffset->constvalue);
2952 132 : break;
2953 372 : case INT8OID:
2954 372 : end_offset_value =
2955 372 : (double) DatumGetInt64(endOffset->constvalue);
2956 372 : break;
2957 342 : default:
2958 342 : end_offset_value =
2959 342 : partition_tuples / peer_tuples *
2960 : DEFAULT_INEQ_SEL;
2961 342 : break;
2962 : }
2963 : }
2964 : }
2965 : else
2966 : {
2967 : /*
2968 :              * When the end bound is not a Const, we'll just have to guess, so
2969 :              * we fall back on DEFAULT_INEQ_SEL.
2970 : */
2971 12 : end_offset_value =
2972 12 : partition_tuples / peer_tuples * DEFAULT_INEQ_SEL;
2973 : }
2974 :
2975 882 : if (frameOptions & FRAMEOPTION_ROWS)
2976 : {
2977 : /* include the N FOLLOWING and the current row */
2978 222 : return_tuples = end_offset_value + 1.0;
2979 : }
2980 660 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
2981 : {
2982 :             /* include the N FOLLOWING ranges/groups and the initial range/group */
2983 660 : return_tuples = peer_tuples * (end_offset_value + 1.0);
2984 : }
2985 : else
2986 : {
2987 : /*
2988 : * Something new we don't support yet? This needs attention.
2989 : * We'll just return 1.0 in the meantime.
2990 : */
2991 : Assert(false);
2992 0 : return_tuples = 1.0;
2993 : }
2994 : }
2995 : else
2996 : {
2997 : /*
2998 : * Something new we don't support yet? This needs attention. We'll
2999 : * just return 1.0 in the meantime.
3000 : */
3001 : Assert(false);
3002 0 : return_tuples = 1.0;
3003 : }
3004 :
3005 2592 : if (wc->partitionClause != NIL || wc->orderClause != NIL)
3006 : {
3007 : /*
3008 : * Cap the return value to the estimated partition tuples and account
3009 : * for the extra tuple WindowAgg will need to read to confirm the next
3010 : * tuple does not belong to the same partition or peer group.
3011 : */
3012 2352 : return_tuples = Min(return_tuples + 1.0, partition_tuples);
3013 : }
3014 : else
3015 : {
3016 : /*
3017 : * Cap the return value so it's never higher than the expected tuples
3018 : * in the partition.
3019 : */
3020 240 : return_tuples = Min(return_tuples, partition_tuples);
3021 : }
3022 :
3023 : /*
3024 : * We needn't worry about any EXCLUDE options as those only exclude rows
3025 : * from being aggregated, not from being read from the WindowAgg's
3026 : * subnode.
3027 : */
3028 :
3029 2592 : return clamp_row_est(return_tuples);
3030 : }
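To make the estimation above concrete, here is a worked example with invented
numbers (none of these values come from a real plan): suppose input_tuples =
10000, the PARTITION BY columns are estimated to form 100 groups, the ORDER BY
columns 20 peer groups per partition, and the frame is RANGE BETWEEN UNBOUNDED
PRECEDING AND CURRENT ROW:

    partition_tuples = 10000 / 100 = 100
    peer_tuples      = 100 / 20    = 5
    return_tuples    = peer_tuples = 5            (RANGE mode, END_CURRENT_ROW)
    return_tuples    = Min(5 + 1, 100) = 6        (cap, plus one look-ahead tuple)

so the WindowAgg is expected to read about 6 input rows before it can emit its
first output row.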
3031 :
3032 : /*
3033 : * cost_windowagg
3034 : * Determines and returns the cost of performing a WindowAgg plan node,
3035 : * including the cost of its input.
3036 : *
3037 : * Input is assumed already properly sorted.
3038 : */
3039 : void
3040 2592 : cost_windowagg(Path *path, PlannerInfo *root,
3041 : List *windowFuncs, WindowClause *winclause,
3042 : Cost input_startup_cost, Cost input_total_cost,
3043 : double input_tuples)
3044 : {
3045 : Cost startup_cost;
3046 : Cost total_cost;
3047 : double startup_tuples;
3048 : int numPartCols;
3049 : int numOrderCols;
3050 : ListCell *lc;
3051 :
3052 2592 : numPartCols = list_length(winclause->partitionClause);
3053 2592 : numOrderCols = list_length(winclause->orderClause);
3054 :
3055 2592 : startup_cost = input_startup_cost;
3056 2592 : total_cost = input_total_cost;
3057 :
3058 : /*
3059 : * Window functions are assumed to cost their stated execution cost, plus
3060 : * the cost of evaluating their input expressions, per tuple. Since they
3061 : * may in fact evaluate their inputs at multiple rows during each cycle,
3062 : * this could be a drastic underestimate; but without a way to know how
3063 : * many rows the window function will fetch, it's hard to do better. In
3064 : * any case, it's a good estimate for all the built-in window functions,
3065 : * so we'll just do this for now.
3066 : */
3067 5874 : foreach(lc, windowFuncs)
3068 : {
3069 3282 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
3070 : Cost wfunccost;
3071 : QualCost argcosts;
3072 :
3073 3282 : argcosts.startup = argcosts.per_tuple = 0;
3074 3282 : add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
3075 : &argcosts);
3076 3282 : startup_cost += argcosts.startup;
3077 3282 : wfunccost = argcosts.per_tuple;
3078 :
3079 : /* also add the input expressions' cost to per-input-row costs */
3080 3282 : cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
3081 3282 : startup_cost += argcosts.startup;
3082 3282 : wfunccost += argcosts.per_tuple;
3083 :
3084 : /*
3085 : * Add the filter's cost to per-input-row costs. XXX We should reduce
3086 : * input expression costs according to filter selectivity.
3087 : */
3088 3282 : cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
3089 3282 : startup_cost += argcosts.startup;
3090 3282 : wfunccost += argcosts.per_tuple;
3091 :
3092 3282 : total_cost += wfunccost * input_tuples;
3093 : }
3094 :
3095 : /*
3096 : * We also charge cpu_operator_cost per grouping column per tuple for
3097 : * grouping comparisons, plus cpu_tuple_cost per tuple for general
3098 : * overhead.
3099 : *
3100 : * XXX this neglects costs of spooling the data to disk when it overflows
3101 : * work_mem. Sooner or later that should get accounted for.
3102 : */
3103 2592 : total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
3104 2592 : total_cost += cpu_tuple_cost * input_tuples;
3105 :
3106 2592 : path->rows = input_tuples;
3107 2592 : path->startup_cost = startup_cost;
3108 2592 : path->total_cost = total_cost;
3109 :
3110 : /*
3111 : * Also, take into account how many tuples we need to read from the
3112 : * subnode in order to produce the first tuple from the WindowAgg. To do
3113 :      * this we prorate the run cost (total cost not including startup cost)
3114 :      * over the estimated startup tuples.  We already included the startup
3115 :      * cost of the subnode, so we only need to do this when the estimated
3116 :      * startup tuple count is above 1.0.
3117 : */
3118 2592 : startup_tuples = get_windowclause_startup_tuples(root, winclause,
3119 : input_tuples);
3120 :
3121 2592 : if (startup_tuples > 1.0)
3122 2340 : path->startup_cost += (total_cost - startup_cost) / input_tuples *
3123 2340 : (startup_tuples - 1.0);
3124 2592 : }
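As a minimal standalone sketch (not part of costsize.c; the helper name and
all numbers are invented for illustration), the proration applied above can be
reproduced like this:

    #include <stdio.h>

    /* Prorate run cost over the tuples read before the first output row. */
    static double
    prorated_startup_cost(double startup_cost, double total_cost,
                          double input_tuples, double startup_tuples)
    {
        if (startup_tuples > 1.0)
            startup_cost += (total_cost - startup_cost) / input_tuples *
                (startup_tuples - 1.0);
        return startup_cost;
    }

    int
    main(void)
    {
        /* run cost of 1000 units over 10000 tuples, 6 startup tuples */
        printf("%g\n", prorated_startup_cost(0.0, 1000.0, 10000.0, 6.0));
        /* prints 0.5: five extra tuples at 0.1 units apiece */
        return 0;
    }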
3125 :
3126 : /*
3127 : * cost_group
3128 : * Determines and returns the cost of performing a Group plan node,
3129 : * including the cost of its input.
3130 : *
3131 : * Note: caller must ensure that input costs are for appropriately-sorted
3132 : * input.
3133 : */
3134 : void
3135 4378 : cost_group(Path *path, PlannerInfo *root,
3136 : int numGroupCols, double numGroups,
3137 : List *quals,
3138 : Cost input_startup_cost, Cost input_total_cost,
3139 : double input_tuples)
3140 : {
3141 : double output_tuples;
3142 : Cost startup_cost;
3143 : Cost total_cost;
3144 :
3145 4378 : output_tuples = numGroups;
3146 4378 : startup_cost = input_startup_cost;
3147 4378 : total_cost = input_total_cost;
3148 :
3149 : /*
3150 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3151 :      * all columns get compared for most of the tuples.
3152 : */
3153 4378 : total_cost += cpu_operator_cost * input_tuples * numGroupCols;
3154 :
3155 : /*
3156 : * If there are quals (HAVING quals), account for their cost and
3157 : * selectivity.
3158 : */
3159 4378 : if (quals)
3160 : {
3161 : QualCost qual_cost;
3162 :
3163 0 : cost_qual_eval(&qual_cost, quals, root);
3164 0 : startup_cost += qual_cost.startup;
3165 0 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
3166 :
3167 0 : output_tuples = clamp_row_est(output_tuples *
3168 0 : clauselist_selectivity(root,
3169 : quals,
3170 : 0,
3171 : JOIN_INNER,
3172 : NULL));
3173 : }
3174 :
3175 4378 : path->rows = output_tuples;
3176 4378 : path->startup_cost = startup_cost;
3177 4378 : path->total_cost = total_cost;
3178 4378 : }
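For example (invented numbers, using the default cpu_operator_cost of 0.0025):
with input_tuples = 50000, numGroupCols = 2, and numGroups = 1000,

    total_cost += 0.0025 * 50000 * 2 = 250

and if HAVING quals costing 0.005 per tuple with selectivity 0.2 were present,

    total_cost    += 0.005 * 1000 = 5            (evaluated once per group)
    output_tuples  = clamp_row_est(1000 * 0.2) = 200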
3179 :
3180 : /*
3181 : * initial_cost_nestloop
3182 : * Preliminary estimate of the cost of a nestloop join path.
3183 : *
3184 : * This must quickly produce lower-bound estimates of the path's startup and
3185 : * total costs. If we are unable to eliminate the proposed path from
3186 : * consideration using the lower bounds, final_cost_nestloop will be called
3187 : * to obtain the final estimates.
3188 : *
3189 : * The exact division of labor between this function and final_cost_nestloop
3190 : * is private to them, and represents a tradeoff between speed of the initial
3191 : * estimate and getting a tight lower bound. We choose to not examine the
3192 : * join quals here, since that's by far the most expensive part of the
3193 : * calculations. The end result is that CPU-cost considerations must be
3194 : * left for the second phase; and for SEMI/ANTI joins, we must also postpone
3195 : * incorporation of the inner path's run cost.
3196 : *
3197 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3198 : * other data to be used by final_cost_nestloop
3199 : * 'jointype' is the type of join to be performed
3200 : * 'outer_path' is the outer input to the join
3201 : * 'inner_path' is the inner input to the join
3202 : * 'extra' contains miscellaneous information about the join
3203 : */
3204 : void
3205 2062632 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
3206 : JoinType jointype,
3207 : Path *outer_path, Path *inner_path,
3208 : JoinPathExtraData *extra)
3209 : {
3210 2062632 : Cost startup_cost = 0;
3211 2062632 : Cost run_cost = 0;
3212 2062632 : double outer_path_rows = outer_path->rows;
3213 : Cost inner_rescan_start_cost;
3214 : Cost inner_rescan_total_cost;
3215 : Cost inner_run_cost;
3216 : Cost inner_rescan_run_cost;
3217 :
3218 : /* estimate costs to rescan the inner relation */
3219 2062632 : cost_rescan(root, inner_path,
3220 : &inner_rescan_start_cost,
3221 : &inner_rescan_total_cost);
3222 :
3223 : /* cost of source data */
3224 :
3225 : /*
3226 : * NOTE: clearly, we must pay both outer and inner paths' startup_cost
3227 : * before we can start returning tuples, so the join's startup cost is
3228 : * their sum. We'll also pay the inner path's rescan startup cost
3229 : * multiple times.
3230 : */
3231 2062632 : startup_cost += outer_path->startup_cost + inner_path->startup_cost;
3232 2062632 : run_cost += outer_path->total_cost - outer_path->startup_cost;
3233 2062632 : if (outer_path_rows > 1)
3234 1401082 : run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
3235 :
3236 2062632 : inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
3237 2062632 : inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
3238 :
3239 2062632 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
3240 2024508 : extra->inner_unique)
3241 : {
3242 : /*
3243 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3244 : * executor will stop after the first match.
3245 : *
3246 : * Getting decent estimates requires inspection of the join quals,
3247 : * which we choose to postpone to final_cost_nestloop.
3248 : */
3249 :
3250 : /* Save private data for final_cost_nestloop */
3251 950936 : workspace->inner_run_cost = inner_run_cost;
3252 950936 : workspace->inner_rescan_run_cost = inner_rescan_run_cost;
3253 : }
3254 : else
3255 : {
3256 : /* Normal case; we'll scan whole input rel for each outer row */
3257 1111696 : run_cost += inner_run_cost;
3258 1111696 : if (outer_path_rows > 1)
3259 768740 : run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
3260 : }
3261 :
3262 : /* CPU costs left for later */
3263 :
3264 : /* Public result fields */
3265 2062632 : workspace->startup_cost = startup_cost;
3266 2062632 : workspace->total_cost = startup_cost + run_cost;
3267 : /* Save private data for final_cost_nestloop */
3268 2062632 : workspace->run_cost = run_cost;
3269 2062632 : }
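A worked instance of the accounting above (all numbers invented): with 100
outer rows, an inner first-scan run cost of 50, an inner rescan run cost of
10, and a negligible inner rescan startup cost, an ordinary (non-SEMI/ANTI,
non-unique-inner) join accumulates

    run_cost = outer run cost
             + 50                  (first inner scan)
             + 99 * 10 = 990       (rescans for the other 99 outer rows)

while for SEMI/ANTI or unique-inner joins those inner-scan terms are deferred
to final_cost_nestloop.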
3270 :
3271 : /*
3272 : * final_cost_nestloop
3273 : * Final estimate of the cost and result size of a nestloop join path.
3274 : *
3275 : * 'path' is already filled in except for the rows and cost fields
3276 : * 'workspace' is the result from initial_cost_nestloop
3277 : * 'extra' contains miscellaneous information about the join
3278 : */
3279 : void
3280 1011428 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
3281 : JoinCostWorkspace *workspace,
3282 : JoinPathExtraData *extra)
3283 : {
3284 1011428 : Path *outer_path = path->jpath.outerjoinpath;
3285 1011428 : Path *inner_path = path->jpath.innerjoinpath;
3286 1011428 : double outer_path_rows = outer_path->rows;
3287 1011428 : double inner_path_rows = inner_path->rows;
3288 1011428 : Cost startup_cost = workspace->startup_cost;
3289 1011428 : Cost run_cost = workspace->run_cost;
3290 : Cost cpu_per_tuple;
3291 : QualCost restrict_qual_cost;
3292 : double ntuples;
3293 :
3294 : /* Protect some assumptions below that rowcounts aren't zero */
3295 1011428 : if (outer_path_rows <= 0)
3296 0 : outer_path_rows = 1;
3297 1011428 : if (inner_path_rows <= 0)
3298 588 : inner_path_rows = 1;
3299 : /* Mark the path with the correct row estimate */
3300 1011428 : if (path->jpath.path.param_info)
3301 25238 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3302 : else
3303 986190 : path->jpath.path.rows = path->jpath.path.parent->rows;
3304 :
3305 : /* For partial paths, scale row estimate. */
3306 1011428 : if (path->jpath.path.parallel_workers > 0)
3307 : {
3308 7352 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3309 :
3310 7352 : path->jpath.path.rows =
3311 7352 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3312 : }
3313 :
3314 : /*
3315 : * We could include disable_cost in the preliminary estimate, but that
3316 : * would amount to optimizing for the case where the join method is
3317 : * disabled, which doesn't seem like the way to bet.
3318 : */
3319 1011428 : if (!enable_nestloop)
3320 3180 : startup_cost += disable_cost;
3321 :
3322 : /* cost of inner-relation source data (we already dealt with outer rel) */
3323 :
3324 1011428 : if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
3325 985794 : extra->inner_unique)
3326 649876 : {
3327 : /*
3328 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3329 : * executor will stop after the first match.
3330 : */
3331 649876 : Cost inner_run_cost = workspace->inner_run_cost;
3332 649876 : Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3333 : double outer_matched_rows;
3334 : double outer_unmatched_rows;
3335 : Selectivity inner_scan_frac;
3336 :
3337 : /*
3338 : * For an outer-rel row that has at least one match, we can expect the
3339 : * inner scan to stop after a fraction 1/(match_count+1) of the inner
3340 : * rows, if the matches are evenly distributed. Since they probably
3341 : * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3342 : * that fraction. (If we used a larger fuzz factor, we'd have to
3343 : * clamp inner_scan_frac to at most 1.0; but since match_count is at
3344 : * least 1, no such clamp is needed now.)
3345 : */
3346 649876 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3347 649876 : outer_unmatched_rows = outer_path_rows - outer_matched_rows;
3348 649876 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3349 :
3350 : /*
3351 : * Compute number of tuples processed (not number emitted!). First,
3352 : * account for successfully-matched outer rows.
3353 : */
3354 649876 : ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
3355 :
3356 : /*
3357 : * Now we need to estimate the actual costs of scanning the inner
3358 : * relation, which may be quite a bit less than N times inner_run_cost
3359 : * due to early scan stops. We consider two cases. If the inner path
3360 : * is an indexscan using all the joinquals as indexquals, then an
3361 : * unmatched outer row results in an indexscan returning no rows,
3362 : * which is probably quite cheap. Otherwise, the executor will have
3363 : * to scan the whole inner rel for an unmatched row; not so cheap.
3364 : */
3365 649876 : if (has_indexed_join_quals(path))
3366 : {
3367 : /*
3368 : * Successfully-matched outer rows will only require scanning
3369 : * inner_scan_frac of the inner relation. In this case, we don't
3370 : * need to charge the full inner_run_cost even when that's more
3371 : * than inner_rescan_run_cost, because we can assume that none of
3372 : * the inner scans ever scan the whole inner relation. So it's
3373 : * okay to assume that all the inner scan executions can be
3374 : * fractions of the full cost, even if materialization is reducing
3375 : * the rescan cost. At this writing, it's impossible to get here
3376 : * for a materialized inner scan, so inner_run_cost and
3377 : * inner_rescan_run_cost will be the same anyway; but just in
3378 : * case, use inner_run_cost for the first matched tuple and
3379 : * inner_rescan_run_cost for additional ones.
3380 : */
3381 120380 : run_cost += inner_run_cost * inner_scan_frac;
3382 120380 : if (outer_matched_rows > 1)
3383 13532 : run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3384 :
3385 : /*
3386 : * Add the cost of inner-scan executions for unmatched outer rows.
3387 : * We estimate this as the same cost as returning the first tuple
3388 : * of a nonempty scan. We consider that these are all rescans,
3389 : * since we used inner_run_cost once already.
3390 : */
3391 120380 : run_cost += outer_unmatched_rows *
3392 120380 : inner_rescan_run_cost / inner_path_rows;
3393 :
3394 : /*
3395 : * We won't be evaluating any quals at all for unmatched rows, so
3396 : * don't add them to ntuples.
3397 : */
3398 : }
3399 : else
3400 : {
3401 : /*
3402 : * Here, a complicating factor is that rescans may be cheaper than
3403 : * first scans. If we never scan all the way to the end of the
3404 : * inner rel, it might be (depending on the plan type) that we'd
3405 : * never pay the whole inner first-scan run cost. However it is
3406 : * difficult to estimate whether that will happen (and it could
3407 : * not happen if there are any unmatched outer rows!), so be
3408 : * conservative and always charge the whole first-scan cost once.
3409 : * We consider this charge to correspond to the first unmatched
3410 : * outer row, unless there isn't one in our estimate, in which
3411 : * case blame it on the first matched row.
3412 : */
3413 :
3414 : /* First, count all unmatched join tuples as being processed */
3415 529496 : ntuples += outer_unmatched_rows * inner_path_rows;
3416 :
3417 : /* Now add the forced full scan, and decrement appropriate count */
3418 529496 : run_cost += inner_run_cost;
3419 529496 : if (outer_unmatched_rows >= 1)
3420 515238 : outer_unmatched_rows -= 1;
3421 : else
3422 14258 : outer_matched_rows -= 1;
3423 :
3424 : /* Add inner run cost for additional outer tuples having matches */
3425 529496 : if (outer_matched_rows > 0)
3426 172738 : run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3427 :
3428 : /* Add inner run cost for additional unmatched outer tuples */
3429 529496 : if (outer_unmatched_rows > 0)
3430 337344 : run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3431 : }
3432 : }
3433 : else
3434 : {
3435 : /* Normal-case source costs were included in preliminary estimate */
3436 :
3437 : /* Compute number of tuples processed (not number emitted!) */
3438 361552 : ntuples = outer_path_rows * inner_path_rows;
3439 : }
3440 :
3441 : /* CPU costs */
3442 1011428 : cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
3443 1011428 : startup_cost += restrict_qual_cost.startup;
3444 1011428 : cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
3445 1011428 : run_cost += cpu_per_tuple * ntuples;
3446 :
3447 : /* tlist eval costs are paid per output row, not per tuple scanned */
3448 1011428 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3449 1011428 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3450 :
3451 1011428 : path->jpath.path.startup_cost = startup_cost;
3452 1011428 : path->jpath.path.total_cost = startup_cost + run_cost;
3453 1011428 : }
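To illustrate the SEMI/ANTI arithmetic above (invented numbers): with
outer_path_rows = 1000, outer_match_frac = 0.3, match_count = 4, and
inner_path_rows = 50,

    outer_matched_rows = rint(1000 * 0.3) = 300
    inner_scan_frac    = 2.0 / (4 + 1)    = 0.4
    ntuples            = 300 * 50 * 0.4   = 6000

and, when the join quals are not all indexed, the 700 unmatched outer rows add
a further 700 * 50 = 35000 processed tuples, plus one full inner scan, 699
unmatched rescans at the full rescan cost, and 300 matched rescans at 0.4 of
the rescan cost.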
3454 :
3455 : /*
3456 : * initial_cost_mergejoin
3457 : * Preliminary estimate of the cost of a mergejoin path.
3458 : *
3459 : * This must quickly produce lower-bound estimates of the path's startup and
3460 : * total costs. If we are unable to eliminate the proposed path from
3461 : * consideration using the lower bounds, final_cost_mergejoin will be called
3462 : * to obtain the final estimates.
3463 : *
3464 : * The exact division of labor between this function and final_cost_mergejoin
3465 : * is private to them, and represents a tradeoff between speed of the initial
3466 : * estimate and getting a tight lower bound. We choose to not examine the
3467 : * join quals here, except for obtaining the scan selectivity estimate which
3468 : * is really essential (but fortunately, use of caching keeps the cost of
3469 : * getting that down to something reasonable).
3470 : * We also assume that cost_sort is cheap enough to use here.
3471 : *
3472 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3473 : * other data to be used by final_cost_mergejoin
3474 : * 'jointype' is the type of join to be performed
3475 : * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3476 : * 'outer_path' is the outer input to the join
3477 : * 'inner_path' is the inner input to the join
3478 : * 'outersortkeys' is the list of sort keys for the outer path
3479 : * 'innersortkeys' is the list of sort keys for the inner path
3480 : * 'extra' contains miscellaneous information about the join
3481 : *
3482 : * Note: outersortkeys and innersortkeys should be NIL if no explicit
3483 : * sort is needed because the respective source path is already ordered.
3484 : */
3485 : void
3486 944748 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3487 : JoinType jointype,
3488 : List *mergeclauses,
3489 : Path *outer_path, Path *inner_path,
3490 : List *outersortkeys, List *innersortkeys,
3491 : JoinPathExtraData *extra)
3492 : {
3493 944748 : Cost startup_cost = 0;
3494 944748 : Cost run_cost = 0;
3495 944748 : double outer_path_rows = outer_path->rows;
3496 944748 : double inner_path_rows = inner_path->rows;
3497 : Cost inner_run_cost;
3498 : double outer_rows,
3499 : inner_rows,
3500 : outer_skip_rows,
3501 : inner_skip_rows;
3502 : Selectivity outerstartsel,
3503 : outerendsel,
3504 : innerstartsel,
3505 : innerendsel;
3506 : Path sort_path; /* dummy for result of cost_sort */
3507 :
3508 : /* Protect some assumptions below that rowcounts aren't zero */
3509 944748 : if (outer_path_rows <= 0)
3510 96 : outer_path_rows = 1;
3511 944748 : if (inner_path_rows <= 0)
3512 126 : inner_path_rows = 1;
3513 :
3514 : /*
3515 : * A merge join will stop as soon as it exhausts either input stream
3516 : * (unless it's an outer join, in which case the outer side has to be
3517 : * scanned all the way anyway). Estimate fraction of the left and right
3518 : * inputs that will actually need to be scanned. Likewise, we can
3519 : * estimate the number of rows that will be skipped before the first join
3520 : * pair is found, which should be factored into startup cost. We use only
3521 : * the first (most significant) merge clause for this purpose. Since
3522 : * mergejoinscansel() is a fairly expensive computation, we cache the
3523 : * results in the merge clause RestrictInfo.
3524 : */
3525 944748 : if (mergeclauses && jointype != JOIN_FULL)
3526 938830 : {
3527 938830 : RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3528 : List *opathkeys;
3529 : List *ipathkeys;
3530 : PathKey *opathkey;
3531 : PathKey *ipathkey;
3532 : MergeScanSelCache *cache;
3533 :
3534 : /* Get the input pathkeys to determine the sort-order details */
3535 938830 : opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3536 938830 : ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3537 : Assert(opathkeys);
3538 : Assert(ipathkeys);
3539 938830 : opathkey = (PathKey *) linitial(opathkeys);
3540 938830 : ipathkey = (PathKey *) linitial(ipathkeys);
3541 : /* debugging check */
3542 938830 : if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
3543 938830 : opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
3544 938830 : opathkey->pk_strategy != ipathkey->pk_strategy ||
3545 938830 : opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
3546 0 : elog(ERROR, "left and right pathkeys do not match in mergejoin");
3547 :
3548 : /* Get the selectivity with caching */
3549 938830 : cache = cached_scansel(root, firstclause, opathkey);
3550 :
3551 938830 : if (bms_is_subset(firstclause->left_relids,
3552 938830 : outer_path->parent->relids))
3553 : {
3554 : /* left side of clause is outer */
3555 491378 : outerstartsel = cache->leftstartsel;
3556 491378 : outerendsel = cache->leftendsel;
3557 491378 : innerstartsel = cache->rightstartsel;
3558 491378 : innerendsel = cache->rightendsel;
3559 : }
3560 : else
3561 : {
3562 : /* left side of clause is inner */
3563 447452 : outerstartsel = cache->rightstartsel;
3564 447452 : outerendsel = cache->rightendsel;
3565 447452 : innerstartsel = cache->leftstartsel;
3566 447452 : innerendsel = cache->leftendsel;
3567 : }
3568 938830 : if (jointype == JOIN_LEFT ||
3569 : jointype == JOIN_ANTI)
3570 : {
3571 187854 : outerstartsel = 0.0;
3572 187854 : outerendsel = 1.0;
3573 : }
3574 750976 : else if (jointype == JOIN_RIGHT ||
3575 : jointype == JOIN_RIGHT_ANTI)
3576 : {
3577 183776 : innerstartsel = 0.0;
3578 183776 : innerendsel = 1.0;
3579 : }
3580 : }
3581 : else
3582 : {
3583 : /* cope with clauseless or full mergejoin */
3584 5918 : outerstartsel = innerstartsel = 0.0;
3585 5918 : outerendsel = innerendsel = 1.0;
3586 : }
3587 :
3588 : /*
3589 : * Convert selectivities to row counts. We force outer_rows and
3590 : * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3591 : */
3592 944748 : outer_skip_rows = rint(outer_path_rows * outerstartsel);
3593 944748 : inner_skip_rows = rint(inner_path_rows * innerstartsel);
3594 944748 : outer_rows = clamp_row_est(outer_path_rows * outerendsel);
3595 944748 : inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3596 :
3597 : Assert(outer_skip_rows <= outer_rows);
3598 : Assert(inner_skip_rows <= inner_rows);
3599 :
3600 : /*
3601 : * Readjust scan selectivities to account for above rounding. This is
3602 : * normally an insignificant effect, but when there are only a few rows in
3603 : * the inputs, failing to do this makes for a large percentage error.
3604 : */
3605 944748 : outerstartsel = outer_skip_rows / outer_path_rows;
3606 944748 : innerstartsel = inner_skip_rows / inner_path_rows;
3607 944748 : outerendsel = outer_rows / outer_path_rows;
3608 944748 : innerendsel = inner_rows / inner_path_rows;
3609 :
3610 : Assert(outerstartsel <= outerendsel);
3611 : Assert(innerstartsel <= innerendsel);
3612 :
3613 : /* cost of source data */
3614 :
3615 944748 : if (outersortkeys) /* do we need to sort outer? */
3616 : {
3617 451488 : cost_sort(&sort_path,
3618 : root,
3619 : outersortkeys,
3620 : outer_path->total_cost,
3621 : outer_path_rows,
3622 451488 : outer_path->pathtarget->width,
3623 : 0.0,
3624 : work_mem,
3625 : -1.0);
3626 451488 : startup_cost += sort_path.startup_cost;
3627 451488 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3628 451488 : * outerstartsel;
3629 451488 : run_cost += (sort_path.total_cost - sort_path.startup_cost)
3630 451488 : * (outerendsel - outerstartsel);
3631 : }
3632 : else
3633 : {
3634 493260 : startup_cost += outer_path->startup_cost;
3635 493260 : startup_cost += (outer_path->total_cost - outer_path->startup_cost)
3636 493260 : * outerstartsel;
3637 493260 : run_cost += (outer_path->total_cost - outer_path->startup_cost)
3638 493260 : * (outerendsel - outerstartsel);
3639 : }
3640 :
3641 944748 : if (innersortkeys) /* do we need to sort inner? */
3642 : {
3643 742324 : cost_sort(&sort_path,
3644 : root,
3645 : innersortkeys,
3646 : inner_path->total_cost,
3647 : inner_path_rows,
3648 742324 : inner_path->pathtarget->width,
3649 : 0.0,
3650 : work_mem,
3651 : -1.0);
3652 742324 : startup_cost += sort_path.startup_cost;
3653 742324 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3654 742324 : * innerstartsel;
3655 742324 : inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3656 742324 : * (innerendsel - innerstartsel);
3657 : }
3658 : else
3659 : {
3660 202424 : startup_cost += inner_path->startup_cost;
3661 202424 : startup_cost += (inner_path->total_cost - inner_path->startup_cost)
3662 202424 : * innerstartsel;
3663 202424 : inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
3664 202424 : * (innerendsel - innerstartsel);
3665 : }
3666 :
3667 : /*
3668 : * We can't yet determine whether rescanning occurs, or whether
3669 : * materialization of the inner input should be done. The minimum
3670 : * possible inner input cost, regardless of rescan and materialization
3671 : * considerations, is inner_run_cost. We include that in
3672 : * workspace->total_cost, but not yet in run_cost.
3673 : */
3674 :
3675 : /* CPU costs left for later */
3676 :
3677 : /* Public result fields */
3678 944748 : workspace->startup_cost = startup_cost;
3679 944748 : workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3680 : /* Save private data for final_cost_mergejoin */
3681 944748 : workspace->run_cost = run_cost;
3682 944748 : workspace->inner_run_cost = inner_run_cost;
3683 944748 : workspace->outer_rows = outer_rows;
3684 944748 : workspace->inner_rows = inner_rows;
3685 944748 : workspace->outer_skip_rows = outer_skip_rows;
3686 944748 : workspace->inner_skip_rows = inner_skip_rows;
3687 944748 : }
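The rounding readjustment above matters mostly for tiny inputs. For example
(invented numbers): with outer_path_rows = 7, outerstartsel = 0.10, and
outerendsel = 0.90,

    outer_skip_rows = rint(7 * 0.10) = 1
    outer_rows      = clamp_row_est(7 * 0.90) = 6

and the readjusted selectivities become 1/7 ≈ 0.143 and 6/7 ≈ 0.857, so the
startup/run split charged against the (possibly sorted) outer input agrees
with the whole rows we actually expect to skip and to scan.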
3688 :
3689 : /*
3690 : * final_cost_mergejoin
3691 : * Final estimate of the cost and result size of a mergejoin path.
3692 : *
3693 : * Unlike other costsize functions, this routine makes two actual decisions:
3694 : * whether the executor will need to do mark/restore, and whether we should
3695 : * materialize the inner path. It would be logically cleaner to build
3696 : * separate paths testing these alternatives, but that would require repeating
3697 : * most of the cost calculations, which are not all that cheap. Since the
3698 : * choice will not affect output pathkeys or startup cost, only total cost,
3699 : * there is no possibility of wanting to keep more than one path. So it seems
3700 : * best to make the decisions here and record them in the path's
3701 : * skip_mark_restore and materialize_inner fields.
3702 : *
3703 : * Mark/restore overhead is usually required, but can be skipped if we know
3704 : * that the executor need find only one match per outer tuple, and that the
3705 : * mergeclauses are sufficient to identify a match.
3706 : *
3707 : * We materialize the inner path if we need mark/restore and either the inner
3708 : * path can't support mark/restore, or it's cheaper to use an interposed
3709 : * Material node to handle mark/restore.
3710 : *
3711 : * 'path' is already filled in except for the rows and cost fields and
3712 : * skip_mark_restore and materialize_inner
3713 : * 'workspace' is the result from initial_cost_mergejoin
3714 : * 'extra' contains miscellaneous information about the join
3715 : */
3716 : void
3717 237486 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3718 : JoinCostWorkspace *workspace,
3719 : JoinPathExtraData *extra)
3720 : {
3721 237486 : Path *outer_path = path->jpath.outerjoinpath;
3722 237486 : Path *inner_path = path->jpath.innerjoinpath;
3723 237486 : double inner_path_rows = inner_path->rows;
3724 237486 : List *mergeclauses = path->path_mergeclauses;
3725 237486 : List *innersortkeys = path->innersortkeys;
3726 237486 : Cost startup_cost = workspace->startup_cost;
3727 237486 : Cost run_cost = workspace->run_cost;
3728 237486 : Cost inner_run_cost = workspace->inner_run_cost;
3729 237486 : double outer_rows = workspace->outer_rows;
3730 237486 : double inner_rows = workspace->inner_rows;
3731 237486 : double outer_skip_rows = workspace->outer_skip_rows;
3732 237486 : double inner_skip_rows = workspace->inner_skip_rows;
3733 : Cost cpu_per_tuple,
3734 : bare_inner_cost,
3735 : mat_inner_cost;
3736 : QualCost merge_qual_cost;
3737 : QualCost qp_qual_cost;
3738 : double mergejointuples,
3739 : rescannedtuples;
3740 : double rescanratio;
3741 :
3742 : /* Protect some assumptions below that rowcounts aren't zero */
3743 237486 : if (inner_path_rows <= 0)
3744 90 : inner_path_rows = 1;
3745 :
3746 : /* Mark the path with the correct row estimate */
3747 237486 : if (path->jpath.path.param_info)
3748 646 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3749 : else
3750 236840 : path->jpath.path.rows = path->jpath.path.parent->rows;
3751 :
3752 : /* For partial paths, scale row estimate. */
3753 237486 : if (path->jpath.path.parallel_workers > 0)
3754 : {
3755 8976 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3756 :
3757 8976 : path->jpath.path.rows =
3758 8976 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3759 : }
3760 :
3761 : /*
3762 : * We could include disable_cost in the preliminary estimate, but that
3763 : * would amount to optimizing for the case where the join method is
3764 : * disabled, which doesn't seem like the way to bet.
3765 : */
3766 237486 : if (!enable_mergejoin)
3767 0 : startup_cost += disable_cost;
3768 :
3769 : /*
3770 : * Compute cost of the mergequals and qpquals (other restriction clauses)
3771 : * separately.
3772 : */
3773 237486 : cost_qual_eval(&merge_qual_cost, mergeclauses, root);
3774 237486 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3775 237486 : qp_qual_cost.startup -= merge_qual_cost.startup;
3776 237486 : qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
3777 :
3778 : /*
3779 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3780 : * executor will stop scanning for matches after the first match. When
3781 : * all the joinclauses are merge clauses, this means we don't ever need to
3782 : * back up the merge, and so we can skip mark/restore overhead.
3783 : */
3784 237486 : if ((path->jpath.jointype == JOIN_SEMI ||
3785 233970 : path->jpath.jointype == JOIN_ANTI ||
3786 339326 : extra->inner_unique) &&
3787 111314 : (list_length(path->jpath.joinrestrictinfo) ==
3788 111314 : list_length(path->path_mergeclauses)))
3789 95594 : path->skip_mark_restore = true;
3790 : else
3791 141892 : path->skip_mark_restore = false;
3792 :
3793 : /*
3794 : * Get approx # tuples passing the mergequals. We use approx_tuple_count
3795 : * here because we need an estimate done with JOIN_INNER semantics.
3796 : */
3797 237486 : mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
3798 :
3799 : /*
3800 : * When there are equal merge keys in the outer relation, the mergejoin
3801 : * must rescan any matching tuples in the inner relation. This means
3802 : * re-fetching inner tuples; we have to estimate how often that happens.
3803 : *
3804 : * For regular inner and outer joins, the number of re-fetches can be
3805 : * estimated approximately as size of merge join output minus size of
3806 : * inner relation. Assume that the distinct key values are 1, 2, ..., and
3807 : * denote the number of values of each key in the outer relation as m1,
3808 : * m2, ...; in the inner relation, n1, n2, ... Then we have
3809 : *
3810 : * size of join = m1 * n1 + m2 * n2 + ...
3811 : *
3812 : * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
3813 : * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
3814 : * relation
3815 : *
3816 : * This equation works correctly for outer tuples having no inner match
3817 : * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
3818 : * are effectively subtracting those from the number of rescanned tuples,
3819 : * when we should not. Can we do better without expensive selectivity
3820 : * computations?
3821 : *
3822 : * The whole issue is moot if we are working from a unique-ified outer
3823 : * input, or if we know we don't need to mark/restore at all.
3824 : */
3825 237486 : if (IsA(outer_path, UniquePath) || path->skip_mark_restore)
3826 96428 : rescannedtuples = 0;
3827 : else
3828 : {
3829 141058 : rescannedtuples = mergejointuples - inner_path_rows;
3830 : /* Must clamp because of possible underestimate */
3831 141058 : if (rescannedtuples < 0)
3832 56690 : rescannedtuples = 0;
3833 : }
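    /*
     * A tiny numeric instance of the identity above (invented numbers): with
     * key multiplicities (m1, n1) = (2, 3) and (m2, n2) = (1, 4), size of
     * join = 2*3 + 1*4 = 10, size of inner = 3 + 4 = 7, and the number of
     * rescanned tuples is (2-1)*3 + (1-1)*4 = 3 = 10 - 7.
     */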
3834 :
3835 : /*
3836 : * We'll inflate various costs this much to account for rescanning. Note
3837 : * that this is to be multiplied by something involving inner_rows, or
3838 : * another number related to the portion of the inner rel we'll scan.
3839 : */
3840 237486 : rescanratio = 1.0 + (rescannedtuples / inner_rows);
3841 :
3842 : /*
3843 : * Decide whether we want to materialize the inner input to shield it from
3844 :      * mark/restore and from performing re-fetches.  Our cost model for regular
3845 : * re-fetches is that a re-fetch costs the same as an original fetch,
3846 : * which is probably an overestimate; but on the other hand we ignore the
3847 : * bookkeeping costs of mark/restore. Not clear if it's worth developing
3848 : * a more refined model. So we just need to inflate the inner run cost by
3849 : * rescanratio.
3850 : */
3851 237486 : bare_inner_cost = inner_run_cost * rescanratio;
3852 :
3853 : /*
3854 : * When we interpose a Material node the re-fetch cost is assumed to be
3855 : * just cpu_operator_cost per tuple, independently of the underlying
3856 : * plan's cost; and we charge an extra cpu_operator_cost per original
3857 : * fetch as well. Note that we're assuming the materialize node will
3858 : * never spill to disk, since it only has to remember tuples back to the
3859 : * last mark. (If there are a huge number of duplicates, our other cost
3860 : * factors will make the path so expensive that it probably won't get
3861 : * chosen anyway.) So we don't use cost_rescan here.
3862 : *
3863 : * Note: keep this estimate in sync with create_mergejoin_plan's labeling
3864 : * of the generated Material node.
3865 : */
3866 237486 : mat_inner_cost = inner_run_cost +
3867 237486 : cpu_operator_cost * inner_rows * rescanratio;
3868 :
3869 : /*
3870 : * If we don't need mark/restore at all, we don't need materialization.
3871 : */
3872 237486 : if (path->skip_mark_restore)
3873 95594 : path->materialize_inner = false;
3874 :
3875 : /*
3876 : * Prefer materializing if it looks cheaper, unless the user has asked to
3877 : * suppress materialization.
3878 : */
3879 141892 : else if (enable_material && mat_inner_cost < bare_inner_cost)
3880 2476 : path->materialize_inner = true;
3881 :
3882 : /*
3883 : * Even if materializing doesn't look cheaper, we *must* do it if the
3884 : * inner path is to be used directly (without sorting) and it doesn't
3885 : * support mark/restore.
3886 : *
3887 : * Since the inner side must be ordered, and only Sorts and IndexScans can
3888 : * create order to begin with, and they both support mark/restore, you
3889 : * might think there's no problem --- but you'd be wrong. Nestloop and
3890 : * merge joins can *preserve* the order of their inputs, so they can be
3891 : * selected as the input of a mergejoin, and they don't support
3892 : * mark/restore at present.
3893 : *
3894 : * We don't test the value of enable_material here, because
3895 : * materialization is required for correctness in this case, and turning
3896 : * it off does not entitle us to deliver an invalid plan.
3897 : */
3898 139416 : else if (innersortkeys == NIL &&
3899 6328 : !ExecSupportsMarkRestore(inner_path))
3900 890 : path->materialize_inner = true;
3901 :
3902 : /*
3903 : * Also, force materializing if the inner path is to be sorted and the
3904 : * sort is expected to spill to disk. This is because the final merge
3905 : * pass can be done on-the-fly if it doesn't have to support mark/restore.
3906 : * We don't try to adjust the cost estimates for this consideration,
3907 : * though.
3908 : *
3909 : * Since materialization is a performance optimization in this case,
3910 : * rather than necessary for correctness, we skip it if enable_material is
3911 : * off.
3912 : */
3913 138526 : else if (enable_material && innersortkeys != NIL &&
3914 133040 : relation_byte_size(inner_path_rows,
3915 133040 : inner_path->pathtarget->width) >
3916 133040 : (work_mem * 1024L))
3917 212 : path->materialize_inner = true;
3918 : else
3919 138314 : path->materialize_inner = false;
3920 :
3921 : /* Charge the right incremental cost for the chosen case */
3922 237486 : if (path->materialize_inner)
3923 3578 : run_cost += mat_inner_cost;
3924 : else
3925 233908 : run_cost += bare_inner_cost;
3926 :
3927 : /* CPU costs */
3928 :
3929 : /*
3930 : * The number of tuple comparisons needed is approximately number of outer
3931 : * rows plus number of inner rows plus number of rescanned tuples (can we
3932 : * refine this?). At each one, we need to evaluate the mergejoin quals.
3933 : */
3934 237486 : startup_cost += merge_qual_cost.startup;
3935 237486 : startup_cost += merge_qual_cost.per_tuple *
3936 237486 : (outer_skip_rows + inner_skip_rows * rescanratio);
3937 237486 : run_cost += merge_qual_cost.per_tuple *
3938 237486 : ((outer_rows - outer_skip_rows) +
3939 237486 : (inner_rows - inner_skip_rows) * rescanratio);
3940 :
3941 : /*
3942 : * For each tuple that gets through the mergejoin proper, we charge
3943 : * cpu_tuple_cost plus the cost of evaluating additional restriction
3944 : * clauses that are to be applied at the join. (This is pessimistic since
3945 : * not all of the quals may get evaluated at each tuple.)
3946 : *
3947 : * Note: we could adjust for SEMI/ANTI joins skipping some qual
3948 : * evaluations here, but it's probably not worth the trouble.
3949 : */
3950 237486 : startup_cost += qp_qual_cost.startup;
3951 237486 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
3952 237486 : run_cost += cpu_per_tuple * mergejointuples;
3953 :
3954 : /* tlist eval costs are paid per output row, not per tuple scanned */
3955 237486 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3956 237486 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3957 :
3958 237486 : path->jpath.path.startup_cost = startup_cost;
3959 237486 : path->jpath.path.total_cost = startup_cost + run_cost;
3960 237486 : }
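Putting invented numbers through the materialization decision above: if
mergejointuples = 10000, inner_path_rows = 4000 (all of it scanned, so
inner_rows = 4000), and inner_run_cost = 400, then

    rescannedtuples = 10000 - 4000 = 6000
    rescanratio     = 1.0 + 6000 / 4000 = 2.5
    bare_inner_cost = 400 * 2.5 = 1000
    mat_inner_cost  = 400 + 0.0025 * 4000 * 2.5 = 425

so with the default cpu_operator_cost of 0.0025 and enable_material on, the
interposed Material node looks much cheaper and materialize_inner is set.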
3961 :
3962 : /*
3963 : * run mergejoinscansel() with caching
3964 : */
3965 : static MergeScanSelCache *
3966 938830 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
3967 : {
3968 : MergeScanSelCache *cache;
3969 : ListCell *lc;
3970 : Selectivity leftstartsel,
3971 : leftendsel,
3972 : rightstartsel,
3973 : rightendsel;
3974 : MemoryContext oldcontext;
3975 :
3976 : /* Do we have this result already? */
3977 938872 : foreach(lc, rinfo->scansel_cache)
3978 : {
3979 848336 : cache = (MergeScanSelCache *) lfirst(lc);
3980 848336 : if (cache->opfamily == pathkey->pk_opfamily &&
3981 848336 : cache->collation == pathkey->pk_eclass->ec_collation &&
3982 848336 : cache->strategy == pathkey->pk_strategy &&
3983 848294 : cache->nulls_first == pathkey->pk_nulls_first)
3984 848294 : return cache;
3985 : }
3986 :
3987 : /* Nope, do the computation */
3988 90536 : mergejoinscansel(root,
3989 90536 : (Node *) rinfo->clause,
3990 : pathkey->pk_opfamily,
3991 : pathkey->pk_strategy,
3992 90536 : pathkey->pk_nulls_first,
3993 : &leftstartsel,
3994 : &leftendsel,
3995 : &rightstartsel,
3996 : &rightendsel);
3997 :
3998 : /* Cache the result in suitably long-lived workspace */
3999 90536 : oldcontext = MemoryContextSwitchTo(root->planner_cxt);
4000 :
4001 90536 : cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
4002 90536 : cache->opfamily = pathkey->pk_opfamily;
4003 90536 : cache->collation = pathkey->pk_eclass->ec_collation;
4004 90536 : cache->strategy = pathkey->pk_strategy;
4005 90536 : cache->nulls_first = pathkey->pk_nulls_first;
4006 90536 : cache->leftstartsel = leftstartsel;
4007 90536 : cache->leftendsel = leftendsel;
4008 90536 : cache->rightstartsel = rightstartsel;
4009 90536 : cache->rightendsel = rightendsel;
4010 :
4011 90536 : rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
4012 :
4013 90536 : MemoryContextSwitchTo(oldcontext);
4014 :
4015 90536 : return cache;
4016 : }
4017 :
4018 : /*
4019 : * initial_cost_hashjoin
4020 : * Preliminary estimate of the cost of a hashjoin path.
4021 : *
4022 : * This must quickly produce lower-bound estimates of the path's startup and
4023 : * total costs. If we are unable to eliminate the proposed path from
4024 : * consideration using the lower bounds, final_cost_hashjoin will be called
4025 : * to obtain the final estimates.
4026 : *
4027 : * The exact division of labor between this function and final_cost_hashjoin
4028 : * is private to them, and represents a tradeoff between speed of the initial
4029 : * estimate and getting a tight lower bound. We choose to not examine the
4030 : * join quals here (other than by counting the number of hash clauses),
4031 : * so we can't do much with CPU costs. We do assume that
4032 : * ExecChooseHashTableSize is cheap enough to use here.
4033 : *
4034 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
4035 : * other data to be used by final_cost_hashjoin
4036 : * 'jointype' is the type of join to be performed
4037 : * 'hashclauses' is the list of joinclauses to be used as hash clauses
4038 : * 'outer_path' is the outer input to the join
4039 : * 'inner_path' is the inner input to the join
4040 : * 'extra' contains miscellaneous information about the join
4041 : * 'parallel_hash' indicates that inner_path is partial and that a shared
4042 : * hash table will be built in parallel
4043 : */
4044 : void
4045 505398 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
4046 : JoinType jointype,
4047 : List *hashclauses,
4048 : Path *outer_path, Path *inner_path,
4049 : JoinPathExtraData *extra,
4050 : bool parallel_hash)
4051 : {
4052 505398 : Cost startup_cost = 0;
4053 505398 : Cost run_cost = 0;
4054 505398 : double outer_path_rows = outer_path->rows;
4055 505398 : double inner_path_rows = inner_path->rows;
4056 505398 : double inner_path_rows_total = inner_path_rows;
4057 505398 : int num_hashclauses = list_length(hashclauses);
4058 : int numbuckets;
4059 : int numbatches;
4060 : int num_skew_mcvs;
4061 : size_t space_allowed; /* unused */
4062 :
4063 : /* cost of source data */
4064 505398 : startup_cost += outer_path->startup_cost;
4065 505398 : run_cost += outer_path->total_cost - outer_path->startup_cost;
4066 505398 : startup_cost += inner_path->total_cost;
4067 :
4068 : /*
4069 : * Cost of computing hash function: must do it once per input tuple. We
4070 : * charge one cpu_operator_cost for each column's hash function. Also,
4071 : * tack on one cpu_tuple_cost per inner row, to model the costs of
4072 : * inserting the row into the hashtable.
4073 : *
4074 : * XXX when a hashclause is more complex than a single operator, we really
4075 : * should charge the extra eval costs of the left or right side, as
4076 : * appropriate, here. This seems more work than it's worth at the moment.
4077 : */
4078 505398 : startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
4079 505398 : * inner_path_rows;
4080 505398 : run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
4081 :
4082 : /*
4083 : * If this is a parallel hash build, then the value we have for
4084 : * inner_rows_total currently refers only to the rows returned by each
4085 : * participant. For shared hash table size estimation, we need the total
4086 : * number, so we need to undo the division.
4087 : */
4088 505398 : if (parallel_hash)
4089 11574 : inner_path_rows_total *= get_parallel_divisor(inner_path);
4090 :
4091 : /*
4092 : * Get hash table size that executor would use for inner relation.
4093 : *
4094 : * XXX for the moment, always assume that skew optimization will be
4095 : * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
4096 : * trying to determine that for sure.
4097 : *
4098 : * XXX at some point it might be interesting to try to account for skew
4099 : * optimization in the cost estimate, but for now, we don't.
4100 : */
4101 505398 : ExecChooseHashTableSize(inner_path_rows_total,
4102 505398 : inner_path->pathtarget->width,
4103 : true, /* useskew */
4104 : parallel_hash, /* try_combined_hash_mem */
4105 : outer_path->parallel_workers,
4106 : &space_allowed,
4107 : &numbuckets,
4108 : &numbatches,
4109 : &num_skew_mcvs);
4110 :
4111 : /*
4112 : * If inner relation is too big then we will need to "batch" the join,
4113 : * which implies writing and reading most of the tuples to disk an extra
4114 : * time. Charge seq_page_cost per page, since the I/O should be nice and
4115 : * sequential. Writing the inner rel counts as startup cost, all the rest
4116 : * as run cost.
4117 : */
4118 505398 : if (numbatches > 1)
4119 : {
4120 4300 : double outerpages = page_size(outer_path_rows,
4121 4300 : outer_path->pathtarget->width);
4122 4300 : double innerpages = page_size(inner_path_rows,
4123 4300 : inner_path->pathtarget->width);
4124 :
4125 4300 : startup_cost += seq_page_cost * innerpages;
4126 4300 : run_cost += seq_page_cost * (innerpages + 2 * outerpages);
4127 : }
4128 :
4129 : /* CPU costs left for later */
4130 :
4131 : /* Public result fields */
4132 505398 : workspace->startup_cost = startup_cost;
4133 505398 : workspace->total_cost = startup_cost + run_cost;
4134 : /* Save private data for final_cost_hashjoin */
4135 505398 : workspace->run_cost = run_cost;
4136 505398 : workspace->numbuckets = numbuckets;
4137 505398 : workspace->numbatches = numbatches;
4138 505398 : workspace->inner_rows_total = inner_path_rows_total;
4139 505398 : }
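For a feel of the batching charge above (invented numbers, ignoring the
per-tuple header overhead that the real page-size computation includes): if
numbatches > 1, the outer side is 100000 rows of 100 bytes ≈ 1221 pages at the
8192-byte block size, and the inner side is 50000 such rows ≈ 611 pages, then
with seq_page_cost = 1.0

    startup_cost += 611                        (write the inner rel)
    run_cost     += 611 + 2 * 1221 = 3053      (read inner; write and read outer)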
4140 :
4141 : /*
4142 : * final_cost_hashjoin
4143 : * Final estimate of the cost and result size of a hashjoin path.
4144 : *
4145 : * Note: the numbatches estimate is also saved into 'path' for use later
4146 : *
4147 : * 'path' is already filled in except for the rows and cost fields and
4148 : * num_batches
4149 : * 'workspace' is the result from initial_cost_hashjoin
4150 : * 'extra' contains miscellaneous information about the join
4151 : */
4152 : void
4153 209176 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4154 : JoinCostWorkspace *workspace,
4155 : JoinPathExtraData *extra)
4156 : {
4157 209176 : Path *outer_path = path->jpath.outerjoinpath;
4158 209176 : Path *inner_path = path->jpath.innerjoinpath;
4159 209176 : double outer_path_rows = outer_path->rows;
4160 209176 : double inner_path_rows = inner_path->rows;
4161 209176 : double inner_path_rows_total = workspace->inner_rows_total;
4162 209176 : List *hashclauses = path->path_hashclauses;
4163 209176 : Cost startup_cost = workspace->startup_cost;
4164 209176 : Cost run_cost = workspace->run_cost;
4165 209176 : int numbuckets = workspace->numbuckets;
4166 209176 : int numbatches = workspace->numbatches;
4167 : Cost cpu_per_tuple;
4168 : QualCost hash_qual_cost;
4169 : QualCost qp_qual_cost;
4170 : double hashjointuples;
4171 : double virtualbuckets;
4172 : Selectivity innerbucketsize;
4173 : Selectivity innermcvfreq;
4174 : ListCell *hcl;
4175 :
4176 : /* Mark the path with the correct row estimate */
4177 209176 : if (path->jpath.path.param_info)
4178 1206 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4179 : else
4180 207970 : path->jpath.path.rows = path->jpath.path.parent->rows;
4181 :
4182 : /* For partial paths, scale row estimate. */
4183 209176 : if (path->jpath.path.parallel_workers > 0)
4184 : {
4185 10578 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
4186 :
4187 10578 : path->jpath.path.rows =
4188 10578 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
4189 : }
4190 :
4191 : /*
4192 : * We could include disable_cost in the preliminary estimate, but that
4193 : * would amount to optimizing for the case where the join method is
4194 : * disabled, which doesn't seem like the way to bet.
4195 : */
4196 209176 : if (!enable_hashjoin)
4197 204 : startup_cost += disable_cost;
4198 :
4199 : /* mark the path with estimated # of batches */
4200 209176 : path->num_batches = numbatches;
4201 :
4202 : /* store the total number of tuples (sum of partial row estimates) */
4203 209176 : path->inner_rows_total = inner_path_rows_total;
4204 :
4205 : /* and compute the number of "virtual" buckets in the whole join */
4206 209176 : virtualbuckets = (double) numbuckets * (double) numbatches;
4207 :
4208 : /*
4209 : * Determine bucketsize fraction and MCV frequency for the inner relation.
4210 : * We use the smallest bucketsize or MCV frequency estimated for any
4211 : * individual hashclause; this is undoubtedly conservative.
4212 : *
4213 : * BUT: if inner relation has been unique-ified, we can assume it's good
4214 : * for hashing. This is important both because it's the right answer, and
4215 : * because we avoid contaminating the cache with a value that's wrong for
4216 : * non-unique-ified paths.
4217 : */
4218 209176 : if (IsA(inner_path, UniquePath))
4219 : {
4220 1854 : innerbucketsize = 1.0 / virtualbuckets;
4221 1854 : innermcvfreq = 0.0;
4222 : }
4223 : else
4224 : {
4225 207322 : innerbucketsize = 1.0;
4226 207322 : innermcvfreq = 1.0;
4227 435808 : foreach(hcl, hashclauses)
4228 : {
4229 228486 : RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
4230 : Selectivity thisbucketsize;
4231 : Selectivity thismcvfreq;
4232 :
4233 : /*
4234 : * First we have to figure out which side of the hashjoin clause
4235 : * is the inner side.
4236 : *
4237 : * Since we tend to visit the same clauses over and over when
4238 : * planning a large query, we cache the bucket stats estimates in
4239 : * the RestrictInfo node to avoid repeated lookups of statistics.
4240 : */
4241 228486 : if (bms_is_subset(restrictinfo->right_relids,
4242 228486 : inner_path->parent->relids))
4243 : {
4244 : /* righthand side is inner */
4245 122672 : thisbucketsize = restrictinfo->right_bucketsize;
4246 122672 : if (thisbucketsize < 0)
4247 : {
4248 : /* not cached yet */
4249 66648 : estimate_hash_bucket_stats(root,
4250 66648 : get_rightop(restrictinfo->clause),
4251 : virtualbuckets,
4252 : &restrictinfo->right_mcvfreq,
4253 : &restrictinfo->right_bucketsize);
4254 66648 : thisbucketsize = restrictinfo->right_bucketsize;
4255 : }
4256 122672 : thismcvfreq = restrictinfo->right_mcvfreq;
4257 : }
4258 : else
4259 : {
4260 : Assert(bms_is_subset(restrictinfo->left_relids,
4261 : inner_path->parent->relids));
4262 : /* lefthand side is inner */
4263 105814 : thisbucketsize = restrictinfo->left_bucketsize;
4264 105814 : if (thisbucketsize < 0)
4265 : {
4266 : /* not cached yet */
4267 56916 : estimate_hash_bucket_stats(root,
4268 56916 : get_leftop(restrictinfo->clause),
4269 : virtualbuckets,
4270 : &restrictinfo->left_mcvfreq,
4271 : &restrictinfo->left_bucketsize);
4272 56916 : thisbucketsize = restrictinfo->left_bucketsize;
4273 : }
4274 105814 : thismcvfreq = restrictinfo->left_mcvfreq;
4275 : }
4276 :
4277 228486 : if (innerbucketsize > thisbucketsize)
4278 149766 : innerbucketsize = thisbucketsize;
4279 228486 : if (innermcvfreq > thismcvfreq)
4280 211904 : innermcvfreq = thismcvfreq;
4281 : }
4282 : }
4283 :
4284 : /*
4285 : * If the bucket holding the inner MCV would exceed hash_mem, we don't
4286 : * want to hash unless there is really no other alternative, so apply
4287 : * disable_cost. (The executor normally copes with excessive memory usage
4288 : * by splitting batches, but obviously it cannot separate equal values
4289 : * that way, so it will be unable to drive the batch size below hash_mem
4290 : * when this is true.)
4291 : */
4292 209176 : if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
4293 418352 : inner_path->pathtarget->width) > get_hash_memory_limit())
4294 0 : startup_cost += disable_cost;
4295 :
4296 : /*
4297 : * Compute cost of the hashquals and qpquals (other restriction clauses)
4298 : * separately.
4299 : */
4300 209176 : cost_qual_eval(&hash_qual_cost, hashclauses, root);
4301 209176 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4302 209176 : qp_qual_cost.startup -= hash_qual_cost.startup;
4303 209176 : qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4304 :
4305 : /* CPU costs */
4306 :
4307 209176 : if (path->jpath.jointype == JOIN_SEMI ||
4308 206410 : path->jpath.jointype == JOIN_ANTI ||
4309 202388 : extra->inner_unique)
4310 89754 : {
4311 : double outer_matched_rows;
4312 : Selectivity inner_scan_frac;
4313 :
4314 : /*
4315 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
4316 : * executor will stop after the first match.
4317 : *
4318 : * For an outer-rel row that has at least one match, we can expect the
4319 : * bucket scan to stop after a fraction 1/(match_count+1) of the
4320 : * bucket's rows, if the matches are evenly distributed. Since they
4321 : * probably aren't quite evenly distributed, we apply a fuzz factor of
4322 : * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4323 : * to clamp inner_scan_frac to at most 1.0; but since match_count is
4324 : * at least 1, no such clamp is needed now.)
4325 : */
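 : 	/*
 : 	 * For example, match_count = 3 gives inner_scan_frac = 2.0 / 4 =
 : 	 * 0.5, i.e. we expect to scan half of each matched bucket; at the
 : 	 * minimum match_count of 1, inner_scan_frac is exactly 1.0.
 : 	 */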
4326 89754 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4327 89754 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
4328 :
4329 89754 : startup_cost += hash_qual_cost.startup;
4330 179508 : run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4331 89754 : clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
4332 :
4333 : /*
4334 : * For unmatched outer-rel rows, the picture is quite a lot different.
4335 : * In the first place, there is no reason to assume that these rows
4336 : * preferentially hit heavily-populated buckets; instead assume they
4337 : * are uncorrelated with the inner distribution and so they see an
4338 : * average bucket size of inner_path_rows / virtualbuckets. In the
4339 : * second place, it seems likely that they will have few if any exact
4340 : * hash-code matches and so very few of the tuples in the bucket will
4341 : * actually require eval of the hash quals. We don't have any good
4342 : * way to estimate how many will, but for the moment assume that the
4343 : * effective cost per bucket entry is one-tenth what it is for
4344 : * matchable tuples.
4345 : */
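 : 	/*
 : 	 * (Hence the 0.05 multiplier below: the usual 0.5 factor for
 : 	 * inexact hash-value matches, further scaled by the one-tenth
 : 	 * effective cost just described: 0.5 * 0.1 = 0.05.)
 : 	 */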
4346 179508 : run_cost += hash_qual_cost.per_tuple *
4347 179508 : (outer_path_rows - outer_matched_rows) *
4348 89754 : clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
4349 :
4350 : /* Get # of tuples that will pass the basic join */
4351 89754 : if (path->jpath.jointype == JOIN_ANTI)
4352 4022 : hashjointuples = outer_path_rows - outer_matched_rows;
4353 : else
4354 85732 : hashjointuples = outer_matched_rows;
4355 : }
4356 : else
4357 : {
4358 : /*
4359 : * The number of tuple comparisons needed is the number of outer
4360 : * tuples times the typical number of tuples in a hash bucket, which
4361 : * is the inner relation size times its bucketsize fraction. At each
4362 : * one, we need to evaluate the hashjoin quals. But actually,
4363 : * charging the full qual eval cost at each tuple is pessimistic,
4364 : * since we don't evaluate the quals unless the hash values match
4365 : * exactly. For lack of a better idea, halve the cost estimate to
4366 : * allow for that.
4367 : */
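 : 	/*
 : 	 * Worked example: outer_path_rows = 1000, inner_path_rows =
 : 	 * 100000, and innerbucketsize = 0.001 give about 100 bucket
 : 	 * entries per probe, so we charge hash_qual_cost.per_tuple *
 : 	 * 1000 * 100 * 0.5.
 : 	 */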
4368 119422 : startup_cost += hash_qual_cost.startup;
4369 238844 : run_cost += hash_qual_cost.per_tuple * outer_path_rows *
4370 119422 : clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
4371 :
4372 : /*
4373 : * Get approx # tuples passing the hashquals. We use
4374 : * approx_tuple_count here because we need an estimate done with
4375 : * JOIN_INNER semantics.
4376 : */
4377 119422 : hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4378 : }
4379 :
4380 : /*
4381 : * For each tuple that gets through the hashjoin proper, we charge
4382 : * cpu_tuple_cost plus the cost of evaluating additional restriction
4383 : * clauses that are to be applied at the join. (This is pessimistic since
4384 : * not all of the quals may get evaluated at each tuple.)
4385 : */
4386 209176 : startup_cost += qp_qual_cost.startup;
4387 209176 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4388 209176 : run_cost += cpu_per_tuple * hashjointuples;
4389 :
4390 : /* tlist eval costs are paid per output row, not per tuple scanned */
4391 209176 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4392 209176 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4393 :
4394 209176 : path->jpath.path.startup_cost = startup_cost;
4395 209176 : path->jpath.path.total_cost = startup_cost + run_cost;
4396 209176 : }
4397 :
4398 :
4399 : /*
4400 : * cost_subplan
4401 : * Figure the costs for a SubPlan (or initplan).
4402 : *
4403 : * Note: we could dig the subplan's Plan out of the root list, but in practice
4404 : * all callers have it handy already, so we make them pass it.
4405 : */
4406 : void
4407 33694 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
4408 : {
4409 : QualCost sp_cost;
4410 :
4411 : /* Figure any cost for evaluating the testexpr */
4412 33694 : cost_qual_eval(&sp_cost,
4413 33694 : make_ands_implicit((Expr *) subplan->testexpr),
4414 : root);
4415 :
4416 33694 : if (subplan->useHashTable)
4417 : {
4418 : /*
4419 : * If we are using a hash table for the subquery outputs, then the
4420 : * cost of evaluating the query is a one-time cost. We charge one
4421 : * cpu_operator_cost per tuple for the work of loading the hashtable,
4422 : * too.
4423 : */
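 : 	/*
 : 	 * E.g. a subplan with total_cost = 1000 and plan_rows = 10000
 : 	 * adds 1000 + 0.0025 * 10000 = 1025 to startup, at the default
 : 	 * cpu_operator_cost of 0.0025.
 : 	 */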
4424 1850 : sp_cost.startup += plan->total_cost +
4425 1850 : cpu_operator_cost * plan->plan_rows;
4426 :
4427 : /*
4428 : * The per-tuple costs include the cost of evaluating the lefthand
4429 : * expressions, plus the cost of probing the hashtable. We already
4430 : * accounted for the lefthand expressions as part of the testexpr, and
4431 : * will also have counted one cpu_operator_cost for each comparison
4432 : * operator. That is probably too low for the probing cost, but it's
4433 : * hard to make a better estimate, so live with it for now.
4434 : */
4435 : }
4436 : else
4437 : {
4438 : /*
4439 : * Otherwise we will be rescanning the subplan output on each
4440 : * evaluation. We need to estimate how much of the output we will
4441 : * actually need to scan. NOTE: this logic should agree with the
4442 : * tuple_fraction estimates used by make_subplan() in
4443 : * plan/subselect.c.
4444 : */
4445 31844 : Cost plan_run_cost = plan->total_cost - plan->startup_cost;
4446 :
4447 31844 : if (subplan->subLinkType == EXISTS_SUBLINK)
4448 : {
4449 : /* we only need to fetch 1 tuple; clamp to avoid zero divide */
4450 1992 : sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4451 : }
4452 29852 : else if (subplan->subLinkType == ALL_SUBLINK ||
4453 29834 : subplan->subLinkType == ANY_SUBLINK)
4454 : {
4455 : /* assume we need 50% of the tuples */
4456 130 : sp_cost.per_tuple += 0.50 * plan_run_cost;
4457 : /* also charge a cpu_operator_cost per row examined */
4458 130 : sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4459 : }
4460 : else
4461 : {
4462 : /* assume we need all tuples */
4463 29722 : sp_cost.per_tuple += plan_run_cost;
4464 : }
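 : 
 : 	/*
 : 	 * To make those fractions concrete: with plan_run_cost = 100 and
 : 	 * plan_rows = 50, EXISTS charges 100 / 50 = 2 per call, ANY/ALL
 : 	 * charge 0.5 * 100 plus 0.5 * 50 * cpu_operator_cost, and the
 : 	 * catch-all case charges the full 100.
 : 	 */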
4465 :
4466 : /*
4467 : * Also account for subplan's startup cost. If the subplan is
4468 : * uncorrelated or undirect correlated, AND its topmost node is one
4469 : * that materializes its output, assume that we'll only need to pay
4470 : * its startup cost once; otherwise assume we pay the startup cost
4471 : * every time.
4472 : */
4473 42328 : if (subplan->parParam == NIL &&
4474 10484 : ExecMaterializesOutput(nodeTag(plan)))
4475 460 : sp_cost.startup += plan->startup_cost;
4476 : else
4477 31384 : sp_cost.per_tuple += plan->startup_cost;
4478 : }
4479 :
4480 33694 : subplan->startup_cost = sp_cost.startup;
4481 33694 : subplan->per_call_cost = sp_cost.per_tuple;
4482 33694 : }
4483 :
4484 :
4485 : /*
4486 : * cost_rescan
4487 : * Given a finished Path, estimate the costs of rescanning it after
4488 : * having done so the first time. For some Path types a rescan is
4489 : * cheaper than an original scan (if no parameters change), and this
4490 : * function embodies knowledge about that. The default is to return
4491 : * the same costs stored in the Path. (Note that the cost estimates
4492 : * actually stored in Paths are always for first scans.)
4493 : *
4494 : * This function is not currently intended to model effects such as rescans
4495 : * being cheaper due to disk block caching; what we are concerned with is
4496 : * plan types wherein the executor caches results explicitly, or doesn't
4497 : * redo startup calculations, etc.
4498 : */
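 : /*
 : * A sketch of typical use (see cost_nestloop and initial_cost_nestloop
 : * for the real thing): a nestloop pays the inner path's ordinary cost
 : * for the first outer row and the rescan costs thereafter, roughly
 : *
 : * inner_total ~ inner->total_cost +
 : * (outer_rows - 1) * rescan_total_cost;
 : */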
4499 : static void
4500 2062632 : cost_rescan(PlannerInfo *root, Path *path,
4501 : Cost *rescan_startup_cost, /* output parameters */
4502 : Cost *rescan_total_cost)
4503 : {
4504 2062632 : switch (path->pathtype)
4505 : {
4506 37348 : case T_FunctionScan:
4507 :
4508 : /*
4509 : * Currently, nodeFunctionscan.c always executes the function to
4510 : * completion before returning any rows, and caches the results in
4511 : * a tuplestore. So the function eval cost is all startup cost
4512 : * and isn't paid over again on rescans. However, all run costs
4513 : * will be paid over again.
4514 : */
4515 37348 : *rescan_startup_cost = 0;
4516 37348 : *rescan_total_cost = path->total_cost - path->startup_cost;
4517 37348 : break;
4518 93462 : case T_HashJoin:
4519 :
4520 : /*
4521 : * If it's a single-batch join, we don't need to rebuild the hash
4522 : * table during a rescan.
4523 : */
4524 93462 : if (((HashPath *) path)->num_batches == 1)
4525 : {
4526 : /* Startup cost is exactly the cost of hash table building */
4527 93462 : *rescan_startup_cost = 0;
4528 93462 : *rescan_total_cost = path->total_cost - path->startup_cost;
4529 : }
4530 : else
4531 : {
4532 : /* Otherwise, no special treatment */
4533 0 : *rescan_startup_cost = path->startup_cost;
4534 0 : *rescan_total_cost = path->total_cost;
4535 : }
4536 93462 : break;
4537 6034 : case T_CteScan:
4538 : case T_WorkTableScan:
4539 : {
4540 : /*
4541 : * These plan types materialize their final result in a
4542 : * tuplestore or tuplesort object. So the rescan cost is only
4543 : * cpu_tuple_cost per tuple, unless the result is large enough
4544 : * to spill to disk.
4545 : */
4546 6034 : Cost run_cost = cpu_tuple_cost * path->rows;
4547 6034 : double nbytes = relation_byte_size(path->rows,
4548 6034 : path->pathtarget->width);
4549 6034 : long work_mem_bytes = work_mem * 1024L;
4550 :
4551 6034 : if (nbytes > work_mem_bytes)
4552 : {
4553 : /* It will spill, so account for re-read cost */
4554 144 : double npages = ceil(nbytes / BLCKSZ);
4555 :
4556 144 : run_cost += seq_page_cost * npages;
4557 : }
4558 6034 : *rescan_startup_cost = 0;
4559 6034 : *rescan_total_cost = run_cost;
4560 : }
4561 6034 : break;
4562 691498 : case T_Material:
4563 : case T_Sort:
4564 : {
4565 : /*
4566 : * These plan types not only materialize their results, but do
4567 : * not implement qual filtering or projection. So they are
4568 : * even cheaper to rescan than the ones above. We charge only
4569 : * cpu_operator_cost per tuple. (Note: keep that in sync with
4570 : * the run_cost charge in cost_sort, and also see comments in
4571 : * cost_material before you change it.)
4572 : */
4573 691498 : Cost run_cost = cpu_operator_cost * path->rows;
4574 691498 : double nbytes = relation_byte_size(path->rows,
4575 691498 : path->pathtarget->width);
4576 691498 : long work_mem_bytes = work_mem * 1024L;
4577 :
4578 691498 : if (nbytes > work_mem_bytes)
4579 : {
4580 : /* It will spill, so account for re-read cost */
4581 8356 : double npages = ceil(nbytes / BLCKSZ);
4582 :
4583 8356 : run_cost += seq_page_cost * npages;
4584 : }
4585 691498 : *rescan_startup_cost = 0;
4586 691498 : *rescan_total_cost = run_cost;
4587 : }
4588 691498 : break;
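 : 
 : 			/*
 : 			 * Example of the spill math above: 1e6 rows at ~100 bytes
 : 			 * is ~100MB of stored tuples; with the default 4MB work_mem
 : 			 * that exceeds work_mem_bytes, so we re-read roughly
 : 			 * 100MB / BLCKSZ = 12800 8kB pages at seq_page_cost each.
 : 			 */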
4589 209386 : case T_Memoize:
4590 : /* All the hard work is done by cost_memoize_rescan */
4591 209386 : cost_memoize_rescan(root, (MemoizePath *) path,
4592 : rescan_startup_cost, rescan_total_cost);
4593 209386 : break;
4594 1024904 : default:
4595 1024904 : *rescan_startup_cost = path->startup_cost;
4596 1024904 : *rescan_total_cost = path->total_cost;
4597 1024904 : break;
4598 : }
4599 2062632 : }
4600 :
4601 :
4602 : /*
4603 : * cost_qual_eval
4604 : * Estimate the CPU costs of evaluating a WHERE clause.
4605 : * The input can be either an implicitly-ANDed list of boolean
4606 : * expressions, or a list of RestrictInfo nodes. (The latter is
4607 : * preferred since it allows caching of the results.)
4608 : * The result includes both a one-time (startup) component,
4609 : * and a per-evaluation component.
4610 : */
4611 : void
4612 2942096 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4613 : {
4614 : cost_qual_eval_context context;
4615 : ListCell *l;
4616 :
4617 2942096 : context.root = root;
4618 2942096 : context.total.startup = 0;
4619 2942096 : context.total.per_tuple = 0;
4620 :
4621 : /* We don't charge any cost for the implicit ANDing at top level ... */
4622 :
4623 5471732 : foreach(l, quals)
4624 : {
4625 2529636 : Node *qual = (Node *) lfirst(l);
4626 :
4627 2529636 : cost_qual_eval_walker(qual, &context);
4628 : }
4629 :
4630 2942096 : *cost = context.total;
4631 2942096 : }
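 : 
 : /*
 : * Typical use of the result, as in the scan-costing routines: the
 : * startup component is paid once per query and the per-tuple component
 : * once per tuple examined, e.g.
 : *
 : * startup_cost += qpqual_cost.startup;
 : * run_cost += (cpu_tuple_cost + qpqual_cost.per_tuple) * ntuples;
 : */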
4632 :
4633 : /*
4634 : * cost_qual_eval_node
4635 : * As above, for a single RestrictInfo or expression.
4636 : */
4637 : void
4638 1419930 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
4639 : {
4640 : cost_qual_eval_context context;
4641 :
4642 1419930 : context.root = root;
4643 1419930 : context.total.startup = 0;
4644 1419930 : context.total.per_tuple = 0;
4645 :
4646 1419930 : cost_qual_eval_walker(qual, &context);
4647 :
4648 1419930 : *cost = context.total;
4649 1419930 : }
4650 :
4651 : static bool
4652 6777166 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4653 : {
4654 6777166 : if (node == NULL)
4655 85710 : return false;
4656 :
4657 : /*
4658 : * RestrictInfo nodes contain an eval_cost field reserved for this
4659 : * routine's use, so that it's not necessary to evaluate the qual clause's
4660 : * cost more than once. If the clause's cost hasn't been computed yet,
4661 : * the field's startup value will contain -1.
4662 : */
4663 6691456 : if (IsA(node, RestrictInfo))
4664 : {
4665 2658426 : RestrictInfo *rinfo = (RestrictInfo *) node;
4666 :
4667 2658426 : if (rinfo->eval_cost.startup < 0)
4668 : {
4669 : cost_qual_eval_context locContext;
4670 :
4671 461034 : locContext.root = context->root;
4672 461034 : locContext.total.startup = 0;
4673 461034 : locContext.total.per_tuple = 0;
4674 :
4675 : /*
4676 : * For an OR clause, recurse into the marked-up tree so that we
4677 : * set the eval_cost for contained RestrictInfos too.
4678 : */
4679 461034 : if (rinfo->orclause)
4680 7446 : cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4681 : else
4682 453588 : cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4683 :
4684 : /*
4685 : * If the RestrictInfo is marked pseudoconstant, it will be tested
4686 : * only once, so treat its cost as all startup cost.
4687 : */
4688 461034 : if (rinfo->pseudoconstant)
4689 : {
4690 : /* count one execution during startup */
4691 7768 : locContext.total.startup += locContext.total.per_tuple;
4692 7768 : locContext.total.per_tuple = 0;
4693 : }
4694 461034 : rinfo->eval_cost = locContext.total;
4695 : }
4696 2658426 : context->total.startup += rinfo->eval_cost.startup;
4697 2658426 : context->total.per_tuple += rinfo->eval_cost.per_tuple;
4698 : /* do NOT recurse into children */
4699 2658426 : return false;
4700 : }
4701 :
4702 : /*
4703 : * For each operator or function node in the given tree, we charge the
4704 : * estimated execution cost given by pg_proc.procost (remember to multiply
4705 : * this by cpu_operator_cost).
4706 : *
4707 : * Vars and Consts are charged zero, and so are boolean operators (AND,
4708 : * OR, NOT). Simplistic, but a lot better than no model at all.
4709 : *
4710 : * Should we try to account for the possibility of short-circuit
4711 : * evaluation of AND/OR? Probably *not*, because that would make the
4712 : * results depend on the clause ordering, and we are not in any position
4713 : * to expect that the current ordering of the clauses is the one that's
4714 : * going to end up being used. The above per-RestrictInfo caching would
4715 : * not mix well with trying to re-order clauses anyway.
4716 : *
4717 : * Another issue that is entirely ignored here is that if a set-returning
4718 : * function is below top level in the tree, the functions/operators above
4719 : * it will need to be evaluated multiple times. In practical use, such
4720 : * cases arise so seldom as to not be worth the added complexity needed;
4721 : * moreover, since our rowcount estimates for functions tend to be pretty
4722 : * phony, the results would also be pretty phony.
4723 : */
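 : 	/*
 : 	 * For a function with no planner support function, add_function_cost
 : 	 * boils down to charging pg_proc.procost * cpu_operator_cost per
 : 	 * evaluation; a support function may substitute a smarter estimate.
 : 	 */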
4724 4033030 : if (IsA(node, FuncExpr))
4725 : {
4726 279552 : add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
4727 : &context->total);
4728 : }
4729 3753478 : else if (IsA(node, OpExpr) ||
4730 3202952 : IsA(node, DistinctExpr) ||
4731 3202134 : IsA(node, NullIfExpr))
4732 : {
4733 : /* rely on struct equivalence to treat these all alike */
4734 551430 : set_opfuncid((OpExpr *) node);
4735 551430 : add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
4736 : &context->total);
4737 : }
4738 3202048 : else if (IsA(node, ScalarArrayOpExpr))
4739 : {
4740 33768 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
4741 33768 : Node *arraynode = (Node *) lsecond(saop->args);
4742 : QualCost sacosts;
4743 : QualCost hcosts;
4744 33768 : int estarraylen = estimate_array_length(arraynode);
4745 :
4746 33768 : set_sa_opfuncid(saop);
4747 33768 : sacosts.startup = sacosts.per_tuple = 0;
4748 33768 : add_function_cost(context->root, saop->opfuncid, NULL,
4749 : &sacosts);
4750 :
4751 33768 : if (OidIsValid(saop->hashfuncid))
4752 : {
4753 : /* Handle costs for hashed ScalarArrayOpExpr */
4754 260 : hcosts.startup = hcosts.per_tuple = 0;
4755 :
4756 260 : add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
4757 260 : context->total.startup += sacosts.startup + hcosts.startup;
4758 :
4759 : /* Estimate the cost of building the hashtable. */
4760 260 : context->total.startup += estarraylen * hcosts.per_tuple;
4761 :
4762 : /*
4763 : * XXX should we charge a little bit for sacosts.per_tuple when
4764 : * building the table, or is it ok to assume there will be zero
 4765 : * hash collisions?
4766 : */
4767 :
4768 : /*
4769 : * Charge for hashtable lookups. Charge a single hash and a
4770 : * single comparison.
4771 : */
4772 260 : context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
4773 : }
4774 : else
4775 : {
4776 : /*
4777 : * Estimate that the operator will be applied to about half of the
4778 : * array elements before the answer is determined.
4779 : */
4780 33508 : context->total.startup += sacosts.startup;
4781 67016 : context->total.per_tuple += sacosts.per_tuple *
4782 33508 : estimate_array_length(arraynode) * 0.5;
4783 : }
4784 : }
4785 3168280 : else if (IsA(node, Aggref) ||
4786 3120416 : IsA(node, WindowFunc))
4787 : {
4788 : /*
4789 : * Aggref and WindowFunc nodes are (and should be) treated like Vars,
4790 : * ie, zero execution cost in the current model, because they behave
4791 : * essentially like Vars at execution. We disregard the costs of
4792 : * their input expressions for the same reason. The actual execution
4793 : * costs of the aggregate/window functions and their arguments have to
4794 : * be factored into plan-node-specific costing of the Agg or WindowAgg
4795 : * plan node.
4796 : */
4797 51140 : return false; /* don't recurse into children */
4798 : }
4799 3117140 : else if (IsA(node, GroupingFunc))
4800 : {
4801 : /* Treat this as having cost 1 */
4802 350 : context->total.per_tuple += cpu_operator_cost;
4803 350 : return false; /* don't recurse into children */
4804 : }
4805 3116790 : else if (IsA(node, CoerceViaIO))
4806 : {
4807 18360 : CoerceViaIO *iocoerce = (CoerceViaIO *) node;
4808 : Oid iofunc;
4809 : Oid typioparam;
4810 : bool typisvarlena;
4811 :
4812 : /* check the result type's input function */
4813 18360 : getTypeInputInfo(iocoerce->resulttype,
4814 : &iofunc, &typioparam);
4815 18360 : add_function_cost(context->root, iofunc, NULL,
4816 : &context->total);
4817 : /* check the input type's output function */
4818 18360 : getTypeOutputInfo(exprType((Node *) iocoerce->arg),
4819 : &iofunc, &typisvarlena);
4820 18360 : add_function_cost(context->root, iofunc, NULL,
4821 : &context->total);
4822 : }
4823 3098430 : else if (IsA(node, ArrayCoerceExpr))
4824 : {
4825 4148 : ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
4826 : QualCost perelemcost;
4827 :
4828 4148 : cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
4829 : context->root);
4830 4148 : context->total.startup += perelemcost.startup;
4831 4148 : if (perelemcost.per_tuple > 0)
4832 48 : context->total.per_tuple += perelemcost.per_tuple *
4833 48 : estimate_array_length((Node *) acoerce->arg);
4834 : }
4835 3094282 : else if (IsA(node, RowCompareExpr))
4836 : {
4837 : /* Conservatively assume we will check all the columns */
4838 156 : RowCompareExpr *rcexpr = (RowCompareExpr *) node;
4839 : ListCell *lc;
4840 :
4841 522 : foreach(lc, rcexpr->opnos)
4842 : {
4843 366 : Oid opid = lfirst_oid(lc);
4844 :
4845 366 : add_function_cost(context->root, get_opcode(opid), NULL,
4846 : &context->total);
4847 : }
4848 : }
4849 3094126 : else if (IsA(node, MinMaxExpr) ||
4850 3093972 : IsA(node, SQLValueFunction) ||
4851 3090008 : IsA(node, XmlExpr) ||
4852 3089318 : IsA(node, CoerceToDomain) ||
4853 3082214 : IsA(node, NextValueExpr))
4854 : {
4855 : /* Treat all these as having cost 1 */
4856 12184 : context->total.per_tuple += cpu_operator_cost;
4857 : }
4858 3081942 : else if (IsA(node, CurrentOfExpr))
4859 : {
4860 : /* Report high cost to prevent selection of anything but TID scan */
4861 394 : context->total.startup += disable_cost;
4862 : }
4863 3081548 : else if (IsA(node, SubLink))
4864 : {
4865 : /* This routine should not be applied to un-planned expressions */
4866 0 : elog(ERROR, "cannot handle unplanned sub-select");
4867 : }
4868 3081548 : else if (IsA(node, SubPlan))
4869 : {
4870 : /*
4871 : * A subplan node in an expression typically indicates that the
4872 : * subplan will be executed on each evaluation, so charge accordingly.
4873 : * (Sub-selects that can be executed as InitPlans have already been
4874 : * removed from the expression.)
4875 : */
4876 33040 : SubPlan *subplan = (SubPlan *) node;
4877 :
4878 33040 : context->total.startup += subplan->startup_cost;
4879 33040 : context->total.per_tuple += subplan->per_call_cost;
4880 :
4881 : /*
4882 : * We don't want to recurse into the testexpr, because it was already
4883 : * counted in the SubPlan node's costs. So we're done.
4884 : */
4885 33040 : return false;
4886 : }
4887 3048508 : else if (IsA(node, AlternativeSubPlan))
4888 : {
4889 : /*
4890 : * Arbitrarily use the first alternative plan for costing. (We should
4891 : * certainly only include one alternative, and we don't yet have
4892 : * enough information to know which one the executor is most likely to
4893 : * use.)
4894 : */
4895 1588 : AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
4896 :
4897 1588 : return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
4898 : context);
4899 : }
4900 3046920 : else if (IsA(node, PlaceHolderVar))
4901 : {
4902 : /*
4903 : * A PlaceHolderVar should be given cost zero when considering general
4904 : * expression evaluation costs. The expense of doing the contained
4905 : * expression is charged as part of the tlist eval costs of the scan
4906 : * or join where the PHV is first computed (see set_rel_width and
4907 : * add_placeholders_to_joinrel). If we charged it again here, we'd be
4908 : * double-counting the cost for each level of plan that the PHV
4909 : * bubbles up through. Hence, return without recursing into the
4910 : * phexpr.
4911 : */
4912 2214 : return false;
4913 : }
4914 :
4915 : /* recurse into children */
4916 3944698 : return expression_tree_walker(node, cost_qual_eval_walker,
4917 : (void *) context);
4918 : }
4919 :
4920 : /*
4921 : * get_restriction_qual_cost
4922 : * Compute evaluation costs of a baserel's restriction quals, plus any
4923 : * movable join quals that have been pushed down to the scan.
4924 : * Results are returned into *qpqual_cost.
4925 : *
4926 : * This is a convenience subroutine that works for seqscans and other cases
4927 : * where all the given quals will be evaluated the hard way. It's not useful
4928 : * for cost_index(), for example, where the index machinery takes care of
4929 : * some of the quals. We assume baserestrictcost was previously set by
4930 : * set_baserel_size_estimates().
4931 : */
4932 : static void
4933 818626 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
4934 : ParamPathInfo *param_info,
4935 : QualCost *qpqual_cost)
4936 : {
4937 818626 : if (param_info)
4938 : {
4939 : /* Include costs of pushed-down clauses */
4940 173148 : cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
4941 :
4942 173148 : qpqual_cost->startup += baserel->baserestrictcost.startup;
4943 173148 : qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
4944 : }
4945 : else
4946 645478 : *qpqual_cost = baserel->baserestrictcost;
4947 818626 : }
4948 :
4949 :
4950 : /*
4951 : * compute_semi_anti_join_factors
4952 : * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
4953 : * can be expected to scan.
4954 : *
4955 : * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
4956 : * inner rows as soon as it finds a match to the current outer row.
4957 : * The same happens if we have detected the inner rel is unique.
4958 : * We should therefore adjust some of the cost components for this effect.
4959 : * This function computes some estimates needed for these adjustments.
4960 : * These estimates will be the same regardless of the particular paths used
4961 : * for the outer and inner relation, so we compute these once and then pass
4962 : * them to all the join cost estimation functions.
4963 : *
4964 : * Input parameters:
4965 : * joinrel: join relation under consideration
4966 : * outerrel: outer relation under consideration
4967 : * innerrel: inner relation under consideration
4968 : * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
4969 : * sjinfo: SpecialJoinInfo relevant to this join
4970 : * restrictlist: join quals
4971 : * Output parameters:
4972 : * *semifactors is filled in (see pathnodes.h for field definitions)
4973 : */
4974 : void
4975 166676 : compute_semi_anti_join_factors(PlannerInfo *root,
4976 : RelOptInfo *joinrel,
4977 : RelOptInfo *outerrel,
4978 : RelOptInfo *innerrel,
4979 : JoinType jointype,
4980 : SpecialJoinInfo *sjinfo,
4981 : List *restrictlist,
4982 : SemiAntiJoinFactors *semifactors)
4983 : {
4984 : Selectivity jselec;
4985 : Selectivity nselec;
4986 : Selectivity avgmatch;
4987 : SpecialJoinInfo norm_sjinfo;
4988 : List *joinquals;
4989 : ListCell *l;
4990 :
4991 : /*
4992 : * In an ANTI join, we must ignore clauses that are "pushed down", since
4993 : * those won't affect the match logic. In a SEMI join, we do not
4994 : * distinguish joinquals from "pushed down" quals, so just use the whole
4995 : * restrictinfo list. For other outer join types, we should consider only
4996 : * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
4997 : */
4998 166676 : if (IS_OUTER_JOIN(jointype))
4999 : {
5000 71846 : joinquals = NIL;
5001 154222 : foreach(l, restrictlist)
5002 : {
5003 82376 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5004 :
5005 82376 : if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5006 78852 : joinquals = lappend(joinquals, rinfo);
5007 : }
5008 : }
5009 : else
5010 94830 : joinquals = restrictlist;
5011 :
5012 : /*
5013 : * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
5014 : */
5015 166676 : jselec = clauselist_selectivity(root,
5016 : joinquals,
5017 : 0,
5018 : (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
5019 : sjinfo);
5020 :
5021 : /*
5022 : * Also get the normal inner-join selectivity of the join clauses.
5023 : */
5024 166676 : norm_sjinfo.type = T_SpecialJoinInfo;
5025 166676 : norm_sjinfo.min_lefthand = outerrel->relids;
5026 166676 : norm_sjinfo.min_righthand = innerrel->relids;
5027 166676 : norm_sjinfo.syn_lefthand = outerrel->relids;
5028 166676 : norm_sjinfo.syn_righthand = innerrel->relids;
5029 166676 : norm_sjinfo.jointype = JOIN_INNER;
5030 166676 : norm_sjinfo.ojrelid = 0;
5031 166676 : norm_sjinfo.commute_above_l = NULL;
5032 166676 : norm_sjinfo.commute_above_r = NULL;
5033 166676 : norm_sjinfo.commute_below_l = NULL;
5034 166676 : norm_sjinfo.commute_below_r = NULL;
5035 : /* we don't bother trying to make the remaining fields valid */
5036 166676 : norm_sjinfo.lhs_strict = false;
5037 166676 : norm_sjinfo.semi_can_btree = false;
5038 166676 : norm_sjinfo.semi_can_hash = false;
5039 166676 : norm_sjinfo.semi_operators = NIL;
5040 166676 : norm_sjinfo.semi_rhs_exprs = NIL;
5041 :
5042 166676 : nselec = clauselist_selectivity(root,
5043 : joinquals,
5044 : 0,
5045 : JOIN_INNER,
5046 : &norm_sjinfo);
5047 :
5048 : /* Avoid leaking a lot of ListCells */
5049 166676 : if (IS_OUTER_JOIN(jointype))
5050 71846 : list_free(joinquals);
5051 :
5052 : /*
5053 : * jselec can be interpreted as the fraction of outer-rel rows that have
5054 : * any matches (this is true for both SEMI and ANTI cases). And nselec is
5055 : * the fraction of the Cartesian product that matches. So, the average
5056 : * number of matches for each outer-rel row that has at least one match is
5057 : * nselec * inner_rows / jselec.
5058 : *
5059 : * Note: it is correct to use the inner rel's "rows" count here, even
5060 : * though we might later be considering a parameterized inner path with
5061 : * fewer rows. This is because we have included all the join clauses in
5062 : * the selectivity estimate.
5063 : */
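 : 	/*
 : 	 * Numeric example: jselec = 0.4, nselec = 0.01, and an inner rel
 : 	 * of 200 rows give avgmatch = 0.01 * 200 / 0.4 = 5 expected
 : 	 * matches per matched outer row.
 : 	 */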
5064 166676 : if (jselec > 0) /* protect against zero divide */
5065 : {
5066 166364 : avgmatch = nselec * innerrel->rows / jselec;
5067 : /* Clamp to sane range */
5068 166364 : avgmatch = Max(1.0, avgmatch);
5069 : }
5070 : else
5071 312 : avgmatch = 1.0;
5072 :
5073 166676 : semifactors->outer_match_frac = jselec;
5074 166676 : semifactors->match_count = avgmatch;
5075 166676 : }
5076 :
5077 : /*
5078 : * has_indexed_join_quals
5079 : * Check whether all the joinquals of a nestloop join are used as
5080 : * inner index quals.
5081 : *
5082 : * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
5083 : * indexscan) that uses all the joinquals as indexquals, we can assume that an
5084 : * unmatched outer tuple is cheap to process, whereas otherwise it's probably
5085 : * expensive.
5086 : */
5087 : static bool
5088 649876 : has_indexed_join_quals(NestPath *path)
5089 : {
5090 649876 : JoinPath *joinpath = &path->jpath;
5091 649876 : Relids joinrelids = joinpath->path.parent->relids;
5092 649876 : Path *innerpath = joinpath->innerjoinpath;
5093 : List *indexclauses;
5094 : bool found_one;
5095 : ListCell *lc;
5096 :
5097 : /* If join still has quals to evaluate, it's not fast */
5098 649876 : if (joinpath->joinrestrictinfo != NIL)
5099 450028 : return false;
5100 : /* Nor if the inner path isn't parameterized at all */
5101 199848 : if (innerpath->param_info == NULL)
5102 4764 : return false;
5103 :
5104 : /* Find the indexclauses list for the inner scan */
5105 195084 : switch (innerpath->pathtype)
5106 : {
5107 123070 : case T_IndexScan:
5108 : case T_IndexOnlyScan:
5109 123070 : indexclauses = ((IndexPath *) innerpath)->indexclauses;
5110 123070 : break;
5111 270 : case T_BitmapHeapScan:
5112 : {
5113 : /* Accept only a simple bitmap scan, not AND/OR cases */
5114 270 : Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
5115 :
5116 270 : if (IsA(bmqual, IndexPath))
5117 222 : indexclauses = ((IndexPath *) bmqual)->indexclauses;
5118 : else
5119 48 : return false;
5120 222 : break;
5121 : }
5122 71744 : default:
5123 :
5124 : /*
5125 : * If it's not a simple indexscan, it probably doesn't run quickly
5126 : * for zero rows out, even if it's a parameterized path using all
5127 : * the joinquals.
5128 : */
5129 71744 : return false;
5130 : }
5131 :
5132 : /*
5133 : * Examine the inner path's param clauses. Any that are from the outer
5134 : * path must be found in the indexclauses list, either exactly or in an
5135 : * equivalent form generated by equivclass.c. Also, we must find at least
5136 : * one such clause, else it's a clauseless join which isn't fast.
5137 : */
5138 123292 : found_one = false;
5139 245734 : foreach(lc, innerpath->param_info->ppi_clauses)
5140 : {
5141 125354 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
5142 :
5143 125354 : if (join_clause_is_movable_into(rinfo,
5144 125354 : innerpath->parent->relids,
5145 : joinrelids))
5146 : {
5147 125354 : if (!is_redundant_with_indexclauses(rinfo, indexclauses))
5148 2912 : return false;
5149 122442 : found_one = true;
5150 : }
5151 : }
5152 120380 : return found_one;
5153 : }
5154 :
5155 :
5156 : /*
5157 : * approx_tuple_count
5158 : * Quick-and-dirty estimation of the number of join rows passing
5159 : * a set of qual conditions.
5160 : *
5161 : * The quals can be either an implicitly-ANDed list of boolean expressions,
5162 : * or a list of RestrictInfo nodes (typically the latter).
5163 : *
5164 : * We intentionally compute the selectivity under JOIN_INNER rules, even
5165 : * if it's some type of outer join. This is appropriate because we are
5166 : * trying to figure out how many tuples pass the initial merge or hash
5167 : * join step.
5168 : *
5169 : * This is quick-and-dirty because we bypass clauselist_selectivity, and
5170 : * simply multiply the independent clause selectivities together. Now
5171 : * clauselist_selectivity often can't do any better than that anyhow, but
5172 : * for some situations (such as range constraints) it is smarter. However,
5173 : * we can't effectively cache the results of clauselist_selectivity, whereas
5174 : * the individual clause selectivities can be and are cached.
5175 : *
5176 : * Since we are only using the results to estimate how many potential
5177 : * output tuples are generated and passed through qpqual checking, it
5178 : * seems OK to live with the approximation.
5179 : */
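 : /*
 : * For instance, two independent quals with cached selectivities 0.1
 : * and 0.2 yield selec = 0.02, so the estimate is 0.02 * outer_tuples *
 : * inner_tuples, clamped by clamp_row_est.
 : */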
5180 : static double
5181 356908 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
5182 : {
5183 : double tuples;
5184 356908 : double outer_tuples = path->outerjoinpath->rows;
5185 356908 : double inner_tuples = path->innerjoinpath->rows;
5186 : SpecialJoinInfo sjinfo;
5187 356908 : Selectivity selec = 1.0;
5188 : ListCell *l;
5189 :
5190 : /*
5191 : * Make up a SpecialJoinInfo for JOIN_INNER semantics.
5192 : */
5193 356908 : sjinfo.type = T_SpecialJoinInfo;
5194 356908 : sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
5195 356908 : sjinfo.min_righthand = path->innerjoinpath->parent->relids;
5196 356908 : sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
5197 356908 : sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
5198 356908 : sjinfo.jointype = JOIN_INNER;
5199 356908 : sjinfo.ojrelid = 0;
5200 356908 : sjinfo.commute_above_l = NULL;
5201 356908 : sjinfo.commute_above_r = NULL;
5202 356908 : sjinfo.commute_below_l = NULL;
5203 356908 : sjinfo.commute_below_r = NULL;
5204 : /* we don't bother trying to make the remaining fields valid */
5205 356908 : sjinfo.lhs_strict = false;
5206 356908 : sjinfo.semi_can_btree = false;
5207 356908 : sjinfo.semi_can_hash = false;
5208 356908 : sjinfo.semi_operators = NIL;
5209 356908 : sjinfo.semi_rhs_exprs = NIL;
5210 :
5211 : /* Get the approximate selectivity */
5212 764948 : foreach(l, quals)
5213 : {
5214 408040 : Node *qual = (Node *) lfirst(l);
5215 :
5216 : /* Note that clause_selectivity will be able to cache its result */
5217 408040 : selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
5218 : }
5219 :
5220 : /* Apply it to the input relation sizes */
5221 356908 : tuples = selec * outer_tuples * inner_tuples;
5222 :
5223 356908 : return clamp_row_est(tuples);
5224 : }
5225 :
5226 :
5227 : /*
5228 : * set_baserel_size_estimates
5229 : * Set the size estimates for the given base relation.
5230 : *
5231 : * The rel's targetlist and restrictinfo list must have been constructed
5232 : * already, and rel->tuples must be set.
5233 : *
5234 : * We set the following fields of the rel node:
5235 : * rows: the estimated number of output tuples (after applying
5236 : * restriction clauses).
5237 : * width: the estimated average output tuple width in bytes.
5238 : * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
5239 : */
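 : /*
 : * E.g. a relation with tuples = 10000 and restriction clauses whose
 : * combined selectivity is 0.015 gets rows = 150.
 : */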
5240 : void
5241 400024 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5242 : {
5243 : double nrows;
5244 :
5245 : /* Should only be applied to base relations */
5246 : Assert(rel->relid > 0);
5247 :
5248 800040 : nrows = rel->tuples *
5249 400024 : clauselist_selectivity(root,
5250 : rel->baserestrictinfo,
5251 : 0,
5252 : JOIN_INNER,
5253 : NULL);
5254 :
5255 400016 : rel->rows = clamp_row_est(nrows);
5256 :
5257 400016 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5258 :
5259 400016 : set_rel_width(root, rel);
5260 400016 : }
5261 :
5262 : /*
5263 : * get_parameterized_baserel_size
5264 : * Make a size estimate for a parameterized scan of a base relation.
5265 : *
5266 : * 'param_clauses' lists the additional join clauses to be used.
5267 : *
5268 : * set_baserel_size_estimates must have been applied already.
5269 : */
5270 : double
5271 115214 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
5272 : List *param_clauses)
5273 : {
5274 : List *allclauses;
5275 : double nrows;
5276 :
5277 : /*
5278 : * Estimate the number of rows returned by the parameterized scan, knowing
5279 : * that it will apply all the extra join clauses as well as the rel's own
5280 : * restriction clauses. Note that we force the clauses to be treated as
5281 : * non-join clauses during selectivity estimation.
5282 : */
5283 115214 : allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
5284 230428 : nrows = rel->tuples *
5285 115214 : clauselist_selectivity(root,
5286 : allclauses,
5287 115214 : rel->relid, /* do not use 0! */
5288 : JOIN_INNER,
5289 : NULL);
5290 115214 : nrows = clamp_row_est(nrows);
5291 : /* For safety, make sure result is not more than the base estimate */
5292 115214 : if (nrows > rel->rows)
5293 0 : nrows = rel->rows;
5294 115214 : return nrows;
5295 : }
5296 :
5297 : /*
5298 : * set_joinrel_size_estimates
5299 : * Set the size estimates for the given join relation.
5300 : *
5301 : * The rel's targetlist must have been constructed already, and a
5302 : * restriction clause list that matches the given component rels must
5303 : * be provided.
5304 : *
5305 : * Since there is more than one way to make a joinrel for more than two
5306 : * base relations, the results we get here could depend on which component
5307 : * rel pair is provided. In theory we should get the same answers no matter
5308 : * which pair is provided; in practice, since the selectivity estimation
5309 : * routines don't handle all cases equally well, we might not. But there's
5310 : * not much to be done about it. (Would it make sense to repeat the
5311 : * calculations for each pair of input rels that's encountered, and somehow
5312 : * average the results? Probably way more trouble than it's worth, and
5313 : * anyway we must keep the rowcount estimate the same for all paths for the
5314 : * joinrel.)
5315 : *
5316 : * We set only the rows field here. The reltarget field was already set by
5317 : * build_joinrel_tlist, and baserestrictcost is not used for join rels.
5318 : */
5319 : void
5320 170588 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5321 : RelOptInfo *outer_rel,
5322 : RelOptInfo *inner_rel,
5323 : SpecialJoinInfo *sjinfo,
5324 : List *restrictlist)
5325 : {
5326 170588 : rel->rows = calc_joinrel_size_estimate(root,
5327 : rel,
5328 : outer_rel,
5329 : inner_rel,
5330 : outer_rel->rows,
5331 : inner_rel->rows,
5332 : sjinfo,
5333 : restrictlist);
5334 170588 : }
5335 :
5336 : /*
5337 : * get_parameterized_joinrel_size
5338 : * Make a size estimate for a parameterized scan of a join relation.
5339 : *
5340 : * 'rel' is the joinrel under consideration.
5341 : * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5342 : * produce the relations being joined.
5343 : * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5344 : * 'restrict_clauses' lists the join clauses that need to be applied at the
5345 : * join node (including any movable clauses that were moved down to this join,
5346 : * and not including any movable clauses that were pushed down into the
5347 : * child paths).
5348 : *
5349 : * set_joinrel_size_estimates must have been applied already.
5350 : */
5351 : double
5352 6962 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5353 : Path *outer_path,
5354 : Path *inner_path,
5355 : SpecialJoinInfo *sjinfo,
5356 : List *restrict_clauses)
5357 : {
5358 : double nrows;
5359 :
5360 : /*
5361 : * Estimate the number of rows returned by the parameterized join as the
5362 : * sizes of the input paths times the selectivity of the clauses that have
5363 : * ended up at this join node.
5364 : *
5365 : * As with set_joinrel_size_estimates, the rowcount estimate could depend
5366 : * on the pair of input paths provided, though ideally we'd get the same
5367 : * estimate for any pair with the same parameterization.
5368 : */
5369 6962 : nrows = calc_joinrel_size_estimate(root,
5370 : rel,
5371 : outer_path->parent,
5372 : inner_path->parent,
5373 : outer_path->rows,
5374 : inner_path->rows,
5375 : sjinfo,
5376 : restrict_clauses);
5377 : /* For safety, make sure result is not more than the base estimate */
5378 6962 : if (nrows > rel->rows)
5379 12 : nrows = rel->rows;
5380 6962 : return nrows;
5381 : }
5382 :
5383 : /*
5384 : * calc_joinrel_size_estimate
5385 : * Workhorse for set_joinrel_size_estimates and
5386 : * get_parameterized_joinrel_size.
5387 : *
5388 : * outer_rel/inner_rel are the relations being joined, but they should be
5389 : * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5390 : * than what rel->rows says, when we are considering parameterized paths.
5391 : */
5392 : static double
5393 177550 : calc_joinrel_size_estimate(PlannerInfo *root,
5394 : RelOptInfo *joinrel,
5395 : RelOptInfo *outer_rel,
5396 : RelOptInfo *inner_rel,
5397 : double outer_rows,
5398 : double inner_rows,
5399 : SpecialJoinInfo *sjinfo,
5400 : List *restrictlist)
5401 : {
5402 177550 : JoinType jointype = sjinfo->jointype;
5403 : Selectivity fkselec;
5404 : Selectivity jselec;
5405 : Selectivity pselec;
5406 : double nrows;
5407 :
5408 : /*
5409 : * Compute joinclause selectivity. Note that we are only considering
5410 : * clauses that become restriction clauses at this join level; we are not
5411 : * double-counting them because they were not considered in estimating the
5412 : * sizes of the component rels.
5413 : *
5414 : * First, see whether any of the joinclauses can be matched to known FK
5415 : * constraints. If so, drop those clauses from the restrictlist, and
5416 : * instead estimate their selectivity using FK semantics. (We do this
5417 : * without regard to whether said clauses are local or "pushed down".
5418 : * Probably, an FK-matching clause could never be seen as pushed down at
5419 : * an outer join, since it would be strict and hence would be grounds for
5420 : * join strength reduction.) fkselec gets the net selectivity for
5421 : * FK-matching clauses, or 1.0 if there are none.
5422 : */
5423 177550 : fkselec = get_foreign_key_join_selectivity(root,
5424 : outer_rel->relids,
5425 : inner_rel->relids,
5426 : sjinfo,
5427 : &restrictlist);
5428 :
5429 : /*
5430 : * For an outer join, we have to distinguish the selectivity of the join's
5431 : * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5432 : * down". For inner joins we just count them all as joinclauses.
5433 : */
5434 177550 : if (IS_OUTER_JOIN(jointype))
5435 : {
5436 69948 : List *joinquals = NIL;
5437 69948 : List *pushedquals = NIL;
5438 : ListCell *l;
5439 :
5440 : /* Grovel through the clauses to separate into two lists */
5441 155596 : foreach(l, restrictlist)
5442 : {
5443 85648 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5444 :
5445 85648 : if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5446 3564 : pushedquals = lappend(pushedquals, rinfo);
5447 : else
5448 82084 : joinquals = lappend(joinquals, rinfo);
5449 : }
5450 :
5451 : /* Get the separate selectivities */
5452 69948 : jselec = clauselist_selectivity(root,
5453 : joinquals,
5454 : 0,
5455 : jointype,
5456 : sjinfo);
5457 69948 : pselec = clauselist_selectivity(root,
5458 : pushedquals,
5459 : 0,
5460 : jointype,
5461 : sjinfo);
5462 :
5463 : /* Avoid leaking a lot of ListCells */
5464 69948 : list_free(joinquals);
5465 69948 : list_free(pushedquals);
5466 : }
5467 : else
5468 : {
5469 107602 : jselec = clauselist_selectivity(root,
5470 : restrictlist,
5471 : 0,
5472 : jointype,
5473 : sjinfo);
5474 107602 : pselec = 0.0; /* not used, keep compiler quiet */
5475 : }
5476 :
5477 : /*
5478 : * Basically, we multiply size of Cartesian product by selectivity.
5479 : *
5480 : * If we are doing an outer join, take that into account: the joinqual
5481 : * selectivity has to be clamped using the knowledge that the output must
5482 : * be at least as large as the non-nullable input. However, any
5483 : * pushed-down quals are applied after the outer join, so their
5484 : * selectivity applies fully.
5485 : *
5486 : * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5487 : * of LHS rows that have matches, and we apply that straightforwardly.
5488 : */
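 : 	/*
 : 	 * For instance, a left join with outer_rows = 1000, inner_rows =
 : 	 * 100, and fkselec * jselec = 1e-5 has a raw product of just 1,
 : 	 * which the JOIN_LEFT branch clamps up to the 1000 outer rows
 : 	 * before applying pselec.
 : 	 */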
5489 177550 : switch (jointype)
5490 : {
5491 103080 : case JOIN_INNER:
5492 103080 : nrows = outer_rows * inner_rows * fkselec * jselec;
5493 : /* pselec not used */
5494 103080 : break;
5495 64158 : case JOIN_LEFT:
5496 64158 : nrows = outer_rows * inner_rows * fkselec * jselec;
5497 64158 : if (nrows < outer_rows)
5498 23800 : nrows = outer_rows;
5499 64158 : nrows *= pselec;
5500 64158 : break;
5501 1582 : case JOIN_FULL:
5502 1582 : nrows = outer_rows * inner_rows * fkselec * jselec;
5503 1582 : if (nrows < outer_rows)
5504 1038 : nrows = outer_rows;
5505 1582 : if (nrows < inner_rows)
5506 114 : nrows = inner_rows;
5507 1582 : nrows *= pselec;
5508 1582 : break;
5509 4522 : case JOIN_SEMI:
5510 4522 : nrows = outer_rows * fkselec * jselec;
5511 : /* pselec not used */
5512 4522 : break;
5513 4208 : case JOIN_ANTI:
5514 4208 : nrows = outer_rows * (1.0 - fkselec * jselec);
5515 4208 : nrows *= pselec;
5516 4208 : break;
5517 0 : default:
5518 : /* other values not expected here */
5519 0 : elog(ERROR, "unrecognized join type: %d", (int) jointype);
5520 : nrows = 0; /* keep compiler quiet */
5521 : break;
5522 : }
5523 :
5524 177550 : return clamp_row_est(nrows);
5525 : }
5526 :
5527 : /*
5528 : * get_foreign_key_join_selectivity
5529 : * Estimate join selectivity for foreign-key-related clauses.
5530 : *
5531 : * Remove any clauses that can be matched to FK constraints from *restrictlist,
5532 : * and return a substitute estimate of their selectivity. 1.0 is returned
5533 : * when there are no such clauses.
5534 : *
5535 : * The reason for treating such clauses specially is that we can get better
5536 : * estimates this way than by relying on clauselist_selectivity(), especially
5537 : * for multi-column FKs where that function's assumption that the clauses are
5538 : * independent falls down badly. But even with single-column FKs, we may be
5539 : * able to get a better answer when the pg_statistic stats are missing or out
5540 : * of date.
5541 : */
5542 : static Selectivity
5543 177550 : get_foreign_key_join_selectivity(PlannerInfo *root,
5544 : Relids outer_relids,
5545 : Relids inner_relids,
5546 : SpecialJoinInfo *sjinfo,
5547 : List **restrictlist)
5548 : {
5549 177550 : Selectivity fkselec = 1.0;
5550 177550 : JoinType jointype = sjinfo->jointype;
5551 177550 : List *worklist = *restrictlist;
5552 : ListCell *lc;
5553 :
5554 : /* Consider each FK constraint that is known to match the query */
5555 179420 : foreach(lc, root->fkey_list)
5556 : {
5557 1870 : ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5558 : bool ref_is_outer;
5559 : List *removedlist;
5560 : ListCell *cell;
5561 :
5562 : /*
5563 : * This FK is not relevant unless it connects a baserel on one side of
5564 : * this join to a baserel on the other side.
5565 : */
5566 3394 : if (bms_is_member(fkinfo->con_relid, outer_relids) &&
5567 1524 : bms_is_member(fkinfo->ref_relid, inner_relids))
5568 1356 : ref_is_outer = false;
5569 842 : else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5570 328 : bms_is_member(fkinfo->con_relid, inner_relids))
5571 130 : ref_is_outer = true;
5572 : else
5573 384 : continue;
5574 :
5575 : /*
5576 : * If we're dealing with a semi/anti join, and the FK's referenced
5577 : * relation is on the outside, then knowledge of the FK doesn't help
5578 : * us figure out what we need to know (which is the fraction of outer
5579 : * rows that have matches). On the other hand, if the referenced rel
5580 : * is on the inside, then all outer rows must have matches in the
5581 : * referenced table (ignoring nulls). But any restriction or join
5582 : * clauses that filter that table will reduce the fraction of matches.
5583 : * We can account for restriction clauses, but it's too hard to guess
5584 : * how many table rows would get through a join that's inside the RHS.
5585 : * Hence, if either case applies, punt and ignore the FK.
5586 : */
5587 1486 : if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
5588 964 : (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
5589 12 : continue;
5590 :
5591 : /*
5592 : * Modify the restrictlist by removing clauses that match the FK (and
5593 : * putting them into removedlist instead). It seems unsafe to modify
5594 : * the originally-passed List structure, so we make a shallow copy the
5595 : * first time through.
5596 : */
5597 1474 : if (worklist == *restrictlist)
5598 1250 : worklist = list_copy(worklist);
5599 :
5600 1474 : removedlist = NIL;
5601 3024 : foreach(cell, worklist)
5602 : {
5603 1550 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5604 1550 : bool remove_it = false;
5605 : int i;
5606 :
5607 : /* Drop this clause if it matches any column of the FK */
5608 1936 : for (i = 0; i < fkinfo->nkeys; i++)
5609 : {
5610 1906 : if (rinfo->parent_ec)
5611 : {
5612 : /*
5613 : * EC-derived clauses can only match by EC. It is okay to
5614 : * consider any clause derived from the same EC as
5615 : * matching the FK: even if equivclass.c chose to generate
5616 : * a clause equating some other pair of Vars, it could
5617 : * have generated one equating the FK's Vars. So for
5618 : * purposes of estimation, we can act as though it did so.
5619 : *
5620 : * Note: checking parent_ec is a bit of a cheat because
5621 : * there are EC-derived clauses that don't have parent_ec
5622 : * set; but such clauses must compare expressions that
5623 : * aren't just Vars, so they cannot match the FK anyway.
5624 : */
5625 304 : if (fkinfo->eclass[i] == rinfo->parent_ec)
5626 : {
5627 298 : remove_it = true;
5628 298 : break;
5629 : }
5630 : }
5631 : else
5632 : {
5633 : /*
5634 : * Otherwise, see if rinfo was previously matched to FK as
5635 : * a "loose" clause.
5636 : */
5637 1602 : if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5638 : {
5639 1222 : remove_it = true;
5640 1222 : break;
5641 : }
5642 : }
5643 : }
5644 1550 : if (remove_it)
5645 : {
5646 1520 : worklist = foreach_delete_current(worklist, cell);
5647 1520 : removedlist = lappend(removedlist, rinfo);
5648 : }
5649 : }
5650 :
5651 : /*
5652 : * If we failed to remove all the matching clauses we expected to
5653 : * find, chicken out and ignore this FK; applying its selectivity
5654 : * might result in double-counting. Put any clauses we did manage to
5655 : * remove back into the worklist.
5656 : *
5657 : * Since the matching clauses are known not outerjoin-delayed, they
5658 : * would normally have appeared in the initial joinclause list. If we
5659 : * didn't find them, there are two possibilities:
5660 : *
5661 : * 1. If the FK match is based on an EC that is ec_has_const, it won't
5662 : * have generated any join clauses at all. We discount such ECs while
5663 : * checking to see if we have "all" the clauses. (Below, we'll adjust
5664 : * the selectivity estimate for this case.)
5665 : *
5666 : * 2. The clauses were matched to some other FK in a previous
5667 : * iteration of this loop, and thus removed from worklist. (A likely
5668 : * case is that two FKs are matched to the same EC; there will be only
5669 : * one EC-derived clause in the initial list, so the first FK will
5670 : * consume it.) Applying both FKs' selectivity independently risks
5671 : * underestimating the join size; in particular, this would undo one
5672 : * of the main things that ECs were invented for, namely to avoid
5673 : * double-counting the selectivity of redundant equality conditions.
5674 : * Later we might think of a reasonable way to combine the estimates,
5675 : * but for now, just punt, since this is a fairly uncommon situation.
5676 : */
5677 1474 : if (removedlist == NIL ||
5678 1188 : list_length(removedlist) !=
5679 1188 : (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5680 : {
5681 286 : worklist = list_concat(worklist, removedlist);
5682 286 : continue;
5683 : }
5684 :
5685 : /*
5686 : * Finally we get to the payoff: estimate selectivity using the
5687 : * knowledge that each referencing row will match exactly one row in
5688 : * the referenced table.
5689 : *
5690 : * XXX that's not true in the presence of nulls in the referencing
5691 : * column(s), so in principle we should derate the estimate for those.
5692 : * However (1) if there are any strict restriction clauses for the
5693 : * referencing column(s) elsewhere in the query, derating here would
5694 : * be double-counting the null fraction, and (2) it's not very clear
5695 : * how to combine null fractions for multiple referencing columns. So
5696 : * we do nothing for now about correcting for nulls.
5697 : *
5698 : * XXX another point here is that if either side of an FK constraint
5699 : * is an inheritance parent, we estimate as though the constraint
5700 : * covers all its children as well. This is not an unreasonable
5701 : * assumption for a referencing table, ie the user probably applied
5702 : * identical constraints to all child tables (though perhaps we ought
5703 : * to check that). But it's not possible to have done that for a
5704 : * referenced table. Fortunately, precisely because that doesn't
5705 : * work, it is uncommon in practice to have an FK referencing a parent
5706 : * table. So, at least for now, disregard inheritance here.
5707 : */
5708 1188 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
5709 740 : {
5710 : /*
5711 : * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5712 : * referenced table is exactly the inside of the join. The join
5713 : * selectivity is defined as the fraction of LHS rows that have
5714 : * matches. The FK implies that every LHS row has a match *in the
5715 : * referenced table*; but any restriction clauses on it will
5716 : * reduce the number of matches. Hence we take the join
5717 : * selectivity as equal to the selectivity of the table's
5718 : * restriction clauses, which is rows / tuples; but we must guard
5719 : * against tuples == 0.
5720 : */
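 : 		/*
 : 		 * E.g. if restriction clauses are estimated to let 100 of the
 : 		 * referenced table's 1000 tuples through, this FK contributes
 : 		 * a factor of 100 / 1000 = 0.1 to fkselec.
 : 		 */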
5721 740 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5722 740 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5723 :
5724 740 : fkselec *= ref_rel->rows / ref_tuples;
5725 : }
5726 : else
5727 : {
5728 : /*
5729 : * Otherwise, selectivity is exactly 1/referenced-table-size; but
5730 : * guard against tuples == 0. Note we should use the raw table
5731 : * tuple count, not any estimate of its filtered or joined size.
5732 : */
5733 448 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5734 448 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5735 :
5736 448 : fkselec *= 1.0 / ref_tuples;
5737 : }
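 :
 : /*
 : * Worked illustration (hypothetical numbers): if the referenced table
 : * has ref_rel->tuples = 10000, an inner join gets fkselec = 1/10000,
 : * so joining a 500-row referencing rel to the whole referenced table
 : * is estimated at 500 * 10000 * 0.0001 = 500 rows, ie exactly one
 : * match per referencing row, as the FK guarantees. For a semijoin
 : * whose restrictions reduce the referenced rel to rows = 100, we
 : * instead get fkselec = 100/10000 = 0.01, predicting that 1% of LHS
 : * rows retain a match.
 : */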
5738 :
5739 : /*
5740 : * If any of the FK columns participated in ec_has_const ECs, then
5741 : * equivclass.c will have generated "var = const" restrictions for
5742 : * each side of the join, thus reducing the sizes of both input
5743 : * relations. Taking the fkselec at face value would amount to
5744 : * double-counting the selectivity of the constant restriction for the
5745 : * referencing Var. Hence, look for the restriction clause(s) that
5746 : * were applied to the referencing Var(s), and divide out their
5747 : * selectivity to correct for this.
5748 : */
5749 1188 : if (fkinfo->nconst_ec > 0)
5750 : {
5751 24 : for (int i = 0; i < fkinfo->nkeys; i++)
5752 : {
5753 18 : EquivalenceClass *ec = fkinfo->eclass[i];
5754 :
5755 18 : if (ec && ec->ec_has_const)
5756 : {
5757 6 : EquivalenceMember *em = fkinfo->fk_eclass_member[i];
5758 6 : RestrictInfo *rinfo = find_derived_clause_for_ec_member(ec,
5759 : em);
5760 :
5761 6 : if (rinfo)
5762 : {
5763 : Selectivity s0;
5764 :
5765 6 : s0 = clause_selectivity(root,
5766 : (Node *) rinfo,
5767 : 0,
5768 : jointype,
5769 : sjinfo);
5770 6 : if (s0 > 0)
5771 6 : fkselec /= s0;
5772 : }
5773 : }
5774 : }
5775 : }
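 :
 : /*
 : * Hypothetical example: if an EC generated "fk_col = 42" clauses for
 : * both sides of the join and the referencing side's clause has
 : * selectivity s0 = 0.001, both input rels were already shrunk by that
 : * restriction; dividing fkselec = 1/10000 by s0 yields 0.1, cancelling
 : * one of the two applications of the constant's selectivity.
 : */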
5776 : }
5777 :
5778 177550 : *restrictlist = worklist;
5779 177550 : CLAMP_PROBABILITY(fkselec);
5780 177550 : return fkselec;
5781 : }
5782 :
5783 : /*
5784 : * set_subquery_size_estimates
5785 : * Set the size estimates for a base relation that is a subquery.
5786 : *
5787 : * The rel's targetlist and restrictinfo list must have been constructed
5788 : * already, and the Paths for the subquery must have been completed.
5789 : * We look at the subquery's PlannerInfo to extract data.
5790 : *
5791 : * We set the same fields as set_baserel_size_estimates.
5792 : */
5793 : void
5794 19192 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5795 : {
5796 19192 : PlannerInfo *subroot = rel->subroot;
5797 : RelOptInfo *sub_final_rel;
5798 : ListCell *lc;
5799 :
5800 : /* Should only be applied to base relations that are subqueries */
5801 : Assert(rel->relid > 0);
5802 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
5803 :
5804 : /*
5805 : * Copy raw number of output rows from subquery. All of its paths should
5806 : * have the same output rowcount, so just look at cheapest-total.
5807 : */
5808 19192 : sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
5809 19192 : rel->tuples = sub_final_rel->cheapest_total_path->rows;
5810 :
5811 : /*
5812 : * Compute per-output-column width estimates by examining the subquery's
5813 : * targetlist. For any output that is a plain Var, get the width estimate
5814 : * that was made while planning the subquery. Otherwise, we leave it to
5815 : * set_rel_width to fill in a datatype-based default estimate.
5816 : */
5817 77522 : foreach(lc, subroot->parse->targetList)
5818 : {
5819 58330 : TargetEntry *te = lfirst_node(TargetEntry, lc);
5820 58330 : Node *texpr = (Node *) te->expr;
5821 58330 : int32 item_width = 0;
5822 :
5823 : /* junk columns aren't visible to upper query */
5824 58330 : if (te->resjunk)
5825 578 : continue;
5826 :
5827 : /*
5828 : * The subquery could be an expansion of a view that's had columns
5829 : * added to it since the current query was parsed, so that there are
5830 : * non-junk tlist columns in it that don't correspond to any column
5831 : * visible at our query level. Ignore such columns.
5832 : */
5833 57752 : if (te->resno < rel->min_attr || te->resno > rel->max_attr)
5834 0 : continue;
5835 :
5836 : /*
5837 : * XXX This currently doesn't work for subqueries containing set
5838 : * operations, because the Vars in their tlists are bogus references
5839 : * to the first leaf subquery, which wouldn't give the right answer
5840 : * even if we could still get to its PlannerInfo.
5841 : *
5842 : * Also, the subquery could be an appendrel for which all branches are
5843 : * known empty due to constraint exclusion, in which case
5844 : * set_append_rel_pathlist will have left the attr_widths set to zero.
5845 : *
5846 : * In either case, we just leave the width estimate zero until
5847 : * set_rel_width fixes it.
5848 : */
5849 57752 : if (IsA(texpr, Var) &&
5850 25940 : subroot->parse->setOperations == NULL)
5851 : {
5852 24536 : Var *var = (Var *) texpr;
5853 24536 : RelOptInfo *subrel = find_base_rel(subroot, var->varno);
5854 :
5855 24536 : item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
5856 : }
5857 57752 : rel->attr_widths[te->resno - rel->min_attr] = item_width;
5858 : }
5859 :
5860 : /* Now estimate number of output rows, etc */
5861 19192 : set_baserel_size_estimates(root, rel);
5862 19192 : }
5863 :
5864 : /*
5865 : * set_function_size_estimates
5866 : * Set the size estimates for a base relation that is a function call.
5867 : *
5868 : * The rel's targetlist and restrictinfo list must have been constructed
5869 : * already.
5870 : *
5871 : * We set the same fields as set_baserel_size_estimates.
5872 : */
5873 : void
5874 38142 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5875 : {
5876 : RangeTblEntry *rte;
5877 : ListCell *lc;
5878 :
5879 : /* Should only be applied to base relations that are functions */
5880 : Assert(rel->relid > 0);
5881 38142 : rte = planner_rt_fetch(rel->relid, root);
5882 : Assert(rte->rtekind == RTE_FUNCTION);
5883 :
5884 : /*
5885 : * Estimate number of rows the functions will return. The rowcount of the
5886 : * node is that of the largest function result.
5887 : */
5888 38142 : rel->tuples = 0;
5889 76596 : foreach(lc, rte->functions)
5890 : {
5891 38454 : RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
5892 38454 : double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
5893 :
5894 38454 : if (ntup > rel->tuples)
5895 38166 : rel->tuples = ntup;
5896 : }
5897 :
5898 : /* Now estimate number of output rows, etc */
5899 38142 : set_baserel_size_estimates(root, rel);
5900 38142 : }
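 :
 : /*
 : * For example, ROWS FROM (generate_series(1, 10), generate_series(1, 100))
 : * is estimated at 100 tuples, the larger of the two per-function
 : * rowcounts (assuming each function's support function or ROWS clause
 : * supplies those estimates).
 : */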
5901 :
5902 : /*
5903 : * set_tablefunc_size_estimates
5904 : * Set the size estimates for a base relation that is a table function call.
5905 : *
5906 : * The rel's targetlist and restrictinfo list must have been constructed
5907 : * already.
5908 : *
5909 : * We set the same fields as set_baserel_size_estimates.
5910 : */
5911 : void
5912 216 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5913 : {
5914 : /* Should only be applied to base relations that are table functions */
5915 : Assert(rel->relid > 0);
5916 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
5917 :
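 : /* Arbitrary default assumption: no statistics exist for a table function */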
5918 216 : rel->tuples = 100;
5919 :
5920 : /* Now estimate number of output rows, etc */
5921 216 : set_baserel_size_estimates(root, rel);
5922 216 : }
5923 :
5924 : /*
5925 : * set_values_size_estimates
5926 : * Set the size estimates for a base relation that is a values list.
5927 : *
5928 : * The rel's targetlist and restrictinfo list must have been constructed
5929 : * already.
5930 : *
5931 : * We set the same fields as set_baserel_size_estimates.
5932 : */
5933 : void
5934 7322 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5935 : {
5936 : RangeTblEntry *rte;
5937 :
5938 : /* Should only be applied to base relations that are values lists */
5939 : Assert(rel->relid > 0);
5940 7322 : rte = planner_rt_fetch(rel->relid, root);
5941 : Assert(rte->rtekind == RTE_VALUES);
5942 :
5943 : /*
5944 : * Estimate number of rows the values list will return. We know this
5945 : * precisely based on the list length (well, barring set-returning
5946 : * functions in list items, but that's a refinement not catered for
5947 : * anywhere else either).
5948 : */
5949 7322 : rel->tuples = list_length(rte->values_lists);
5950 :
5951 : /* Now estimate number of output rows, etc */
5952 7322 : set_baserel_size_estimates(root, rel);
5953 7322 : }
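 :
 : /* For instance, VALUES (1), (2), (3) yields rel->tuples = 3. */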
5954 :
5955 : /*
5956 : * set_cte_size_estimates
5957 : * Set the size estimates for a base relation that is a CTE reference.
5958 : *
5959 : * The rel's targetlist and restrictinfo list must have been constructed
5960 : * already, and we need an estimate of the number of rows returned by the CTE
5961 : * (if a regular CTE) or the non-recursive term (if a self-reference).
5962 : *
5963 : * We set the same fields as set_baserel_size_estimates.
5964 : */
5965 : void
5966 3776 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
5967 : {
5968 : RangeTblEntry *rte;
5969 :
5970 : /* Should only be applied to base relations that are CTE references */
5971 : Assert(rel->relid > 0);
5972 3776 : rte = planner_rt_fetch(rel->relid, root);
5973 : Assert(rte->rtekind == RTE_CTE);
5974 :
5975 3776 : if (rte->self_reference)
5976 : {
5977 : /*
5978 : * In a self-reference, we assume the average worktable size is a
5979 : * multiple of the nonrecursive term's size. The best multiplier will
5980 : * vary depending on query "fan-out", so make its value adjustable.
5981 : */
5982 784 : rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
5983 : }
5984 : else
5985 : {
5986 : /* Otherwise just believe the CTE's rowcount estimate */
5987 2992 : rel->tuples = cte_rows;
5988 : }
5989 :
5990 : /* Now estimate number of output rows, etc */
5991 3776 : set_baserel_size_estimates(root, rel);
5992 3776 : }
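 :
 : /*
 : * Example, assuming the default recursive_worktable_factor of 10.0: if
 : * the nonrecursive term is estimated at 5 rows, a self-reference's
 : * worktable size is estimated as clamp_row_est(10.0 * 5) = 50 rows.
 : */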
5993 :
5994 : /*
5995 : * set_namedtuplestore_size_estimates
5996 : * Set the size estimates for a base relation that is a tuplestore reference.
5997 : *
5998 : * The rel's targetlist and restrictinfo list must have been constructed
5999 : * already.
6000 : *
6001 : * We set the same fields as set_baserel_size_estimates.
6002 : */
6003 : void
6004 446 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6005 : {
6006 : RangeTblEntry *rte;
6007 :
6008 : /* Should only be applied to base relations that are tuplestore references */
6009 : Assert(rel->relid > 0);
6010 446 : rte = planner_rt_fetch(rel->relid, root);
6011 : Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
6012 :
6013 : /*
6014 : * Use the estimate provided by the code that is generating the named
6015 : * tuplestore. In some cases the actual number might be available; in
6016 : * others, the same plan will be re-used, so a "typical" value might
6017 : * be estimated and used.
6018 : */
6019 446 : rel->tuples = rte->enrtuples;
6020 446 : if (rel->tuples < 0)
6021 0 : rel->tuples = 1000;
6022 :
6023 : /* Now estimate number of output rows, etc */
6024 446 : set_baserel_size_estimates(root, rel);
6025 446 : }
6026 :
6027 : /*
6028 : * set_result_size_estimates
6029 : * Set the size estimates for an RTE_RESULT base relation
6030 : *
6031 : * The rel's targetlist and restrictinfo list must have been constructed
6032 : * already.
6033 : *
6034 : * We set the same fields as set_baserel_size_estimates.
6035 : */
6036 : void
6037 1354 : set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6038 : {
6039 : /* Should only be applied to RTE_RESULT base relations */
6040 : Assert(rel->relid > 0);
6041 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
6042 :
6043 : /* RTE_RESULT always generates a single row, natively */
6044 1354 : rel->tuples = 1;
6045 :
6046 : /* Now estimate number of output rows, etc */
6047 1354 : set_baserel_size_estimates(root, rel);
6048 1354 : }
6049 :
6050 : /*
6051 : * set_foreign_size_estimates
6052 : * Set the size estimates for a base relation that is a foreign table.
6053 : *
6054 : * There is not a whole lot that we can do here; the foreign-data wrapper
6055 : * is responsible for producing useful estimates. We can do a decent job
6056 : * of estimating baserestrictcost, so we set that, and we also set up width
6057 : * using what will be purely datatype-driven estimates from the targetlist.
6058 : * There is no way to do anything sane with the rows value, so we just put
6059 : * a default estimate and hope that the wrapper can improve on it. The
6060 : * wrapper's GetForeignRelSize function will be called momentarily.
6061 : *
6062 : * The rel's targetlist and restrictinfo list must have been constructed
6063 : * already.
6064 : */
6065 : void
6066 2324 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6067 : {
6068 : /* Should only be applied to base relations */
6069 : Assert(rel->relid > 0);
6070 :
6071 2324 : rel->rows = 1000; /* entirely bogus default estimate */
6072 :
6073 2324 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
6074 :
6075 2324 : set_rel_width(root, rel);
6076 2324 : }
6077 :
6078 :
6079 : /*
6080 : * set_rel_width
6081 : * Set the estimated output width of a base relation.
6082 : *
6083 : * The estimated output width is the sum of the per-attribute width estimates
6084 : * for the actually-referenced columns, plus any PHVs or other expressions
6085 : * that have to be calculated at this relation. This is the amount of data
6086 : * we'd need to pass upwards in case of a sort, hash, etc.
6087 : *
6088 : * This function also sets reltarget->cost, so it's a bit misnamed now.
6089 : *
6090 : * NB: this works best on plain relations because it prefers to look at
6091 : * real Vars. For subqueries, set_subquery_size_estimates will already have
6092 : * copied up whatever per-column estimates were made within the subquery,
6093 : * and for other types of rels there isn't much we can do anyway. We fall
6094 : * back on (fairly stupid) datatype-based width estimates if we can't get
6095 : * any better number.
6096 : *
6097 : * The per-attribute width estimates are cached for possible re-use while
6098 : * building join relations or post-scan/join pathtargets.
6099 : */
6100 : static void
6101 402340 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
6102 : {
6103 402340 : Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
6104 402340 : int32 tuple_width = 0;
6105 402340 : bool have_wholerow_var = false;
6106 : ListCell *lc;
6107 :
6108 : /* Vars are assumed to have cost zero, but other exprs do not */
6109 402340 : rel->reltarget->cost.startup = 0;
6110 402340 : rel->reltarget->cost.per_tuple = 0;
6111 :
6112 1395750 : foreach(lc, rel->reltarget->exprs)
6113 : {
6114 993410 : Node *node = (Node *) lfirst(lc);
6115 :
6116 : /*
6117 : * Ordinarily, a Var in a rel's targetlist must belong to that rel;
6118 : * but there are corner cases involving LATERAL references where that
6119 : * isn't so. If the Var has the wrong varno, fall through to the
6120 : * generic case (it doesn't seem worth the trouble to be any smarter).
6121 : */
6122 993410 : if (IsA(node, Var) &&
6123 975224 : ((Var *) node)->varno == rel->relid)
6124 254528 : {
6125 975158 : Var *var = (Var *) node;
6126 : int ndx;
6127 : int32 item_width;
6128 :
6129 : Assert(var->varattno >= rel->min_attr);
6130 : Assert(var->varattno <= rel->max_attr);
6131 :
6132 975158 : ndx = var->varattno - rel->min_attr;
6133 :
6134 : /*
6135 : * If it's a whole-row Var, we'll deal with it below after we have
6136 : * already cached as many attr widths as possible.
6137 : */
6138 975158 : if (var->varattno == 0)
6139 : {
6140 2462 : have_wholerow_var = true;
6141 2462 : continue;
6142 : }
6143 :
6144 : /*
6145 : * The width may have been cached already (especially if it's a
6146 : * subquery), so don't duplicate effort.
6147 : */
6148 972696 : if (rel->attr_widths[ndx] > 0)
6149 : {
6150 192042 : tuple_width += rel->attr_widths[ndx];
6151 192042 : continue;
6152 : }
6153 :
6154 : /* Try to get column width from statistics */
6155 780654 : if (reloid != InvalidOid && var->varattno > 0)
6156 : {
6157 606670 : item_width = get_attavgwidth(reloid, var->varattno);
6158 606670 : if (item_width > 0)
6159 : {
6160 526126 : rel->attr_widths[ndx] = item_width;
6161 526126 : tuple_width += item_width;
6162 526126 : continue;
6163 : }
6164 : }
6165 :
6166 : /*
6167 : * Not a plain relation, or can't find statistics for it. Estimate
6168 : * using just the type info.
6169 : */
6170 254528 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
6171 : Assert(item_width > 0);
6172 254528 : rel->attr_widths[ndx] = item_width;
6173 254528 : tuple_width += item_width;
6174 : }
6175 18252 : else if (IsA(node, PlaceHolderVar))
6176 : {
6177 : /*
6178 : * We will need to evaluate the PHV's contained expression while
6179 : * scanning this rel, so be sure to include it in reltarget->cost.
6180 : */
6181 992 : PlaceHolderVar *phv = (PlaceHolderVar *) node;
6182 992 : PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
6183 : QualCost cost;
6184 :
6185 992 : tuple_width += phinfo->ph_width;
6186 992 : cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
6187 992 : rel->reltarget->cost.startup += cost.startup;
6188 992 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6189 : }
6190 : else
6191 : {
6192 : /*
6193 : * We could be looking at an expression pulled up from a subquery,
6194 : * or a ROW() representing a whole-row child Var, etc. Do what we
6195 : * can using the expression type information.
6196 : */
6197 : int32 item_width;
6198 : QualCost cost;
6199 :
6200 17260 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
6201 : Assert(item_width > 0);
6202 17260 : tuple_width += item_width;
6203 : /* Not entirely clear if we need to account for cost, but do so */
6204 17260 : cost_qual_eval_node(&cost, node, root);
6205 17260 : rel->reltarget->cost.startup += cost.startup;
6206 17260 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6207 : }
6208 : }
6209 :
6210 : /*
6211 : * If we have a whole-row reference, estimate its width as the sum of
6212 : * per-column widths plus heap tuple header overhead.
6213 : */
6214 402340 : if (have_wholerow_var)
6215 : {
6216 2462 : int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
6217 :
6218 2462 : if (reloid != InvalidOid)
6219 : {
6220 : /* Real relation, so estimate true tuple width */
6221 2042 : wholerow_width += get_relation_data_width(reloid,
6222 2042 : rel->attr_widths - rel->min_attr);
6223 : }
6224 : else
6225 : {
6226 : /* Do what we can with info for a phony rel */
6227 : AttrNumber i;
6228 :
6229 1108 : for (i = 1; i <= rel->max_attr; i++)
6230 688 : wholerow_width += rel->attr_widths[i - rel->min_attr];
6231 : }
6232 :
6233 2462 : rel->attr_widths[0 - rel->min_attr] = wholerow_width;
6234 :
6235 : /*
6236 : * Include the whole-row Var as part of the output tuple. Yes, that
6237 : * really is what happens at runtime.
6238 : */
6239 2462 : tuple_width += wholerow_width;
6240 : }
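 :
 : /*
 : * Illustration (hypothetical widths, 8-byte MAXALIGN assumed): for a
 : * phony rel with three 4-byte columns, wholerow_width =
 : * MAXALIGN(SizeofHeapTupleHeader) + 3 * 4 = 24 + 12 = 36 bytes.
 : */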
6241 :
6242 : Assert(tuple_width >= 0);
6243 402340 : rel->reltarget->width = tuple_width;
6244 402340 : }
6245 :
6246 : /*
6247 : * set_pathtarget_cost_width
6248 : * Set the estimated eval cost and output width of a PathTarget tlist.
6249 : *
6250 : * As a notational convenience, returns the same PathTarget pointer passed in.
6251 : *
6252 : * Most, though not quite all, uses of this function occur after we've run
6253 : * set_rel_width() for base relations; so we can usually obtain cached width
6254 : * estimates for Vars. If we can't, fall back on datatype-based width
6255 : * estimates. Present early-planning uses of PathTargets don't need accurate
6256 : * widths badly enough to justify going to the catalogs for better data.
6257 : */
6258 : PathTarget *
6259 514190 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6260 : {
6261 514190 : int32 tuple_width = 0;
6262 : ListCell *lc;
6263 :
6264 : /* Vars are assumed to have cost zero, but other exprs do not */
6265 514190 : target->cost.startup = 0;
6266 514190 : target->cost.per_tuple = 0;
6267 :
6268 1717184 : foreach(lc, target->exprs)
6269 : {
6270 1202994 : Node *node = (Node *) lfirst(lc);
6271 :
6272 1202994 : tuple_width += get_expr_width(root, node);
6273 :
6274 : /* For non-Vars, account for evaluation cost */
6275 1202994 : if (!IsA(node, Var))
6276 : {
6277 : QualCost cost;
6278 :
6279 537452 : cost_qual_eval_node(&cost, node, root);
6280 537452 : target->cost.startup += cost.startup;
6281 537452 : target->cost.per_tuple += cost.per_tuple;
6282 : }
6283 : }
6284 :
6285 : Assert(tuple_width >= 0);
6286 514190 : target->width = tuple_width;
6287 :
6288 514190 : return target;
6289 : }
6290 :
6291 : /*
6292 : * get_expr_width
6293 : * Estimate the width of the given expr. For a Var, prefer the width
6294 : * cached in its owning RelOptInfo; otherwise, or when no cached value
6295 : * is available, fall back on the type's average width.
6296 : */
6297 : static int32
6298 1436106 : get_expr_width(PlannerInfo *root, const Node *expr)
6299 : {
6300 : int32 width;
6301 :
6302 1436106 : if (IsA(expr, Var))
6303 : {
6304 887654 : const Var *var = (const Var *) expr;
6305 :
6306 : /* We should not see any upper-level Vars here */
6307 : Assert(var->varlevelsup == 0);
6308 :
6309 : /* Try to get data from RelOptInfo cache */
6310 887654 : if (!IS_SPECIAL_VARNO(var->varno) &&
6311 882892 : var->varno < root->simple_rel_array_size)
6312 : {
6313 882892 : RelOptInfo *rel = root->simple_rel_array[var->varno];
6314 :
6315 882892 : if (rel != NULL &&
6316 860320 : var->varattno >= rel->min_attr &&
6317 860320 : var->varattno <= rel->max_attr)
6318 : {
6319 860320 : int ndx = var->varattno - rel->min_attr;
6320 :
6321 860320 : if (rel->attr_widths[ndx] > 0)
6322 836054 : return rel->attr_widths[ndx];
6323 : }
6324 : }
6325 :
6326 : /*
6327 : * No cached data available, so estimate using just the type info.
6328 : */
6329 51600 : width = get_typavgwidth(var->vartype, var->vartypmod);
6330 : Assert(width > 0);
6331 :
6332 51600 : return width;
6333 : }
6334 :
6335 548452 : width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6336 : Assert(width > 0);
6337 548452 : return width;
6338 : }
6339 :
6340 : /*
6341 : * relation_byte_size
6342 : * Estimate the storage space in bytes for a given number of tuples
6343 : * of a given width (size in bytes).
6344 : */
6345 : static double
6346 2953826 : relation_byte_size(double tuples, int width)
6347 : {
6348 2953826 : return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
6349 : }
6350 :
6351 : /*
6352 : * page_size
6353 : * Returns an estimate of the number of pages covered by a given
6354 : * number of tuples of a given width (size in bytes).
6355 : */
6356 : static double
6357 8600 : page_size(double tuples, int width)
6358 : {
6359 8600 : return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6360 : }
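 :
 : /*
 : * Worked example, assuming 8-byte MAXALIGN, the usual 23-byte heap
 : * tuple header, and the default 8 kB BLCKSZ: relation_byte_size(1000, 40)
 : * = 1000 * (40 + 24) = 64000 bytes, so page_size(1000, 40) =
 : * ceil(64000 / 8192) = 8 pages.
 : */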
6361 :
6362 : /*
6363 : * Estimate the fraction of the work that each worker will do, given the
6364 : * number of workers budgeted for the path. Returned as a divisor; each
 : * participant is assumed to do 1/divisor of the total work.
6365 : */
6366 : static double
6367 133506 : get_parallel_divisor(Path *path)
6368 : {
6369 133506 : double parallel_divisor = path->parallel_workers;
6370 :
6371 : /*
6372 : * Early experience with parallel query suggests that when there is only
6373 : * one worker, the leader often makes a very substantial contribution to
6374 : * executing the parallel portion of the plan, but as more workers are
6375 : * added, it does less and less, because it's busy reading tuples from the
6376 : * workers and doing whatever non-parallel post-processing is needed. By
6377 : * the time we reach 4 workers, the leader no longer makes a meaningful
6378 : * contribution. Thus, for now, estimate that the leader spends 30% of
6379 : * its time servicing each worker, and the remainder executing the
6380 : * parallel plan.
6381 : */
6382 133506 : if (parallel_leader_participation)
6383 : {
6384 : double leader_contribution;
6385 :
6386 132738 : leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6387 132738 : if (leader_contribution > 0)
6388 131826 : parallel_divisor += leader_contribution;
6389 : }
6390 :
6391 133506 : return parallel_divisor;
6392 : }
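 :
 : /*
 : * Example: with 2 workers and parallel_leader_participation enabled,
 : * leader_contribution = 1.0 - 0.3 * 2 = 0.4, so the divisor is 2.4 and
 : * each participant is expected to do 1/2.4 = ~42% of the total work.
 : * At 4 or more workers the leader term goes nonpositive and is dropped.
 : */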
6393 :
6394 : /*
6395 : * compute_bitmap_pages
6396 : *
6397 : * Compute the number of heap pages fetched by a bitmap heap scan.
6398 : */
6399 : double
6400 524612 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
6401 : int loop_count, Cost *cost, double *tuple)
6402 : {
6403 : Cost indexTotalCost;
6404 : Selectivity indexSelectivity;
6405 : double T;
6406 : double pages_fetched;
6407 : double tuples_fetched;
6408 : double heap_pages;
6409 : long maxentries;
6410 :
6411 : /*
6412 : * Fetch total cost of obtaining the bitmap, as well as its total
6413 : * selectivity.
6414 : */
6415 524612 : cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6416 :
6417 : /*
6418 : * Estimate number of main-table pages fetched.
6419 : */
6420 524612 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6421 :
6422 524612 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
6423 :
6424 : /*
6425 : * For a single scan, the number of heap pages that need to be fetched is
6426 : * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6427 : * re-reads needed).
6428 : */
6429 524612 : pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
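 :
 : /*
 : * For instance (hypothetical numbers): with T = 10000 pages and
 : * tuples_fetched = 1000, pages_fetched = (2 * 10000 * 1000) /
 : * (2 * 10000 + 1000) = 20000000 / 21000 = ~952.4, which the later
 : * ceil() rounds up to 953.
 : */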
6430 :
6431 : /*
6432 : * Calculate the number of pages fetched from the heap. Then, based on
6433 : * the current work_mem setting, estimate the maximum number of bitmap
 : * entries (maxentries) that will fit in memory.
6434 : * (Note that we always do this calculation based on the number of pages
6435 : * that would be fetched in a single iteration, even if loop_count > 1.
6436 : * That's correct, because only that number of entries will be stored in
6437 : * the bitmap at one time.)
6438 : */
6439 524612 : heap_pages = Min(pages_fetched, baserel->pages);
6440 524612 : maxentries = tbm_calculate_entries(work_mem * 1024L);
6441 :
6442 524612 : if (loop_count > 1)
6443 : {
6444 : /*
6445 : * For repeated bitmap scans, scale up the number of tuples fetched in
6446 : * the Mackert and Lohman formula by the number of scans, so that we
6447 : * estimate the number of pages fetched by all the scans. Then
6448 : * pro-rate for one scan.
6449 : */
6450 98838 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6451 : baserel->pages,
6452 : get_indexpath_pages(bitmapqual),
6453 : root);
6454 98838 : pages_fetched /= loop_count;
6455 : }
6456 :
6457 524612 : if (pages_fetched >= T)
6458 48742 : pages_fetched = T;
6459 : else
6460 475870 : pages_fetched = ceil(pages_fetched);
6461 :
6462 524612 : if (maxentries < heap_pages)
6463 : {
6464 : double exact_pages;
6465 : double lossy_pages;
6466 :
6467 : /*
6468 : * Crude approximation of the number of lossy pages. Because of the
6469 : * way tbm_lossify() is coded, the number of lossy pages increases
6470 : * very sharply as soon as we run short of memory; this formula has
6471 : * that property and seems to perform adequately in testing, but it's
6472 : * possible we could do better somehow.
6473 : */
6474 18 : lossy_pages = Max(0, heap_pages - maxentries / 2);
6475 18 : exact_pages = heap_pages - lossy_pages;
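 :
 : /*
 : * Hypothetical example: with heap_pages = 1000 and maxentries = 800,
 : * lossy_pages = Max(0, 1000 - 400) = 600 and exact_pages = 400, so
 : * 60% of the fetched pages are expected to be lossy.
 : */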
6476 :
6477 : /*
6478 : * If there are lossy pages then recompute the number of tuples
6479 : * processed by the bitmap heap node. We assume here that the chance
6480 : * of a given tuple coming from an exact page is the same as the
6481 : * chance that a given page is exact. This might not be true, but
6482 : * it's not clear how we can do any better.
6483 : */
6484 18 : if (lossy_pages > 0)
6485 : tuples_fetched =
6486 18 : clamp_row_est(indexSelectivity *
6487 18 : (exact_pages / heap_pages) * baserel->tuples +
6488 18 : (lossy_pages / heap_pages) * baserel->tuples);
6489 : }
6490 :
6491 524612 : if (cost)
6492 403588 : *cost = indexTotalCost;
6493 524612 : if (tuple)
6494 403588 : *tuple = tuples_fetched;
6495 :
6496 524612 : return pages_fetched;
6497 : }