Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * costsize.c
4 : * Routines to compute (and set) relation sizes and path costs
5 : *
6 : * Path costs are measured in arbitrary units established by these basic
7 : * parameters:
8 : *
9 : *	seq_page_cost			Cost of a sequential page fetch
10 : *	random_page_cost		Cost of a non-sequential page fetch
11 : *	cpu_tuple_cost			Cost of typical CPU time to process a tuple
12 : *	cpu_index_tuple_cost	Cost of typical CPU time to process an index tuple
13 : *	cpu_operator_cost		Cost of CPU time to execute an operator or function
14 : *	parallel_tuple_cost		Cost of CPU time to pass a tuple from worker to leader backend
15 : *	parallel_setup_cost		Cost of setting up shared memory for parallelism
16 : *
17 : * We expect that the kernel will typically do some amount of read-ahead
18 : * optimization; this in conjunction with seek costs means that seq_page_cost
19 : * is normally considerably less than random_page_cost. (However, if the
20 : * database is fully cached in RAM, it is reasonable to set them equal.)
21 : *
22 : * We also use a rough estimate "effective_cache_size" of the number of
23 : * disk pages in Postgres + OS-level disk cache. (We can't simply use
24 : * NBuffers for this purpose because that would ignore the effects of
25 : * the kernel's disk cache.)
26 : *
27 : * Obviously, taking constants for these values is an oversimplification,
28 : * but it's tough enough to get any useful estimates even at this level of
29 : * detail. Note that all of these parameters are user-settable, in case
30 : * the default values are drastically off for a particular platform.
31 : *
32 : * seq_page_cost and random_page_cost can also be overridden for an individual
33 : * tablespace, in case some data is on a fast disk and other data is on a slow
34 : * disk. Per-tablespace overrides never apply to temporary work files such as
35 : * an external sort or a materialize node that overflows work_mem.
36 : *
37 : * We compute two separate costs for each path:
38 : * total_cost: total estimated cost to fetch all tuples
39 : * startup_cost: cost that is expended before first tuple is fetched
40 : * In some scenarios, such as when there is a LIMIT or we are implementing
41 : * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42 : * path's result. A caller can estimate the cost of fetching a partial
43 : * result by interpolating between startup_cost and total_cost. In detail:
44 : *		actual_cost = startup_cost +
45 : *			(total_cost - startup_cost) * tuples_to_fetch / path->rows;
46 : * Note that a base relation's row count (and, by extension, plan_rows for
47 : * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
48 : * that this equation works properly. (Note: while path->rows is never zero
49 : * for ordinary relations, it is zero for paths for provably-empty relations,
50 : * so beware of division-by-zero.) The LIMIT is applied as a top-level
51 : * plan node.
52 : *
53 : * For largely historical reasons, most of the routines in this module use
54 : * the passed result Path only to store their results (rows, startup_cost and
55 : * total_cost) into. All the input data they need is passed as separate
56 : * parameters, even though much of it could be extracted from the Path.
57 : * An exception is made for the cost_XXXjoin() routines, which expect all
58 : * the other fields of the passed XXXPath to be filled in, and similarly
59 : * cost_index() assumes the passed IndexPath is valid except for its output
60 : * values.
61 : *
62 : *
63 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
64 : * Portions Copyright (c) 1994, Regents of the University of California
65 : *
66 : * IDENTIFICATION
67 : * src/backend/optimizer/path/costsize.c
68 : *
69 : *-------------------------------------------------------------------------
70 : */
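
/*
 * A minimal, self-contained sketch of the partial-fetch interpolation
 * described in the comment above.  The function name and parameters are
 * hypothetical; real callers work directly with a Path's rows, startup_cost
 * and total_cost fields.
 */
static double
sketch_partial_fetch_cost(double startup_cost, double total_cost,
						  double tuples_to_fetch, double path_rows)
{
	/* paths for provably-empty relations have rows == 0; guard the divide */
	if (path_rows <= 0.0)
		return startup_cost;
	return startup_cost +
		(total_cost - startup_cost) * tuples_to_fetch / path_rows;
}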
71 :
72 : #include "postgres.h"
73 :
74 : #include <limits.h>
75 : #include <math.h>
76 :
77 : #include "access/amapi.h"
78 : #include "access/htup_details.h"
79 : #include "access/tsmapi.h"
80 : #include "executor/executor.h"
81 : #include "executor/nodeAgg.h"
82 : #include "executor/nodeHash.h"
83 : #include "executor/nodeMemoize.h"
84 : #include "miscadmin.h"
85 : #include "nodes/makefuncs.h"
86 : #include "nodes/nodeFuncs.h"
87 : #include "optimizer/clauses.h"
88 : #include "optimizer/cost.h"
89 : #include "optimizer/optimizer.h"
90 : #include "optimizer/pathnode.h"
91 : #include "optimizer/paths.h"
92 : #include "optimizer/placeholder.h"
93 : #include "optimizer/plancat.h"
94 : #include "optimizer/restrictinfo.h"
95 : #include "parser/parsetree.h"
96 : #include "utils/lsyscache.h"
97 : #include "utils/selfuncs.h"
98 : #include "utils/spccache.h"
99 : #include "utils/tuplesort.h"
100 :
101 :
102 : #define LOG2(x) (log(x) / 0.693147180559945)
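/* 0.693147180559945 is ln(2), so LOG2(x) computes log2(x). */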
103 :
104 : /*
105 : * Append and MergeAppend nodes are less expensive than some other operations
106 : * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
107 : * per-tuple cost as cpu_tuple_cost multiplied by this value.
108 : */
109 : #define APPEND_CPU_COST_MULTIPLIER 0.5
110 :
111 : /*
112 : * Maximum value for row estimates. We cap row estimates to this to help
113 : * ensure that costs based on these estimates remain within the range of what
114 : * double can represent. add_path() wouldn't act sanely given infinite or NaN
115 : * cost values.
116 : */
117 : #define MAXIMUM_ROWCOUNT 1e100
118 :
119 : double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
120 : double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
121 : double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
122 : double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
123 : double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
124 : double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
125 : double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
126 : double recursive_worktable_factor = DEFAULT_RECURSIVE_WORKTABLE_FACTOR;
127 :
128 : int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
129 :
130 : Cost disable_cost = 1.0e10;
131 :
132 : int max_parallel_workers_per_gather = 2;
133 :
134 : bool enable_seqscan = true;
135 : bool enable_indexscan = true;
136 : bool enable_indexonlyscan = true;
137 : bool enable_bitmapscan = true;
138 : bool enable_tidscan = true;
139 : bool enable_sort = true;
140 : bool enable_incremental_sort = true;
141 : bool enable_hashagg = true;
142 : bool enable_nestloop = true;
143 : bool enable_material = true;
144 : bool enable_memoize = true;
145 : bool enable_mergejoin = true;
146 : bool enable_hashjoin = true;
147 : bool enable_gathermerge = true;
148 : bool enable_partitionwise_join = false;
149 : bool enable_partitionwise_aggregate = false;
150 : bool enable_parallel_append = true;
151 : bool enable_parallel_hash = true;
152 : bool enable_partition_pruning = true;
153 : bool enable_presorted_aggregate = true;
154 : bool enable_async_append = true;
155 :
156 : typedef struct
157 : {
158 : PlannerInfo *root;
159 : QualCost total;
160 : } cost_qual_eval_context;
161 :
162 : static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
163 : static MergeScanSelCache *cached_scansel(PlannerInfo *root,
164 : RestrictInfo *rinfo,
165 : PathKey *pathkey);
166 : static void cost_rescan(PlannerInfo *root, Path *path,
167 : Cost *rescan_startup_cost, Cost *rescan_total_cost);
168 : static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
169 : static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
170 : ParamPathInfo *param_info,
171 : QualCost *qpqual_cost);
172 : static bool has_indexed_join_quals(NestPath *path);
173 : static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
174 : List *quals);
175 : static double calc_joinrel_size_estimate(PlannerInfo *root,
176 : RelOptInfo *joinrel,
177 : RelOptInfo *outer_rel,
178 : RelOptInfo *inner_rel,
179 : double outer_rows,
180 : double inner_rows,
181 : SpecialJoinInfo *sjinfo,
182 : List *restrictlist);
183 : static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
184 : Relids outer_relids,
185 : Relids inner_relids,
186 : SpecialJoinInfo *sjinfo,
187 : List **restrictlist);
188 : static Cost append_nonpartial_cost(List *subpaths, int numpaths,
189 : int parallel_workers);
190 : static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
191 : static int32 get_expr_width(PlannerInfo *root, const Node *expr);
192 : static double relation_byte_size(double tuples, int width);
193 : static double page_size(double tuples, int width);
194 : static double get_parallel_divisor(Path *path);
195 :
196 :
197 : /*
198 : * clamp_row_est
199 : * Force a row-count estimate to a sane value.
200 : */
201 : double
202 6739772 : clamp_row_est(double nrows)
203 : {
204 : /*
205 : * Avoid infinite and NaN row estimates. Costs derived from such values
206 : * are going to be useless. Also force the estimate to be at least one
207 : * row, to make explain output look better and to avoid possible
208 : * divide-by-zero when interpolating costs. Make it an integer, too.
209 : */
210 6739772 : if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
211 0 : nrows = MAXIMUM_ROWCOUNT;
212 6739772 : else if (nrows <= 1.0)
213 2593444 : nrows = 1.0;
214 : else
215 4146328 : nrows = rint(nrows);
216 :
217 6739772 : return nrows;
218 : }
219 :
220 : /*
221 : * clamp_width_est
222 : * Force a tuple-width estimate to a sane value.
223 : *
224 : * The planner represents datatype width and tuple width estimates as int32.
225 : * When summing column width estimates to create a tuple width estimate,
226 : * it's possible to reach integer overflow in edge cases. To ensure sane
227 : * behavior, we form such sums in int64 arithmetic and then apply this routine
228 : * to clamp to int32 range.
229 : */
230 : int32
231 1655680 : clamp_width_est(int64 tuple_width)
232 : {
233 : /*
234 : * Anything more than MaxAllocSize is clearly bogus, since we could not
235 : * create a tuple that large.
236 : */
237 1655680 : if (tuple_width > MaxAllocSize)
238 0 : return (int32) MaxAllocSize;
239 :
240 : /*
241 : * Unlike clamp_row_est, we just Assert that the value isn't negative,
242 : * rather than masking such errors.
243 : */
244 : Assert(tuple_width >= 0);
245 :
246 1655680 : return (int32) tuple_width;
247 : }
248 :
249 : /*
250 : * clamp_cardinality_to_long
251 : * Cast a Cardinality value to a sane long value.
252 : */
253 : long
254 39892 : clamp_cardinality_to_long(Cardinality x)
255 : {
256 : /*
257 : * Just for paranoia's sake, ensure we do something sane with negative or
258 : * NaN values.
259 : */
260 39892 : if (isnan(x))
261 0 : return LONG_MAX;
262 39892 : if (x <= 0)
263 488 : return 0;
264 :
265 : /*
266 : * If "long" is 64 bits, then LONG_MAX cannot be represented exactly as a
267 : * double. Casting it to double and back may well result in overflow due
268 : * to rounding, so avoid doing that. We trust that any double value that
269 : * compares strictly less than "(double) LONG_MAX" will cast to a
270 : * representable "long" value.
271 : */
272 39404 : return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
273 : }
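
/*
 * A worked example of the rounding hazard noted above: with a 64-bit "long",
 * LONG_MAX = 2^63 - 1 is not exactly representable as a double, and the cast
 * (double) LONG_MAX rounds up to exactly 2^63, so casting that double back
 * to long would overflow.  Hence the strict "<" comparison: any double
 * strictly below 2^63 does fit in a long.
 */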
274 :
275 :
276 : /*
277 : * cost_seqscan
278 : * Determines and returns the cost of scanning a relation sequentially.
279 : *
280 : * 'baserel' is the relation to be scanned
281 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
282 : */
283 : void
284 375280 : cost_seqscan(Path *path, PlannerInfo *root,
285 : RelOptInfo *baserel, ParamPathInfo *param_info)
286 : {
287 375280 : Cost startup_cost = 0;
288 : Cost cpu_run_cost;
289 : Cost disk_run_cost;
290 : double spc_seq_page_cost;
291 : QualCost qpqual_cost;
292 : Cost cpu_per_tuple;
293 :
294 : /* Should only be applied to base relations */
295 : Assert(baserel->relid > 0);
296 : Assert(baserel->rtekind == RTE_RELATION);
297 :
298 : /* Mark the path with the correct row estimate */
299 375280 : if (param_info)
300 744 : path->rows = param_info->ppi_rows;
301 : else
302 374536 : path->rows = baserel->rows;
303 :
304 375280 : if (!enable_seqscan)
305 15620 : startup_cost += disable_cost;
306 :
307 : /* fetch estimated page cost for tablespace containing table */
308 375280 : get_tablespace_page_costs(baserel->reltablespace,
309 : NULL,
310 : &spc_seq_page_cost);
311 :
312 : /*
313 : * disk costs
314 : */
315 375280 : disk_run_cost = spc_seq_page_cost * baserel->pages;
316 :
317 : /* CPU costs */
318 375280 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
319 :
320 375280 : startup_cost += qpqual_cost.startup;
321 375280 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
322 375280 : cpu_run_cost = cpu_per_tuple * baserel->tuples;
323 : /* tlist eval costs are paid per output row, not per tuple scanned */
324 375280 : startup_cost += path->pathtarget->cost.startup;
325 375280 : cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
326 :
327 : /* Adjust costing for parallelism, if used. */
328 375280 : if (path->parallel_workers > 0)
329 : {
330 26102 : double parallel_divisor = get_parallel_divisor(path);
331 :
332 : /* The CPU cost is divided among all the workers. */
333 26102 : cpu_run_cost /= parallel_divisor;
334 :
335 : /*
336 : * It may be possible to amortize some of the I/O cost, but probably
337 : * not very much, because most operating systems already do aggressive
338 : * prefetching. For now, we assume that the disk run cost can't be
339 : * amortized at all.
340 : */
341 :
342 : /*
343 : * In the case of a parallel plan, the row count needs to represent
344 : * the number of tuples processed per worker.
345 : */
346 26102 : path->rows = clamp_row_est(path->rows / parallel_divisor);
347 : }
348 :
349 375280 : path->startup_cost = startup_cost;
350 375280 : path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
351 375280 : }
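
/*
 * A worked example of the seqscan arithmetic above, assuming the stock
 * defaults (seq_page_cost = 1.0, cpu_tuple_cost = 0.01) and a 100-page,
 * 10000-tuple table with no quals and a trivial tlist:
 *		disk_run_cost = 1.0 * 100 = 100
 *		cpu_run_cost = 0.01 * 10000 = 100
 * giving startup_cost = 0 and total_cost = 200.
 */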
352 :
353 : /*
354 : * cost_samplescan
355 : * Determines and returns the cost of scanning a relation using sampling.
356 : *
357 : * 'baserel' is the relation to be scanned
358 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
359 : */
360 : void
361 300 : cost_samplescan(Path *path, PlannerInfo *root,
362 : RelOptInfo *baserel, ParamPathInfo *param_info)
363 : {
364 300 : Cost startup_cost = 0;
365 300 : Cost run_cost = 0;
366 : RangeTblEntry *rte;
367 : TableSampleClause *tsc;
368 : TsmRoutine *tsm;
369 : double spc_seq_page_cost,
370 : spc_random_page_cost,
371 : spc_page_cost;
372 : QualCost qpqual_cost;
373 : Cost cpu_per_tuple;
374 :
375 : /* Should only be applied to base relations with tablesample clauses */
376 : Assert(baserel->relid > 0);
377 300 : rte = planner_rt_fetch(baserel->relid, root);
378 : Assert(rte->rtekind == RTE_RELATION);
379 300 : tsc = rte->tablesample;
380 : Assert(tsc != NULL);
381 300 : tsm = GetTsmRoutine(tsc->tsmhandler);
382 :
383 : /* Mark the path with the correct row estimate */
384 300 : if (param_info)
385 66 : path->rows = param_info->ppi_rows;
386 : else
387 234 : path->rows = baserel->rows;
388 :
389 : /* fetch estimated page cost for tablespace containing table */
390 300 : get_tablespace_page_costs(baserel->reltablespace,
391 : &spc_random_page_cost,
392 : &spc_seq_page_cost);
393 :
394 : /* if NextSampleBlock is used, assume random access, else sequential */
395 600 : spc_page_cost = (tsm->NextSampleBlock != NULL) ?
396 300 : spc_random_page_cost : spc_seq_page_cost;
397 :
398 : /*
399 : * disk costs (recall that baserel->pages has already been set to the
400 : * number of pages the sampling method will visit)
401 : */
402 300 : run_cost += spc_page_cost * baserel->pages;
403 :
404 : /*
405 : * CPU costs (recall that baserel->tuples has already been set to the
406 : * number of tuples the sampling method will select). Note that we ignore
407 : * execution cost of the TABLESAMPLE parameter expressions; they will be
408 : * evaluated only once per scan, and in most usages they'll likely be
409 : * simple constants anyway. We also don't charge anything for the
410 : * calculations the sampling method might do internally.
411 : */
412 300 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
413 :
414 300 : startup_cost += qpqual_cost.startup;
415 300 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
416 300 : run_cost += cpu_per_tuple * baserel->tuples;
417 : /* tlist eval costs are paid per output row, not per tuple scanned */
418 300 : startup_cost += path->pathtarget->cost.startup;
419 300 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
420 :
421 300 : path->startup_cost = startup_cost;
422 300 : path->total_cost = startup_cost + run_cost;
423 300 : }
424 :
425 : /*
426 : * cost_gather
427 : * Determines and returns the cost of a Gather path.
428 : *
429 : * 'rel' is the relation to be operated upon
430 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
431 : * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
432 : * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
433 : * correspond to any particular RelOptInfo.
434 : */
435 : void
436 16144 : cost_gather(GatherPath *path, PlannerInfo *root,
437 : RelOptInfo *rel, ParamPathInfo *param_info,
438 : double *rows)
439 : {
440 16144 : Cost startup_cost = 0;
441 16144 : Cost run_cost = 0;
442 :
443 : /* Mark the path with the correct row estimate */
444 16144 : if (rows)
445 1740 : path->path.rows = *rows;
446 14404 : else if (param_info)
447 0 : path->path.rows = param_info->ppi_rows;
448 : else
449 14404 : path->path.rows = rel->rows;
450 :
451 16144 : startup_cost = path->subpath->startup_cost;
452 :
453 16144 : run_cost = path->subpath->total_cost - path->subpath->startup_cost;
454 :
455 : /* Parallel setup and communication cost. */
456 16144 : startup_cost += parallel_setup_cost;
457 16144 : run_cost += parallel_tuple_cost * path->path.rows;
458 :
459 16144 : path->path.startup_cost = startup_cost;
460 16144 : path->path.total_cost = (startup_cost + run_cost);
461 16144 : }
462 :
463 : /*
464 : * cost_gather_merge
465 : * Determines and returns the cost of a Gather Merge path.
466 : *
467 : * GatherMerge merges several pre-sorted input streams, using a heap that at
468 : * any given instant holds the next tuple from each stream. If there are N
469 : * streams, we need about N*log2(N) tuple comparisons to construct the heap at
470 : * startup, and then for each output tuple, about log2(N) comparisons to
471 : * replace the top heap entry with the next tuple from the same stream.
472 : */
473 : void
474 9806 : cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
475 : RelOptInfo *rel, ParamPathInfo *param_info,
476 : Cost input_startup_cost, Cost input_total_cost,
477 : double *rows)
478 : {
479 9806 : Cost startup_cost = 0;
480 9806 : Cost run_cost = 0;
481 : Cost comparison_cost;
482 : double N;
483 : double logN;
484 :
485 : /* Mark the path with the correct row estimate */
486 9806 : if (rows)
487 4486 : path->path.rows = *rows;
488 5320 : else if (param_info)
489 0 : path->path.rows = param_info->ppi_rows;
490 : else
491 5320 : path->path.rows = rel->rows;
492 :
493 9806 : if (!enable_gathermerge)
494 0 : startup_cost += disable_cost;
495 :
496 : /*
497 : * Add one to the number of workers to account for the leader. This might
498 : * be overgenerous since the leader will do less work than other workers
499 : * in typical cases, but we'll go with it for now.
500 : */
501 : Assert(path->num_workers > 0);
502 9806 : N = (double) path->num_workers + 1;
503 9806 : logN = LOG2(N);
504 :
505 : /* Assumed cost per tuple comparison */
506 9806 : comparison_cost = 2.0 * cpu_operator_cost;
507 :
508 : /* Heap creation cost */
509 9806 : startup_cost += comparison_cost * N * logN;
510 :
511 : /* Per-tuple heap maintenance cost */
512 9806 : run_cost += path->path.rows * comparison_cost * logN;
513 :
514 : /* small cost for heap management, like cost_merge_append */
515 9806 : run_cost += cpu_operator_cost * path->path.rows;
516 :
517 : /*
518 : * Parallel setup and communication cost. Since Gather Merge, unlike
519 : * Gather, requires us to block until a tuple is available from every
520 : * worker, we bump the IPC cost up a little bit as compared with Gather.
521 : * For lack of a better idea, charge an extra 5%.
522 : */
523 9806 : startup_cost += parallel_setup_cost;
524 9806 : run_cost += parallel_tuple_cost * path->path.rows * 1.05;
525 :
526 9806 : path->path.startup_cost = startup_cost + input_startup_cost;
527 9806 : path->path.total_cost = (startup_cost + run_cost + input_total_cost);
528 9806 : }
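
/*
 * A worked example of the heap costing above, assuming the stock defaults
 * (cpu_operator_cost = 0.0025, parallel_tuple_cost = 0.1,
 * parallel_setup_cost = 1000) and num_workers = 3, so N = 4 and logN = 2:
 *		comparison_cost = 2.0 * 0.0025 = 0.005
 *		heap creation = 0.005 * 4 * 2 = 0.04 (startup, plus the 1000 setup)
 *		per output row = 0.005 * 2 + 0.0025 + 0.1 * 1.05 = 0.1175
 */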
529 :
530 : /*
531 : * cost_index
532 : * Determines and returns the cost of scanning a relation using an index.
533 : *
534 : * 'path' describes the indexscan under consideration, and is complete
535 : * except for the fields to be set by this routine
536 : * 'loop_count' is the number of repetitions of the indexscan to factor into
537 : * estimates of caching behavior
538 : *
539 : * In addition to rows, startup_cost and total_cost, cost_index() sets the
540 : * path's indextotalcost and indexselectivity fields. These values will be
541 : * needed if the IndexPath is used in a BitmapIndexScan.
542 : *
543 : * NOTE: path->indexquals must contain only clauses usable as index
544 : * restrictions. Any additional quals evaluated as qpquals may reduce the
545 : * number of returned tuples, but they won't reduce the number of tuples
546 : * we have to fetch from the table, so they don't reduce the scan cost.
547 : */
548 : void
549 650284 : cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
550 : bool partial_path)
551 : {
552 650284 : IndexOptInfo *index = path->indexinfo;
553 650284 : RelOptInfo *baserel = index->rel;
554 650284 : bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
555 : amcostestimate_function amcostestimate;
556 : List *qpquals;
557 650284 : Cost startup_cost = 0;
558 650284 : Cost run_cost = 0;
559 650284 : Cost cpu_run_cost = 0;
560 : Cost indexStartupCost;
561 : Cost indexTotalCost;
562 : Selectivity indexSelectivity;
563 : double indexCorrelation,
564 : csquared;
565 : double spc_seq_page_cost,
566 : spc_random_page_cost;
567 : Cost min_IO_cost,
568 : max_IO_cost;
569 : QualCost qpqual_cost;
570 : Cost cpu_per_tuple;
571 : double tuples_fetched;
572 : double pages_fetched;
573 : double rand_heap_pages;
574 : double index_pages;
575 :
576 : /* Should only be applied to base relations */
577 : Assert(IsA(baserel, RelOptInfo) &&
578 : IsA(index, IndexOptInfo));
579 : Assert(baserel->relid > 0);
580 : Assert(baserel->rtekind == RTE_RELATION);
581 :
582 : /*
583 : * Mark the path with the correct row estimate, and identify which quals
584 : * will need to be enforced as qpquals. We need not check any quals that
585 : * are implied by the index's predicate, so we can use indrestrictinfo not
586 : * baserestrictinfo as the list of relevant restriction clauses for the
587 : * rel.
588 : */
589 650284 : if (path->path.param_info)
590 : {
591 117156 : path->path.rows = path->path.param_info->ppi_rows;
592 : /* qpquals come from the rel's restriction clauses and ppi_clauses */
593 117156 : qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
594 : path->indexclauses),
595 117156 : extract_nonindex_conditions(path->path.param_info->ppi_clauses,
596 : path->indexclauses));
597 : }
598 : else
599 : {
600 533128 : path->path.rows = baserel->rows;
601 : /* qpquals come from just the rel's restriction clauses */
602 533128 : qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
603 : path->indexclauses);
604 : }
605 :
606 650284 : if (!enable_indexscan)
607 3944 : startup_cost += disable_cost;
608 : /* we don't need to check enable_indexonlyscan; indxpath.c does that */
609 :
610 : /*
611 : * Call index-access-method-specific code to estimate the processing cost
612 : * for scanning the index, as well as the selectivity of the index (ie,
613 : * the fraction of main-table tuples we will have to retrieve) and its
614 : * correlation to the main-table tuple order. We need a cast here because
615 : * pathnodes.h uses a weak function type to avoid including amapi.h.
616 : */
617 650284 : amcostestimate = (amcostestimate_function) index->amcostestimate;
618 650284 : amcostestimate(root, path, loop_count,
619 : &indexStartupCost, &indexTotalCost,
620 : &indexSelectivity, &indexCorrelation,
621 : &index_pages);
622 :
623 : /*
624 : * Save amcostestimate's results for possible use in bitmap scan planning.
625 : * We don't bother to save indexStartupCost or indexCorrelation, because a
626 : * bitmap scan doesn't care about either.
627 : */
628 650284 : path->indextotalcost = indexTotalCost;
629 650284 : path->indexselectivity = indexSelectivity;
630 :
631 : /* all costs for touching index itself included here */
632 650284 : startup_cost += indexStartupCost;
633 650284 : run_cost += indexTotalCost - indexStartupCost;
634 :
635 : /* estimate number of main-table tuples fetched */
636 650284 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
637 :
638 : /* fetch estimated page costs for tablespace containing table */
639 650284 : get_tablespace_page_costs(baserel->reltablespace,
640 : &spc_random_page_cost,
641 : &spc_seq_page_cost);
642 :
643 : /*----------
644 : * Estimate number of main-table pages fetched, and compute I/O cost.
645 : *
646 : * When the index ordering is uncorrelated with the table ordering,
647 : * we use an approximation proposed by Mackert and Lohman (see
648 : * index_pages_fetched() for details) to compute the number of pages
649 : * fetched, and then charge spc_random_page_cost per page fetched.
650 : *
651 : * When the index ordering is exactly correlated with the table ordering
652 : * (just after a CLUSTER, for example), the number of pages fetched should
653 : * be exactly selectivity * table_size. What's more, all but the first
654 : * will be sequential fetches, not the random fetches that occur in the
655 : * uncorrelated case. So if the number of pages is more than 1, we
656 : * ought to charge
657 : * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
658 : * For partially-correlated indexes, we ought to charge somewhere between
659 : * these two estimates. We currently interpolate linearly between the
660 : * estimates based on the correlation squared (XXX is that appropriate?).
661 : *
662 : * If it's an index-only scan, then we will not need to fetch any heap
663 : * pages for which the visibility map shows all tuples are visible.
664 : * Hence, reduce the estimated number of heap fetches accordingly.
665 : * We use the measured fraction of the entire heap that is all-visible,
666 : * which might not be particularly relevant to the subset of the heap
667 : * that this query will fetch; but it's not clear how to do better.
668 : *----------
669 : */
670 650284 : if (loop_count > 1)
671 : {
672 : /*
673 : * For repeated indexscans, the appropriate estimate for the
674 : * uncorrelated case is to scale up the number of tuples fetched in
675 : * the Mackert and Lohman formula by the number of scans, so that we
676 : * estimate the number of pages fetched by all the scans; then
677 : * pro-rate the costs for one scan. In this case we assume all the
678 : * fetches are random accesses.
679 : */
680 63990 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
681 : baserel->pages,
682 63990 : (double) index->pages,
683 : root);
684 :
685 63990 : if (indexonly)
686 8078 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
687 :
688 63990 : rand_heap_pages = pages_fetched;
689 :
690 63990 : max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
691 :
692 : /*
693 : * In the perfectly correlated case, the number of pages touched by
694 : * each scan is selectivity * table_size, and we can use the Mackert
695 : * and Lohman formula at the page level to estimate how much work is
696 : * saved by caching across scans. We still assume all the fetches are
697 : * random, though, which is an overestimate that's hard to correct for
698 : * without double-counting the cache effects. (But in most cases
699 : * where such a plan is actually interesting, only one page would get
700 : * fetched per scan anyway, so it shouldn't matter much.)
701 : */
702 63990 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
703 :
704 63990 : pages_fetched = index_pages_fetched(pages_fetched * loop_count,
705 : baserel->pages,
706 63990 : (double) index->pages,
707 : root);
708 :
709 63990 : if (indexonly)
710 8078 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
711 :
712 63990 : min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
713 : }
714 : else
715 : {
716 : /*
717 : * Normal case: apply the Mackert and Lohman formula, and then
718 : * interpolate between that and the correlation-derived result.
719 : */
720 586294 : pages_fetched = index_pages_fetched(tuples_fetched,
721 : baserel->pages,
722 586294 : (double) index->pages,
723 : root);
724 :
725 586294 : if (indexonly)
726 58174 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
727 :
728 586294 : rand_heap_pages = pages_fetched;
729 :
730 : /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
731 586294 : max_IO_cost = pages_fetched * spc_random_page_cost;
732 :
733 : /* min_IO_cost is for the perfectly correlated case (csquared=1) */
734 586294 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
735 :
736 586294 : if (indexonly)
737 58174 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
738 :
739 586294 : if (pages_fetched > 0)
740 : {
741 531982 : min_IO_cost = spc_random_page_cost;
742 531982 : if (pages_fetched > 1)
743 146694 : min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
744 : }
745 : else
746 54312 : min_IO_cost = 0;
747 : }
748 :
749 650284 : if (partial_path)
750 : {
751 : /*
752 : * For index-only scans, compute workers based on the number of index
753 : * pages fetched; the number of heap pages we fetch might be so small
754 : * as to effectively rule out parallelism, which we don't want to do.
755 : */
756 223996 : if (indexonly)
757 21094 : rand_heap_pages = -1;
758 :
759 : /*
760 : * Estimate the number of parallel workers required to scan the index.
761 : * Use the computed number of heap pages, bearing in mind that heap
762 : * fetches won't be sequential: for parallel scans, the pages are
763 : * accessed in random order.
764 : */
765 223996 : path->path.parallel_workers = compute_parallel_worker(baserel,
766 : rand_heap_pages,
767 : index_pages,
768 : max_parallel_workers_per_gather);
769 :
770 : /*
771 : * Fall out if workers can't be assigned for parallel scan, because in
772 : * such a case this path will be rejected. So there is no benefit in
773 : * doing extra computation.
774 : */
775 223996 : if (path->path.parallel_workers <= 0)
776 214194 : return;
777 :
778 9802 : path->path.parallel_aware = true;
779 : }
780 :
781 : /*
782 : * Now interpolate based on estimated index order correlation to get total
783 : * disk I/O cost for main table accesses.
784 : */
785 436090 : csquared = indexCorrelation * indexCorrelation;
786 :
787 436090 : run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
788 :
789 : /*
790 : * Estimate CPU costs per tuple.
791 : *
792 : * What we want here is cpu_tuple_cost plus the evaluation costs of any
793 : * qual clauses that we have to evaluate as qpquals.
794 : */
795 436090 : cost_qual_eval(&qpqual_cost, qpquals, root);
796 :
797 436090 : startup_cost += qpqual_cost.startup;
798 436090 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
799 :
800 436090 : cpu_run_cost += cpu_per_tuple * tuples_fetched;
801 :
802 : /* tlist eval costs are paid per output row, not per tuple scanned */
803 436090 : startup_cost += path->path.pathtarget->cost.startup;
804 436090 : cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
805 :
806 : /* Adjust costing for parallelism, if used. */
807 436090 : if (path->path.parallel_workers > 0)
808 : {
809 9802 : double parallel_divisor = get_parallel_divisor(&path->path);
810 :
811 9802 : path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
812 :
813 : /* The CPU cost is divided among all the workers. */
814 9802 : cpu_run_cost /= parallel_divisor;
815 : }
816 :
817 436090 : run_cost += cpu_run_cost;
818 :
819 436090 : path->path.startup_cost = startup_cost;
820 436090 : path->path.total_cost = startup_cost + run_cost;
821 : }
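
/*
 * A worked example of the correlation interpolation above: with
 * indexCorrelation = 0.9, csquared = 0.81, so the I/O charge is
 *		max_IO_cost + 0.81 * (min_IO_cost - max_IO_cost)
 *		= 0.19 * max_IO_cost + 0.81 * min_IO_cost,
 * i.e. mostly the cheap, fully-correlated estimate.
 */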
822 :
823 : /*
824 : * extract_nonindex_conditions
825 : *
826 : * Given a list of quals to be enforced in an indexscan, extract the ones that
827 : * will have to be applied as qpquals (ie, the index machinery won't handle
828 : * them). Here we detect only whether a qual clause is directly redundant
829 : * with some indexclause. If the index path is chosen for use, createplan.c
830 : * will try a bit harder to get rid of redundant qual conditions; specifically
831 : * it will see if quals can be proven to be implied by the indexquals. But
832 : * it does not seem worth the cycles to try to factor that in at this stage,
833 : * since we're only trying to estimate qual eval costs. Otherwise this must
834 : * match the logic in create_indexscan_plan().
835 : *
836 : * qual_clauses, and the result, are lists of RestrictInfos.
837 : * indexclauses is a list of IndexClauses.
838 : */
839 : static List *
840 767440 : extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
841 : {
842 767440 : List *result = NIL;
843 : ListCell *lc;
844 :
845 1580148 : foreach(lc, qual_clauses)
846 : {
847 812708 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
848 :
849 812708 : if (rinfo->pseudoconstant)
850 8582 : continue; /* we may drop pseudoconstants here */
851 804126 : if (is_redundant_with_indexclauses(rinfo, indexclauses))
852 475740 : continue; /* dup or derived from same EquivalenceClass */
853 : /* ... skip the predicate proof attempt createplan.c will try ... */
854 328386 : result = lappend(result, rinfo);
855 : }
856 767440 : return result;
857 : }
858 :
859 : /*
860 : * index_pages_fetched
861 : * Estimate the number of pages actually fetched after accounting for
862 : * cache effects.
863 : *
864 : * We use an approximation proposed by Mackert and Lohman, "Index Scans
865 : * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
866 : * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
867 : * The Mackert and Lohman approximation is that the number of pages
868 : * fetched is
869 : * PF =
870 : *		min(2TNs/(2T+Ns), T)			when T <= b
871 : *		2TNs/(2T+Ns)					when T > b and Ns <= 2Tb/(2T-b)
872 : *		b + (Ns - 2Tb/(2T-b))*(T-b)/T	when T > b and Ns > 2Tb/(2T-b)
873 : * where
874 : *		T = # pages in table
875 : *		N = # tuples in table
876 : *		s = selectivity = fraction of table to be scanned
877 : *		b = # buffer pages available (we include kernel space here)
878 : *
879 : * We assume that effective_cache_size is the total number of buffer pages
880 : * available for the whole query, and pro-rate that space across all the
881 : * tables in the query and the index currently under consideration. (This
882 : * ignores space needed for other indexes used by the query, but since we
883 : * don't know which indexes will get used, we can't estimate that very well;
884 : * and in any case counting all the tables may well be an overestimate, since
885 : * depending on the join plan not all the tables may be scanned concurrently.)
886 : *
887 : * The product Ns is the number of tuples fetched; we pass in that
888 : * product rather than calculating it here. "pages" is the number of pages
889 : * in the object under consideration (either an index or a table).
890 : * "index_pages" is the amount to add to the total table space, which was
891 : * computed for us by make_one_rel.
892 : *
893 : * Caller is expected to have ensured that tuples_fetched is greater than zero
894 : * and rounded to integer (see clamp_row_est). The result will likewise be
895 : * greater than zero and integral.
896 : */
897 : double
898 887292 : index_pages_fetched(double tuples_fetched, BlockNumber pages,
899 : double index_pages, PlannerInfo *root)
900 : {
901 : double pages_fetched;
902 : double total_pages;
903 : double T,
904 : b;
905 :
906 : /* T is # pages in table, but don't allow it to be zero */
907 887292 : T = (pages > 1) ? (double) pages : 1.0;
908 :
909 : /* Compute number of pages assumed to be competing for cache space */
910 887292 : total_pages = root->total_table_pages + index_pages;
911 887292 : total_pages = Max(total_pages, 1.0);
912 : Assert(T <= total_pages);
913 :
914 : /* b is pro-rated share of effective_cache_size */
915 887292 : b = (double) effective_cache_size * T / total_pages;
916 :
917 : /* force it positive and integral */
918 887292 : if (b <= 1.0)
919 0 : b = 1.0;
920 : else
921 887292 : b = ceil(b);
922 :
923 : /* This part is the Mackert and Lohman formula */
924 887292 : if (T <= b)
925 : {
926 887292 : pages_fetched =
927 887292 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
928 887292 : if (pages_fetched >= T)
929 499362 : pages_fetched = T;
930 : else
931 387930 : pages_fetched = ceil(pages_fetched);
932 : }
933 : else
934 : {
935 : double lim;
936 :
937 0 : lim = (2.0 * T * b) / (2.0 * T - b);
938 0 : if (tuples_fetched <= lim)
939 : {
940 0 : pages_fetched =
941 0 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
942 : }
943 : else
944 : {
945 0 : pages_fetched =
946 0 : b + (tuples_fetched - lim) * (T - b) / T;
947 : }
948 0 : pages_fetched = ceil(pages_fetched);
949 : }
950 887292 : return pages_fetched;
951 : }
952 :
953 : /*
954 : * get_indexpath_pages
955 : * Determine the total size of the indexes used in a bitmap index path.
956 : *
957 : * Note: if the same index is used more than once in a bitmap tree, we will
958 : * count it multiple times, which perhaps is the wrong thing ... but it's
959 : * not completely clear, and detecting duplicates is difficult, so ignore it
960 : * for now.
961 : */
962 : static double
963 140964 : get_indexpath_pages(Path *bitmapqual)
964 : {
965 140964 : double result = 0;
966 : ListCell *l;
967 :
968 140964 : if (IsA(bitmapqual, BitmapAndPath))
969 : {
970 17076 : BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
971 :
972 51228 : foreach(l, apath->bitmapquals)
973 : {
974 34152 : result += get_indexpath_pages((Path *) lfirst(l));
975 : }
976 : }
977 123888 : else if (IsA(bitmapqual, BitmapOrPath))
978 : {
979 66 : BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
980 :
981 198 : foreach(l, opath->bitmapquals)
982 : {
983 132 : result += get_indexpath_pages((Path *) lfirst(l));
984 : }
985 : }
986 123822 : else if (IsA(bitmapqual, IndexPath))
987 : {
988 123822 : IndexPath *ipath = (IndexPath *) bitmapqual;
989 :
990 123822 : result = (double) ipath->indexinfo->pages;
991 : }
992 : else
993 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
994 :
995 140964 : return result;
996 : }
997 :
998 : /*
999 : * cost_bitmap_heap_scan
1000 : * Determines and returns the cost of scanning a relation using a bitmap
1001 : * index-then-heap plan.
1002 : *
1003 : * 'baserel' is the relation to be scanned
1004 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1005 : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
1006 : * 'loop_count' is the number of repetitions of the indexscan to factor into
1007 : * estimates of caching behavior
1008 : *
1009 : * Note: the component IndexPaths in bitmapqual should have been costed
1010 : * using the same loop_count.
1011 : */
1012 : void
1013 431960 : cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
1014 : ParamPathInfo *param_info,
1015 : Path *bitmapqual, double loop_count)
1016 : {
1017 431960 : Cost startup_cost = 0;
1018 431960 : Cost run_cost = 0;
1019 : Cost indexTotalCost;
1020 : QualCost qpqual_cost;
1021 : Cost cpu_per_tuple;
1022 : Cost cost_per_page;
1023 : Cost cpu_run_cost;
1024 : double tuples_fetched;
1025 : double pages_fetched;
1026 : double spc_seq_page_cost,
1027 : spc_random_page_cost;
1028 : double T;
1029 :
1030 : /* Should only be applied to base relations */
1031 : Assert(IsA(baserel, RelOptInfo));
1032 : Assert(baserel->relid > 0);
1033 : Assert(baserel->rtekind == RTE_RELATION);
1034 :
1035 : /* Mark the path with the correct row estimate */
1036 431960 : if (param_info)
1037 177306 : path->rows = param_info->ppi_rows;
1038 : else
1039 254654 : path->rows = baserel->rows;
1040 :
1041 431960 : if (!enable_bitmapscan)
1042 9474 : startup_cost += disable_cost;
1043 :
1044 431960 : pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
1045 : loop_count, &indexTotalCost,
1046 : &tuples_fetched);
1047 :
1048 431960 : startup_cost += indexTotalCost;
1049 431960 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
1050 :
1051 : /* Fetch estimated page costs for tablespace containing table. */
1052 431960 : get_tablespace_page_costs(baserel->reltablespace,
1053 : &spc_random_page_cost,
1054 : &spc_seq_page_cost);
1055 :
1056 : /*
1057 : * For small numbers of pages we should charge spc_random_page_cost
1058 : * apiece, while if nearly all the table's pages are being read, it's more
1059 : * appropriate to charge spc_seq_page_cost apiece. The effect is
1060 : * nonlinear, too. For lack of a better idea, interpolate like this to
1061 : * determine the cost per page.
1062 : */
1063 431960 : if (pages_fetched >= 2.0)
1064 85498 : cost_per_page = spc_random_page_cost -
1065 85498 : (spc_random_page_cost - spc_seq_page_cost)
1066 85498 : * sqrt(pages_fetched / T);
1067 : else
1068 346462 : cost_per_page = spc_random_page_cost;
1069 :
1070 431960 : run_cost += pages_fetched * cost_per_page;
1071 :
1072 : /*
1073 : * Estimate CPU costs per tuple.
1074 : *
1075 : * Often the indexquals don't need to be rechecked at each tuple ... but
1076 : * not always, especially not if there are enough tuples involved that the
1077 : * bitmaps become lossy. For the moment, just assume they will be
1078 : * rechecked always. This means we charge the full freight for all the
1079 : * scan clauses.
1080 : */
1081 431960 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1082 :
1083 431960 : startup_cost += qpqual_cost.startup;
1084 431960 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1085 431960 : cpu_run_cost = cpu_per_tuple * tuples_fetched;
1086 :
1087 : /* Adjust costing for parallelism, if used. */
1088 431960 : if (path->parallel_workers > 0)
1089 : {
1090 4172 : double parallel_divisor = get_parallel_divisor(path);
1091 :
1092 : /* The CPU cost is divided among all the workers. */
1093 4172 : cpu_run_cost /= parallel_divisor;
1094 :
1095 4172 : path->rows = clamp_row_est(path->rows / parallel_divisor);
1096 : }
1097 :
1098 :
1099 431960 : run_cost += cpu_run_cost;
1100 :
1101 : /* tlist eval costs are paid per output row, not per tuple scanned */
1102 431960 : startup_cost += path->pathtarget->cost.startup;
1103 431960 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1104 :
1105 431960 : path->startup_cost = startup_cost;
1106 431960 : path->total_cost = startup_cost + run_cost;
1107 431960 : }
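
/*
 * A worked example of the per-page interpolation above, assuming
 * spc_random_page_cost = 4.0 and spc_seq_page_cost = 1.0.  Fetching 25 pages
 * of a 100-page table (pages_fetched/T = 0.25) gives
 *		cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(0.25) = 2.5,
 * i.e. partway between fully random (4.0) and fully sequential (1.0).
 */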
1108 :
1109 : /*
1110 : * cost_bitmap_tree_node
1111 : * Extract cost and selectivity from a bitmap tree node (index/and/or)
1112 : */
1113 : void
1114 791780 : cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
1115 : {
1116 791780 : if (IsA(path, IndexPath))
1117 : {
1118 754206 : *cost = ((IndexPath *) path)->indextotalcost;
1119 754206 : *selec = ((IndexPath *) path)->indexselectivity;
1120 :
1121 : /*
1122 : * Charge a small amount per retrieved tuple to reflect the costs of
1123 : * manipulating the bitmap. This is mostly to make sure that a bitmap
1124 : * scan doesn't look to be the same cost as an indexscan to retrieve a
1125 : * single tuple.
1126 : */
1127 754206 : *cost += 0.1 * cpu_operator_cost * path->rows;
1128 : }
1129 37574 : else if (IsA(path, BitmapAndPath))
1130 : {
1131 34228 : *cost = path->total_cost;
1132 34228 : *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1133 : }
1134 3346 : else if (IsA(path, BitmapOrPath))
1135 : {
1136 3346 : *cost = path->total_cost;
1137 3346 : *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1138 : }
1139 : else
1140 : {
1141 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1142 : *cost = *selec = 0; /* keep compiler quiet */
1143 : }
1144 791780 : }
1145 :
1146 : /*
1147 : * cost_bitmap_and_node
1148 : * Estimate the cost of a BitmapAnd node
1149 : *
1150 : * Note that this considers only the costs of index scanning and bitmap
1151 : * creation, not the eventual heap access. In that sense the object isn't
1152 : * truly a Path, but it has enough path-like properties (costs in particular)
1153 : * to warrant treating it as one. We don't bother to set the path rows field,
1154 : * however.
1155 : */
1156 : void
1157 34148 : cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
1158 : {
1159 : Cost totalCost;
1160 : Selectivity selec;
1161 : ListCell *l;
1162 :
1163 : /*
1164 : * We estimate AND selectivity on the assumption that the inputs are
1165 : * independent. This is probably often wrong, but we don't have the info
1166 : * to do better.
1167 : *
1168 : * The runtime cost of the BitmapAnd itself is estimated at 100x
1169 : * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1170 : * definitely too simplistic?
1171 : */
1172 34148 : totalCost = 0.0;
1173 34148 : selec = 1.0;
1174 102444 : foreach(l, path->bitmapquals)
1175 : {
1176 68296 : Path *subpath = (Path *) lfirst(l);
1177 : Cost subCost;
1178 : Selectivity subselec;
1179 :
1180 68296 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1181 :
1182 68296 : selec *= subselec;
1183 :
1184 68296 : totalCost += subCost;
1185 68296 : if (l != list_head(path->bitmapquals))
1186 34148 : totalCost += 100.0 * cpu_operator_cost;
1187 : }
1188 34148 : path->bitmapselectivity = selec;
1189 34148 : path->path.rows = 0; /* per above, not used */
1190 34148 : path->path.startup_cost = totalCost;
1191 34148 : path->path.total_cost = totalCost;
1192 34148 : }
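
/*
 * A worked example of the independence assumption above: two ANDed inputs
 * with selectivities 0.1 and 0.2 yield bitmapselectivity = 0.1 * 0.2 = 0.02.
 */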
1193 :
1194 : /*
1195 : * cost_bitmap_or_node
1196 : * Estimate the cost of a BitmapOr node
1197 : *
1198 : * See comments for cost_bitmap_and_node.
1199 : */
1200 : void
1201 960 : cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1202 : {
1203 : Cost totalCost;
1204 : Selectivity selec;
1205 : ListCell *l;
1206 :
1207 : /*
1208 : * We estimate OR selectivity on the assumption that the inputs are
1209 : * non-overlapping, since that's often the case in "x IN (list)" type
1210 : * situations. Of course, we clamp to 1.0 at the end.
1211 : *
1212 : * The runtime cost of the BitmapOr itself is estimated at 100x
1213 : * cpu_operator_cost for each tbm_union needed. Probably too small,
1214 : * definitely too simplistic? We are aware that the tbm_unions are
1215 : * optimized out when the inputs are BitmapIndexScans.
1216 : */
1217 960 : totalCost = 0.0;
1218 960 : selec = 0.0;
1219 2940 : foreach(l, path->bitmapquals)
1220 : {
1221 1980 : Path *subpath = (Path *) lfirst(l);
1222 : Cost subCost;
1223 : Selectivity subselec;
1224 :
1225 1980 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1226 :
1227 1980 : selec += subselec;
1228 :
1229 1980 : totalCost += subCost;
1230 1980 : if (l != list_head(path->bitmapquals) &&
1231 1020 : !IsA(subpath, IndexPath))
1232 30 : totalCost += 100.0 * cpu_operator_cost;
1233 : }
1234 960 : path->bitmapselectivity = Min(selec, 1.0);
1235 960 : path->path.rows = 0; /* per above, not used */
1236 960 : path->path.startup_cost = totalCost;
1237 960 : path->path.total_cost = totalCost;
1238 960 : }
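
/*
 * A worked example of the non-overlap assumption above: two ORed inputs with
 * selectivities 0.1 and 0.2 yield bitmapselectivity = Min(0.1 + 0.2, 1.0) = 0.3.
 */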
1239 :
1240 : /*
1241 : * cost_tidscan
1242 : * Determines and returns the cost of scanning a relation using TIDs.
1243 : *
1244 : * 'baserel' is the relation to be scanned
1245 : * 'tidquals' is the list of TID-checkable quals
1246 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1247 : */
1248 : void
1249 756 : cost_tidscan(Path *path, PlannerInfo *root,
1250 : RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1251 : {
1252 756 : Cost startup_cost = 0;
1253 756 : Cost run_cost = 0;
1254 756 : bool isCurrentOf = false;
1255 : QualCost qpqual_cost;
1256 : Cost cpu_per_tuple;
1257 : QualCost tid_qual_cost;
1258 : double ntuples;
1259 : ListCell *l;
1260 : double spc_random_page_cost;
1261 :
1262 : /* Should only be applied to base relations */
1263 : Assert(baserel->relid > 0);
1264 : Assert(baserel->rtekind == RTE_RELATION);
1265 :
1266 : /* Mark the path with the correct row estimate */
1267 756 : if (param_info)
1268 144 : path->rows = param_info->ppi_rows;
1269 : else
1270 612 : path->rows = baserel->rows;
1271 :
1272 : /* Count how many tuples we expect to retrieve */
1273 756 : ntuples = 0;
1274 1536 : foreach(l, tidquals)
1275 : {
1276 780 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
1277 780 : Expr *qual = rinfo->clause;
1278 :
1279 780 : if (IsA(qual, ScalarArrayOpExpr))
1280 : {
1281 : /* Each element of the array yields 1 tuple */
1282 30 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
1283 30 : Node *arraynode = (Node *) lsecond(saop->args);
1284 :
1285 30 : ntuples += estimate_array_length(root, arraynode);
1286 : }
1287 750 : else if (IsA(qual, CurrentOfExpr))
1288 : {
1289 : /* CURRENT OF yields 1 tuple */
1290 392 : isCurrentOf = true;
1291 392 : ntuples++;
1292 : }
1293 : else
1294 : {
1295 : /* It's just CTID = something, count 1 tuple */
1296 358 : ntuples++;
1297 : }
1298 : }
1299 :
1300 : /*
1301 : * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1302 : * understands how to do it correctly. Therefore, honor enable_tidscan
1303 : * only when CURRENT OF isn't present. Also note that cost_qual_eval
1304 : * counts a CurrentOfExpr as having startup cost disable_cost, which we
1305 : * subtract off here; that's to prevent other plan types such as seqscan
1306 : * from winning.
1307 : */
1308 756 : if (isCurrentOf)
1309 : {
1310 : Assert(baserel->baserestrictcost.startup >= disable_cost);
1311 392 : startup_cost -= disable_cost;
1312 : }
1313 364 : else if (!enable_tidscan)
1314 0 : startup_cost += disable_cost;
1315 :
1316 : /*
1317 : * The TID qual expressions will be computed once, any other baserestrict
1318 : * quals once per retrieved tuple.
1319 : */
1320 756 : cost_qual_eval(&tid_qual_cost, tidquals, root);
1321 :
1322 : /* fetch estimated page cost for tablespace containing table */
1323 756 : get_tablespace_page_costs(baserel->reltablespace,
1324 : &spc_random_page_cost,
1325 : NULL);
1326 :
1327 : /* disk costs --- assume each tuple on a different page */
1328 756 : run_cost += spc_random_page_cost * ntuples;
1329 :
1330 : /* Add scanning CPU costs */
1331 756 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1332 :
1333 : /* XXX currently we assume TID quals are a subset of qpquals */
1334 756 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1335 756 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1336 756 : tid_qual_cost.per_tuple;
1337 756 : run_cost += cpu_per_tuple * ntuples;
1338 :
1339 : /* tlist eval costs are paid per output row, not per tuple scanned */
1340 756 : startup_cost += path->pathtarget->cost.startup;
1341 756 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1342 :
1343 756 : path->startup_cost = startup_cost;
1344 756 : path->total_cost = startup_cost + run_cost;
1345 756 : }
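
/*
 * A note on the CURRENT OF accounting above: cost_qual_eval charges
 * disable_cost to baserestrictcost.startup for a CurrentOfExpr, so every
 * competing path (e.g. a seqscan) carries that penalty in its qual costs;
 * subtracting it back out here leaves the TID scan as the only cheap path,
 * which forces its selection.
 */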
1346 :
1347 : /*
1348 : * cost_tidrangescan
1349 : * Determines and sets the costs of scanning a relation using a range of
1350 : * TIDs for 'path'
1351 : *
1352 : * 'baserel' is the relation to be scanned
1353 : * 'tidrangequals' is the list of TID-checkable range quals
1354 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1355 : */
1356 : void
1357 202 : cost_tidrangescan(Path *path, PlannerInfo *root,
1358 : RelOptInfo *baserel, List *tidrangequals,
1359 : ParamPathInfo *param_info)
1360 : {
1361 : Selectivity selectivity;
1362 : double pages;
1363 202 : Cost startup_cost = 0;
1364 202 : Cost run_cost = 0;
1365 : QualCost qpqual_cost;
1366 : Cost cpu_per_tuple;
1367 : QualCost tid_qual_cost;
1368 : double ntuples;
1369 : double nseqpages;
1370 : double spc_random_page_cost;
1371 : double spc_seq_page_cost;
1372 :
1373 : /* Should only be applied to base relations */
1374 : Assert(baserel->relid > 0);
1375 : Assert(baserel->rtekind == RTE_RELATION);
1376 :
1377 : /* Mark the path with the correct row estimate */
1378 202 : if (param_info)
1379 0 : path->rows = param_info->ppi_rows;
1380 : else
1381 202 : path->rows = baserel->rows;
1382 :
1383 : /* Count how many tuples and pages we expect to scan */
1384 202 : selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
1385 : JOIN_INNER, NULL);
1386 202 : pages = ceil(selectivity * baserel->pages);
1387 :
1388 202 : if (pages <= 0.0)
1389 42 : pages = 1.0;
1390 :
1391 : /*
1392 : * The first page in a range requires a random seek, but each subsequent
1393 : * page is just a normal sequential page read. NOTE: it's desirable for
1394 : * TID Range Scans to cost more than the equivalent Sequential Scans,
1395 : * because Seq Scans have some performance advantages such as scan
1396 : * synchronization and parallelizability, and we'd prefer one of them to
1397 : * be picked unless a TID Range Scan really is better.
1398 : */
1399 202 : ntuples = selectivity * baserel->tuples;
1400 202 : nseqpages = pages - 1.0;
1401 :
1402 202 : if (!enable_tidscan)
1403 0 : startup_cost += disable_cost;
1404 :
1405 : /*
1406 : * The TID qual expressions will be computed once, any other baserestrict
1407 : * quals once per retrieved tuple.
1408 : */
1409 202 : cost_qual_eval(&tid_qual_cost, tidrangequals, root);
1410 :
1411 : /* fetch estimated page cost for tablespace containing table */
1412 202 : get_tablespace_page_costs(baserel->reltablespace,
1413 : &spc_random_page_cost,
1414 : &spc_seq_page_cost);
1415 :
1416 : /* disk costs; 1 random page and the remainder as seq pages */
1417 202 : run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
1418 :
1419 : /* Add scanning CPU costs */
1420 202 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1421 :
1422 : /*
1423 : * XXX currently we assume TID quals are a subset of qpquals at this
1424 : * point; they will be removed (if possible) when we create the plan, so
1425 : * we subtract their cost from the total qpqual cost. (If the TID quals
1426 : * can't be removed, this is a mistake and we're going to underestimate
1427 : * the CPU cost a bit.)
1428 : */
1429 202 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1430 202 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1431 202 : tid_qual_cost.per_tuple;
1432 202 : run_cost += cpu_per_tuple * ntuples;
1433 :
1434 : /* tlist eval costs are paid per output row, not per tuple scanned */
1435 202 : startup_cost += path->pathtarget->cost.startup;
1436 202 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1437 :
1438 202 : path->startup_cost = startup_cost;
1439 202 : path->total_cost = startup_cost + run_cost;
1440 202 : }
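
/*
 * A worked example of the disk charge above, assuming
 * spc_random_page_cost = 4.0 and spc_seq_page_cost = 1.0.  A range spanning
 * 10 pages costs 4.0 + 9 * 1.0 = 13.0, a bit above the 10.0 that a plain
 * seqscan would charge for the same pages, matching the comment's preference
 * for seqscans when they are otherwise equivalent.
 */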
1441 :
1442 : /*
1443 : * cost_subqueryscan
1444 : * Determines and returns the cost of scanning a subquery RTE.
1445 : *
1446 : * 'baserel' is the relation to be scanned
1447 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1448 : * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1449 : */
1450 : void
1451 39332 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1452 : RelOptInfo *baserel, ParamPathInfo *param_info,
1453 : bool trivial_pathtarget)
1454 : {
1455 : Cost startup_cost;
1456 : Cost run_cost;
1457 : List *qpquals;
1458 : QualCost qpqual_cost;
1459 : Cost cpu_per_tuple;
1460 :
1461 : /* Should only be applied to base relations that are subqueries */
1462 : Assert(baserel->relid > 0);
1463 : Assert(baserel->rtekind == RTE_SUBQUERY);
1464 :
1465 : /*
1466 : * We compute the rowcount estimate as the subplan's estimate times the
1467 : * selectivity of relevant restriction clauses. In simple cases this will
1468 : * come out the same as baserel->rows; but when dealing with parallelized
1469 : * paths we must do it like this to get the right answer.
1470 : */
1471 39332 : if (param_info)
1472 486 : qpquals = list_concat_copy(param_info->ppi_clauses,
1473 486 : baserel->baserestrictinfo);
1474 : else
1475 38846 : qpquals = baserel->baserestrictinfo;
1476 :
1477 39332 : path->path.rows = clamp_row_est(path->subpath->rows *
1478 39332 : clauselist_selectivity(root,
1479 : qpquals,
1480 : 0,
1481 : JOIN_INNER,
1482 : NULL));
1483 :
1484 : /*
1485 : * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1486 : * any restriction clauses and tlist that will be attached to the
1487 : * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1488 : * projection overhead.
1489 : */
1490 39332 : path->path.startup_cost = path->subpath->startup_cost;
1491 39332 : path->path.total_cost = path->subpath->total_cost;
1492 :
1493 : /*
1494 : * However, if there are no relevant restriction clauses and the
1495 : * pathtarget is trivial, then we expect that setrefs.c will optimize away
1496 : * the SubqueryScan plan node altogether, so we should just make its cost
1497 : * and rowcount equal to the input path's.
1498 : *
1499 : * Note: there are some edge cases where createplan.c will apply a
1500 : * different targetlist to the SubqueryScan node, thus falsifying our
1501 : * current estimate of whether the target is trivial, and making the cost
1502 : * estimate (though not the rowcount) wrong. It does not seem worth the
1503 : * extra complication to try to account for that exactly, especially since
1504 : * that behavior falsifies other cost estimates as well.
1505 : */
1506 39332 : if (qpquals == NIL && trivial_pathtarget)
1507 16816 : return;
1508 :
1509 22516 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1510 :
1511 22516 : startup_cost = qpqual_cost.startup;
1512 22516 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1513 22516 : run_cost = cpu_per_tuple * path->subpath->rows;
1514 :
1515 : /* tlist eval costs are paid per output row, not per tuple scanned */
1516 22516 : startup_cost += path->path.pathtarget->cost.startup;
1517 22516 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1518 :
1519 22516 : path->path.startup_cost += startup_cost;
1520 22516 : path->path.total_cost += startup_cost + run_cost;
1521 : }
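/*
 * Worked example (illustrative figures): if the subpath has startup_cost =
 * 10, total_cost = 110 and 1000 rows, then with no relevant quals and a
 * trivial pathtarget the SubqueryScan path simply inherits 10/110, since
 * the node will be optimized away.  With a qual costing 0.0025 per tuple
 * and the default cpu_tuple_cost of 0.01, we instead add
 * (0.01 + 0.0025) * 1000 = 12.5 to the run cost.
 */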
1522 :
1523 : /*
1524 : * cost_functionscan
1525 : * Determines and returns the cost of scanning a function RTE.
1526 : *
1527 : * 'baserel' is the relation to be scanned
1528 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1529 : */
1530 : void
1531 39980 : cost_functionscan(Path *path, PlannerInfo *root,
1532 : RelOptInfo *baserel, ParamPathInfo *param_info)
1533 : {
1534 39980 : Cost startup_cost = 0;
1535 39980 : Cost run_cost = 0;
1536 : QualCost qpqual_cost;
1537 : Cost cpu_per_tuple;
1538 : RangeTblEntry *rte;
1539 : QualCost exprcost;
1540 :
1541 : /* Should only be applied to base relations that are functions */
1542 : Assert(baserel->relid > 0);
1543 39980 : rte = planner_rt_fetch(baserel->relid, root);
1544 : Assert(rte->rtekind == RTE_FUNCTION);
1545 :
1546 : /* Mark the path with the correct row estimate */
1547 39980 : if (param_info)
1548 8118 : path->rows = param_info->ppi_rows;
1549 : else
1550 31862 : path->rows = baserel->rows;
1551 :
1552 : /*
1553 : * Estimate costs of executing the function expression(s).
1554 : *
1555 : * Currently, nodeFunctionscan.c always executes the functions to
1556 : * completion before returning any rows, and caches the results in a
1557 : * tuplestore. So the function eval cost is all startup cost, and per-row
1558 : * costs are minimal.
1559 : *
1560 : * XXX in principle we ought to charge tuplestore spill costs if the
1561 : * number of rows is large. However, given how phony our rowcount
1562 : * estimates for functions tend to be, there's not a lot of point in that
1563 : * refinement right now.
1564 : */
1565 39980 : cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1566 :
1567 39980 : startup_cost += exprcost.startup + exprcost.per_tuple;
1568 :
1569 : /* Add scanning CPU costs */
1570 39980 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1571 :
1572 39980 : startup_cost += qpqual_cost.startup;
1573 39980 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1574 39980 : run_cost += cpu_per_tuple * baserel->tuples;
1575 :
1576 : /* tlist eval costs are paid per output row, not per tuple scanned */
1577 39980 : startup_cost += path->pathtarget->cost.startup;
1578 39980 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1579 :
1580 39980 : path->startup_cost = startup_cost;
1581 39980 : path->total_cost = startup_cost + run_cost;
1582 39980 : }
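/*
 * Illustrative example (made-up costs): if evaluating the function
 * expressions costs exprcost.startup + exprcost.per_tuple = 25, the whole
 * 25 lands in startup_cost, because the tuplestore is filled before any
 * row is returned.  Scanning 1000 buffered tuples then adds only about
 * cpu_tuple_cost * 1000 = 10 (at the default cpu_tuple_cost of 0.01) to
 * run_cost, plus any qual and tlist costs.
 */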
1583 :
1584 : /*
1585 : * cost_tablefuncscan
1586 : * Determines and returns the cost of scanning a table function.
1587 : *
1588 : * 'baserel' is the relation to be scanned
1589 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1590 : */
1591 : void
1592 548 : cost_tablefuncscan(Path *path, PlannerInfo *root,
1593 : RelOptInfo *baserel, ParamPathInfo *param_info)
1594 : {
1595 548 : Cost startup_cost = 0;
1596 548 : Cost run_cost = 0;
1597 : QualCost qpqual_cost;
1598 : Cost cpu_per_tuple;
1599 : RangeTblEntry *rte;
1600 : QualCost exprcost;
1601 :
1602 : /* Should only be applied to base relations that are functions */
1603 : Assert(baserel->relid > 0);
1604 548 : rte = planner_rt_fetch(baserel->relid, root);
1605 : Assert(rte->rtekind == RTE_TABLEFUNC);
1606 :
1607 : /* Mark the path with the correct row estimate */
1608 548 : if (param_info)
1609 234 : path->rows = param_info->ppi_rows;
1610 : else
1611 314 : path->rows = baserel->rows;
1612 :
1613 : /*
1614 : * Estimate costs of executing the table func expression(s).
1615 : *
1616 : * XXX in principle we ought to charge tuplestore spill costs if the
1617 : * number of rows is large. However, given how phony our rowcount
1618 : * estimates for tablefuncs tend to be, there's not a lot of point in that
1619 : * refinement right now.
1620 : */
1621 548 : cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1622 :
1623 548 : startup_cost += exprcost.startup + exprcost.per_tuple;
1624 :
1625 : /* Add scanning CPU costs */
1626 548 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1627 :
1628 548 : startup_cost += qpqual_cost.startup;
1629 548 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1630 548 : run_cost += cpu_per_tuple * baserel->tuples;
1631 :
1632 : /* tlist eval costs are paid per output row, not per tuple scanned */
1633 548 : startup_cost += path->pathtarget->cost.startup;
1634 548 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1635 :
1636 548 : path->startup_cost = startup_cost;
1637 548 : path->total_cost = startup_cost + run_cost;
1638 548 : }
1639 :
1640 : /*
1641 : * cost_valuesscan
1642 : * Determines and returns the cost of scanning a VALUES RTE.
1643 : *
1644 : * 'baserel' is the relation to be scanned
1645 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1646 : */
1647 : void
1648 7736 : cost_valuesscan(Path *path, PlannerInfo *root,
1649 : RelOptInfo *baserel, ParamPathInfo *param_info)
1650 : {
1651 7736 : Cost startup_cost = 0;
1652 7736 : Cost run_cost = 0;
1653 : QualCost qpqual_cost;
1654 : Cost cpu_per_tuple;
1655 :
1656 : /* Should only be applied to base relations that are values lists */
1657 : Assert(baserel->relid > 0);
1658 : Assert(baserel->rtekind == RTE_VALUES);
1659 :
1660 : /* Mark the path with the correct row estimate */
1661 7736 : if (param_info)
1662 48 : path->rows = param_info->ppi_rows;
1663 : else
1664 7688 : path->rows = baserel->rows;
1665 :
1666 : /*
1667 : * For now, estimate list evaluation cost at one operator eval per list
1668 : * (probably pretty bogus, but is it worth being smarter?)
1669 : */
1670 7736 : cpu_per_tuple = cpu_operator_cost;
1671 :
1672 : /* Add scanning CPU costs */
1673 7736 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1674 :
1675 7736 : startup_cost += qpqual_cost.startup;
1676 7736 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1677 7736 : run_cost += cpu_per_tuple * baserel->tuples;
1678 :
1679 : /* tlist eval costs are paid per output row, not per tuple scanned */
1680 7736 : startup_cost += path->pathtarget->cost.startup;
1681 7736 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1682 :
1683 7736 : path->startup_cost = startup_cost;
1684 7736 : path->total_cost = startup_cost + run_cost;
1685 7736 : }
1686 :
1687 : /*
1688 : * cost_ctescan
1689 : * Determines and returns the cost of scanning a CTE RTE.
1690 : *
1691 : * Note: this is used for both self-reference and regular CTEs; the
1692 : * possible cost differences are below the threshold of what we could
1693 : * estimate accurately anyway. Note that the costs of evaluating the
1694 : * referenced CTE query are added into the final plan as initplan costs,
1695 : * and should NOT be counted here.
1696 : */
1697 : void
1698 4026 : cost_ctescan(Path *path, PlannerInfo *root,
1699 : RelOptInfo *baserel, ParamPathInfo *param_info)
1700 : {
1701 4026 : Cost startup_cost = 0;
1702 4026 : Cost run_cost = 0;
1703 : QualCost qpqual_cost;
1704 : Cost cpu_per_tuple;
1705 :
1706 : /* Should only be applied to base relations that are CTEs */
1707 : Assert(baserel->relid > 0);
1708 : Assert(baserel->rtekind == RTE_CTE);
1709 :
1710 : /* Mark the path with the correct row estimate */
1711 4026 : if (param_info)
1712 0 : path->rows = param_info->ppi_rows;
1713 : else
1714 4026 : path->rows = baserel->rows;
1715 :
1716 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1717 4026 : cpu_per_tuple = cpu_tuple_cost;
1718 :
1719 : /* Add scanning CPU costs */
1720 4026 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1721 :
1722 4026 : startup_cost += qpqual_cost.startup;
1723 4026 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1724 4026 : run_cost += cpu_per_tuple * baserel->tuples;
1725 :
1726 : /* tlist eval costs are paid per output row, not per tuple scanned */
1727 4026 : startup_cost += path->pathtarget->cost.startup;
1728 4026 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1729 :
1730 4026 : path->startup_cost = startup_cost;
1731 4026 : path->total_cost = startup_cost + run_cost;
1732 4026 : }
1733 :
1734 : /*
1735 : * cost_namedtuplestorescan
1736 : * Determines and returns the cost of scanning a named tuplestore.
1737 : */
1738 : void
1739 438 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
1740 : RelOptInfo *baserel, ParamPathInfo *param_info)
1741 : {
1742 438 : Cost startup_cost = 0;
1743 438 : Cost run_cost = 0;
1744 : QualCost qpqual_cost;
1745 : Cost cpu_per_tuple;
1746 :
1747 : /* Should only be applied to base relations that are Tuplestores */
1748 : Assert(baserel->relid > 0);
1749 : Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1750 :
1751 : /* Mark the path with the correct row estimate */
1752 438 : if (param_info)
1753 0 : path->rows = param_info->ppi_rows;
1754 : else
1755 438 : path->rows = baserel->rows;
1756 :
1757 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1758 438 : cpu_per_tuple = cpu_tuple_cost;
1759 :
1760 : /* Add scanning CPU costs */
1761 438 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1762 :
1763 438 : startup_cost += qpqual_cost.startup;
1764 438 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1765 438 : run_cost += cpu_per_tuple * baserel->tuples;
1766 :
1767 438 : path->startup_cost = startup_cost;
1768 438 : path->total_cost = startup_cost + run_cost;
1769 438 : }
1770 :
1771 : /*
1772 : * cost_resultscan
1773 : * Determines and returns the cost of scanning an RTE_RESULT relation.
1774 : */
1775 : void
1776 1610 : cost_resultscan(Path *path, PlannerInfo *root,
1777 : RelOptInfo *baserel, ParamPathInfo *param_info)
1778 : {
1779 1610 : Cost startup_cost = 0;
1780 1610 : Cost run_cost = 0;
1781 : QualCost qpqual_cost;
1782 : Cost cpu_per_tuple;
1783 :
1784 : /* Should only be applied to RTE_RESULT base relations */
1785 : Assert(baserel->relid > 0);
1786 : Assert(baserel->rtekind == RTE_RESULT);
1787 :
1788 : /* Mark the path with the correct row estimate */
1789 1610 : if (param_info)
1790 144 : path->rows = param_info->ppi_rows;
1791 : else
1792 1466 : path->rows = baserel->rows;
1793 :
1794 : /* We charge qual cost plus cpu_tuple_cost */
1795 1610 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1796 :
1797 1610 : startup_cost += qpqual_cost.startup;
1798 1610 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1799 1610 : run_cost += cpu_per_tuple * baserel->tuples;
1800 :
1801 1610 : path->startup_cost = startup_cost;
1802 1610 : path->total_cost = startup_cost + run_cost;
1803 1610 : }
1804 :
1805 : /*
1806 : * cost_recursive_union
1807 : * Determines and returns the cost of performing a recursive union,
1808 : * and also the estimated output size.
1809 : *
1810 : * We are given Paths for the nonrecursive and recursive terms.
1811 : */
1812 : void
1813 800 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1814 : {
1815 : Cost startup_cost;
1816 : Cost total_cost;
1817 : double total_rows;
1818 :
1819 : /* We probably have decent estimates for the non-recursive term */
1820 800 : startup_cost = nrterm->startup_cost;
1821 800 : total_cost = nrterm->total_cost;
1822 800 : total_rows = nrterm->rows;
1823 :
1824 : /*
1825 : * We arbitrarily assume that about 10 recursive iterations will be
1826 : * needed, and that we've managed to get a good fix on the cost and output
1827 : * size of each one of them. These are mighty shaky assumptions but it's
1828 : * hard to see how to do better.
1829 : */
1830 800 : total_cost += 10 * rterm->total_cost;
1831 800 : total_rows += 10 * rterm->rows;
1832 :
1833 : /*
1834 : * Also charge cpu_tuple_cost per row to account for the costs of
1835 : * manipulating the tuplestores. (We don't worry about possible
1836 : * spill-to-disk costs.)
1837 : */
1838 800 : total_cost += cpu_tuple_cost * total_rows;
1839 :
1840 800 : runion->startup_cost = startup_cost;
1841 800 : runion->total_cost = total_cost;
1842 800 : runion->rows = total_rows;
1843 800 : runion->pathtarget->width = Max(nrterm->pathtarget->width,
1844 : rterm->pathtarget->width);
1845 800 : }
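/*
 * Worked example (illustrative numbers): with nrterm at total_cost = 100
 * and 50 rows, and rterm at total_cost = 20 and 10 rows, we get
 * total_rows = 50 + 10 * 10 = 150 and total_cost = 100 + 10 * 20 +
 * cpu_tuple_cost * 150 = 301.5 at the default cpu_tuple_cost of 0.01.
 * The fixed 10-iteration multiplier dominates, so a badly estimated
 * recursive term skews the result badly; that is the shaky assumption
 * the comment above admits to.
 */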
1846 :
1847 : /*
1848 : * cost_tuplesort
1849 : * Determines and returns the cost of sorting a relation using tuplesort,
1850 : * not including the cost of reading the input data.
1851 : *
1852 : * If the total volume of data to sort is less than sort_mem, we will do
1853 : * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1854 : * comparisons for t tuples.
1855 : *
1856 : * If the total volume exceeds sort_mem, we switch to a tape-style merge
1857 : * algorithm. There will still be about t*log2(t) tuple comparisons in
1858 : * total, but we will also need to write and read each tuple once per
1859 : * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1860 : * number of initial runs formed and M is the merge order used by tuplesort.c.
1861 : * Since the average initial run should be about sort_mem, we have
1862 : * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1863 : * cpu = comparison_cost * t * log2(t)
1864 : *
1865 : * If the sort is bounded (i.e., only the first k result tuples are needed)
1866 : * and k tuples can fit into sort_mem, we use a heap method that keeps only
1867 : * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1868 : *
1869 : * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1870 : * accesses (XXX can't we refine that guess?)
1871 : *
1872 : * By default, we charge two operator evals per tuple comparison, which should
1873 : * be in the right ballpark in most cases. The caller can tweak this by
1874 : * specifying nonzero comparison_cost; typically that's used for any extra
1875 : * work that has to be done to prepare the inputs to the comparison operators.
1876 : *
1877 : * 'tuples' is the number of tuples in the relation
1878 : * 'width' is the average tuple width in bytes
1879 : * 'comparison_cost' is the extra cost per comparison, if any
1880 : * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1881 : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1882 : */
1883 : static void
1884 1375774 : cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1885 : double tuples, int width,
1886 : Cost comparison_cost, int sort_mem,
1887 : double limit_tuples)
1888 : {
1889 1375774 : double input_bytes = relation_byte_size(tuples, width);
1890 : double output_bytes;
1891 : double output_tuples;
1892 1375774 : long sort_mem_bytes = sort_mem * 1024L;
1893 :
1894 : /*
1895 : * We want to be sure the cost of a sort is never estimated as zero, even
1896 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1897 : */
1898 1375774 : if (tuples < 2.0)
1899 442362 : tuples = 2.0;
1900 :
1901 : /* Include the default cost-per-comparison */
1902 1375774 : comparison_cost += 2.0 * cpu_operator_cost;
1903 :
1904 : /* Do we have a useful LIMIT? */
1905 1375774 : if (limit_tuples > 0 && limit_tuples < tuples)
1906 : {
1907 1736 : output_tuples = limit_tuples;
1908 1736 : output_bytes = relation_byte_size(output_tuples, width);
1909 : }
1910 : else
1911 : {
1912 1374038 : output_tuples = tuples;
1913 1374038 : output_bytes = input_bytes;
1914 : }
1915 :
1916 1375774 : if (output_bytes > sort_mem_bytes)
1917 : {
1918 : /*
1919 : * We'll have to use a disk-based sort of all the tuples
1920 : */
1921 16728 : double npages = ceil(input_bytes / BLCKSZ);
1922 16728 : double nruns = input_bytes / sort_mem_bytes;
1923 16728 : double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1924 : double log_runs;
1925 : double npageaccesses;
1926 :
1927 : /*
1928 : * CPU costs
1929 : *
1930 : * Assume about N log2 N comparisons
1931 : */
1932 16728 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1933 :
1934 : /* Disk costs */
1935 :
1936 : /* Compute logM(r) as log(r) / log(M) */
1937 16728 : if (nruns > mergeorder)
1938 4374 : log_runs = ceil(log(nruns) / log(mergeorder));
1939 : else
1940 12354 : log_runs = 1.0;
1941 16728 : npageaccesses = 2.0 * npages * log_runs;
1942 : /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1943 16728 : *startup_cost += npageaccesses *
1944 16728 : (seq_page_cost * 0.75 + random_page_cost * 0.25);
1945 : }
1946 1359046 : else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1947 : {
1948 : /*
1949 : * We'll use a bounded heap-sort keeping just K tuples in memory, for
1950 : * a total number of tuple comparisons of N log2 K; but the constant
1951 : * factor is a bit higher than for quicksort. Tweak it so that the
1952 : * cost curve is continuous at the crossover point.
1953 : */
1954 1312 : *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
1955 : }
1956 : else
1957 : {
1958 : /* We'll use plain quicksort on all the input tuples */
1959 1357734 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1960 : }
1961 :
1962 : /*
1963 : * Also charge a small amount (arbitrarily set equal to operator cost) per
1964 : * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1965 : * doesn't do qual-checking or projection, so it has less overhead than
1966 : * most plan nodes. Note it's correct to use tuples not output_tuples
1967 : * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1968 : * counting the LIMIT otherwise.
1969 : */
1970 1375774 : *run_cost = cpu_operator_cost * tuples;
1971 1375774 : }
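/*
 * Worked example of the disk-sort branch (all numbers illustrative): with
 * input_bytes = 80 MB and sort_mem_bytes = 4 MB, npages = 10240 (8 kB
 * blocks) and nruns = 20.  If tuplesort_merge_order() reports a merge
 * order of, say, 500, then nruns <= mergeorder and log_runs = 1.0, so
 * npageaccesses = 2 * 10240 = 20480 page accesses, each charged at
 * 0.75 * seq_page_cost + 0.25 * random_page_cost, on top of the
 * comparison_cost * t * log2(t) CPU term.
 */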
1972 :
1973 : /*
1974 : * cost_incremental_sort
1975 : * Determines and returns the cost of sorting a relation incrementally, when
1976 : * the input path is presorted by a prefix of the pathkeys.
1977 : *
1978 : * 'presorted_keys' is the number of leading pathkeys by which the input path
1979 : * is sorted.
1980 : *
1981 : * We estimate the number of groups into which the relation is divided by the
1982 : * leading pathkeys, and then calculate the cost of sorting a single group
1983 : * with tuplesort using cost_tuplesort().
1984 : */
1985 : void
1986 7704 : cost_incremental_sort(Path *path,
1987 : PlannerInfo *root, List *pathkeys, int presorted_keys,
1988 : Cost input_startup_cost, Cost input_total_cost,
1989 : double input_tuples, int width, Cost comparison_cost, int sort_mem,
1990 : double limit_tuples)
1991 : {
1992 : Cost startup_cost,
1993 : run_cost,
1994 7704 : input_run_cost = input_total_cost - input_startup_cost;
1995 : double group_tuples,
1996 : input_groups;
1997 : Cost group_startup_cost,
1998 : group_run_cost,
1999 : group_input_run_cost;
2000 7704 : List *presortedExprs = NIL;
2001 : ListCell *l;
2002 7704 : bool unknown_varno = false;
2003 :
2004 : Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
2005 :
2006 : /*
2007 : * We want to be sure the cost of a sort is never estimated as zero, even
2008 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
2009 : */
2010 7704 : if (input_tuples < 2.0)
2011 4792 : input_tuples = 2.0;
2012 :
2013 : /* Default estimate of number of groups, capped to one group per row. */
2014 7704 : input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
2015 :
2016 : /*
2017 : * Extract presorted keys as list of expressions.
2018 : *
2019 : * We need to be careful about Vars containing "varno 0" which might have
2020 : * been introduced by generate_append_tlist, which would confuse
2021 : * estimate_num_groups (in fact it'd fail for such expressions). See
2022 : * recurse_set_operations which has to deal with the same issue.
2023 : *
2024 : * Unlike recurse_set_operations we can't access the original target list
2025 : * here, and even if we could, it's not very clear how useful that would be
2026 : * for a set operation combining multiple tables. So we simply detect if
2027 : * there are any expressions with "varno 0" and use the default
2028 : * DEFAULT_NUM_DISTINCT in that case.
2029 : *
2030 : * We might also use either 1.0 (a single group) or input_tuples (each row
2031 : * being a separate group), pretty much the worst and best case for
2032 : * incremental sort. But those are extreme cases and using something in
2033 : * between seems reasonable. Furthermore, generate_append_tlist is used
2034 : * for set operations, which are likely to produce mostly unique output
2035 : * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
2036 : * while maintaining lower startup cost.
2037 : */
2038 7842 : foreach(l, pathkeys)
2039 : {
2040 7842 : PathKey *key = (PathKey *) lfirst(l);
2041 7842 : EquivalenceMember *member = (EquivalenceMember *)
2042 7842 : linitial(key->pk_eclass->ec_members);
2043 :
2044 : /*
2045 : * Check if the expression contains Var with "varno 0" so that we
2046 : * don't call estimate_num_groups in that case.
2047 : */
2048 7842 : if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2049 : {
2050 10 : unknown_varno = true;
2051 10 : break;
2052 : }
2053 :
2054 : /* expression not containing any Vars with "varno 0" */
2055 7832 : presortedExprs = lappend(presortedExprs, member->em_expr);
2056 :
2057 7832 : if (foreach_current_index(l) + 1 >= presorted_keys)
2058 7694 : break;
2059 : }
2060 :
2061 : /* Estimate the number of groups with equal presorted keys. */
2062 7704 : if (!unknown_varno)
2063 7694 : input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
2064 : NULL, NULL);
2065 :
2066 7704 : group_tuples = input_tuples / input_groups;
2067 7704 : group_input_run_cost = input_run_cost / input_groups;
2068 :
2069 : /*
2070 : * Estimate the average cost of sorting of one group where presorted keys
2071 : * are equal.
2072 : */
2073 7704 : cost_tuplesort(&group_startup_cost, &group_run_cost,
2074 : group_tuples, width, comparison_cost, sort_mem,
2075 : limit_tuples);
2076 :
2077 : /*
2078 : * Startup cost of incremental sort is the startup cost of its first group
2079 : * plus the cost of its input.
2080 : */
2081 7704 : startup_cost = group_startup_cost + input_startup_cost +
2082 : group_input_run_cost;
2083 :
2084 : /*
2085 : * After we started producing tuples from the first group, the cost of
2086 : * producing all the tuples is given by the cost to finish processing this
2087 : * group, plus the total cost to process the remaining groups, plus the
2088 : * remaining cost of input.
2089 : */
2090 7704 : run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
2091 7704 : (input_groups - 1) + group_input_run_cost * (input_groups - 1);
2092 :
2093 : /*
2094 : * Incremental sort adds some overhead by itself. Firstly, it has to
2095 : * detect the sort groups. This is roughly equal to one extra copy and
2096 : * comparison per tuple.
2097 : */
2098 7704 : run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2099 :
2100 : /*
2101 : * Additionally, we charge double cpu_tuple_cost for each input group to
2102 : * account for the tuplesort_reset that's performed after each group.
2103 : */
2104 7704 : run_cost += 2.0 * cpu_tuple_cost * input_groups;
2105 :
2106 7704 : path->rows = input_tuples;
2107 7704 : path->startup_cost = startup_cost;
2108 7704 : path->total_cost = startup_cost + run_cost;
2109 7704 : }
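/*
 * Worked example (illustrative): with input_tuples = 10000 and
 * input_groups = 100, each group sorts group_tuples = 100 rows.  Startup
 * pays for sorting only the first group, plus the input startup cost and
 * 1/100th of the input run cost; run_cost covers the other 99 group
 * sorts.  The overhead charges then add (cpu_tuple_cost +
 * comparison_cost) * 10000 for group-boundary detection and
 * 2 * cpu_tuple_cost * 100 for the per-group tuplesort_reset calls.
 */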
2110 :
2111 : /*
2112 : * cost_sort
2113 : * Determines and returns the cost of sorting a relation, including
2114 : * the cost of reading the input data.
2115 : *
2116 : * NOTE: some callers currently pass NIL for pathkeys because they
2117 : * can't conveniently supply the sort keys. Since this routine doesn't
2118 : * currently do anything with pathkeys anyway, that doesn't matter...
2119 : * but if it ever does, it should react gracefully to lack of key data.
2120 : * (Actually, the thing we'd most likely be interested in is just the number
2121 : * of sort keys, which all callers *could* supply.)
2122 : */
2123 : void
2124 1368070 : cost_sort(Path *path, PlannerInfo *root,
2125 : List *pathkeys, Cost input_cost, double tuples, int width,
2126 : Cost comparison_cost, int sort_mem,
2127 : double limit_tuples)
2128 :
2129 : {
2130 : Cost startup_cost;
2131 : Cost run_cost;
2132 :
2133 1368070 : cost_tuplesort(&startup_cost, &run_cost,
2134 : tuples, width,
2135 : comparison_cost, sort_mem,
2136 : limit_tuples);
2137 :
2138 1368070 : if (!enable_sort)
2139 1282 : startup_cost += disable_cost;
2140 :
2141 1368070 : startup_cost += input_cost;
2142 :
2143 1368070 : path->rows = tuples;
2144 1368070 : path->startup_cost = startup_cost;
2145 1368070 : path->total_cost = startup_cost + run_cost;
2146 1368070 : }
2147 :
2148 : /*
2149 : * append_nonpartial_cost
2150 : * Estimate the cost of the non-partial paths in a Parallel Append.
2151 : * The non-partial paths are assumed to be the first "numpaths" paths
2152 : * from the subpaths list, and to be in order of decreasing cost.
2153 : */
2154 : static Cost
2155 15610 : append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
2156 : {
2157 : Cost *costarr;
2158 : int arrlen;
2159 : ListCell *l;
2160 : ListCell *cell;
2161 : int path_index;
2162 : int min_index;
2163 : int max_index;
2164 :
2165 15610 : if (numpaths == 0)
2166 14076 : return 0;
2167 :
2168 : /*
2169 : * Array length is number of workers or number of relevant paths,
2170 : * whichever is less.
2171 : */
2172 1534 : arrlen = Min(parallel_workers, numpaths);
2173 1534 : costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
2174 :
2175 : /* The first few paths will each be claimed by a different worker. */
2176 1534 : path_index = 0;
2177 4146 : foreach(cell, subpaths)
2178 : {
2179 3438 : Path *subpath = (Path *) lfirst(cell);
2180 :
2181 3438 : if (path_index == arrlen)
2182 826 : break;
2183 2612 : costarr[path_index++] = subpath->total_cost;
2184 : }
2185 :
2186 : /*
2187 : * Since subpaths are sorted by decreasing cost, the last one will have
2188 : * the minimum cost.
2189 : */
2190 1534 : min_index = arrlen - 1;
2191 :
2192 : /*
2193 : * For each of the remaining subpaths, add its cost to the array element
2194 : * with minimum cost.
2195 : */
2196 2016 : for_each_cell(l, subpaths, cell)
2197 : {
2198 1010 : Path *subpath = (Path *) lfirst(l);
2199 :
2200 : /* Consider only the non-partial paths */
2201 1010 : if (path_index++ == numpaths)
2202 528 : break;
2203 :
2204 482 : costarr[min_index] += subpath->total_cost;
2205 :
2206 : /* Update the new min cost array index */
2207 482 : min_index = 0;
2208 1482 : for (int i = 0; i < arrlen; i++)
2209 : {
2210 1000 : if (costarr[i] < costarr[min_index])
2211 202 : min_index = i;
2212 : }
2213 : }
2214 :
2215 : /* Return the highest cost from the array */
2216 1534 : max_index = 0;
2217 4146 : for (int i = 0; i < arrlen; i++)
2218 : {
2219 2612 : if (costarr[i] > costarr[max_index])
2220 188 : max_index = i;
2221 : }
2222 :
2223 1534 : return costarr[max_index];
2224 : }
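/*
 * Worked example (illustrative): four non-partial subpaths with costs
 * 40, 30, 20 and 10 (sorted descending, as assumed above) and
 * parallel_workers = 2 give costarr = {40, 30} after the first loop.
 * The remaining paths go greedily to the cheapest slot: 20 raises slot 1
 * to 50, then 10 raises slot 0 to 50, and max(50, 50) = 50 is returned
 * --- the estimated finish time of the busiest worker.  A minimal sketch
 * of the same arithmetic with these hypothetical inputs:
 *
 *		double costarr[2] = {40.0, 30.0};	/- two workers claim the two
 *											   priciest paths first -/
 *		costarr[1] += 20.0;					/- cheapest slot gets next path -/
 *		costarr[0] += 10.0;					/- slots are now {50.0, 50.0} -/
 *		return Max(costarr[0], costarr[1]);	/- result: 50.0 -/
 */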
2225 :
2226 : /*
2227 : * cost_append
2228 : * Determines and returns the cost of an Append node.
2229 : */
2230 : void
2231 47058 : cost_append(AppendPath *apath)
2232 : {
2233 : ListCell *l;
2234 :
2235 47058 : apath->path.startup_cost = 0;
2236 47058 : apath->path.total_cost = 0;
2237 47058 : apath->path.rows = 0;
2238 :
2239 47058 : if (apath->subpaths == NIL)
2240 1530 : return;
2241 :
2242 45528 : if (!apath->path.parallel_aware)
2243 : {
2244 29918 : List *pathkeys = apath->path.pathkeys;
2245 :
2246 29918 : if (pathkeys == NIL)
2247 : {
2248 27918 : Path *firstsubpath = (Path *) linitial(apath->subpaths);
2249 :
2250 : /*
2251 : * For an unordered, non-parallel-aware Append we take the startup
2252 : * cost as the startup cost of the first subpath.
2253 : */
2254 27918 : apath->path.startup_cost = firstsubpath->startup_cost;
2255 :
2256 : /* Compute rows and costs as sums of subplan rows and costs. */
2257 110576 : foreach(l, apath->subpaths)
2258 : {
2259 82658 : Path *subpath = (Path *) lfirst(l);
2260 :
2261 82658 : apath->path.rows += subpath->rows;
2262 82658 : apath->path.total_cost += subpath->total_cost;
2263 : }
2264 : }
2265 : else
2266 : {
2267 : /*
2268 : * For an ordered, non-parallel-aware Append we take the startup
2269 : * cost as the sum of the subpath startup costs. This ensures
2270 : * that we don't underestimate the startup cost when a query's
2271 : * LIMIT is such that several of the children have to be run to
2272 : * satisfy it. This might be overkill --- another plausible hack
2273 : * would be to take the Append's startup cost as the maximum of
2274 : * the child startup costs. But we don't want to risk believing
2275 : * that an ORDER BY LIMIT query can be satisfied at small cost
2276 : * when the first child has small startup cost but later ones
2277 : * don't. (If we had the ability to deal with nonlinear cost
2278 : * interpolation for partial retrievals, we would not need to be
2279 : * so conservative about this.)
2280 : *
2281 : * This case is also different from the above in that we have to
2282 : * account for possibly injecting sorts into subpaths that aren't
2283 : * natively ordered.
2284 : */
2285 7812 : foreach(l, apath->subpaths)
2286 : {
2287 5812 : Path *subpath = (Path *) lfirst(l);
2288 : Path sort_path; /* dummy for result of cost_sort */
2289 :
2290 5812 : if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
2291 : {
2292 : /*
2293 : * We'll need to insert a Sort node, so include costs for
2294 : * that. We can use the parent's LIMIT if any, since we
2295 : * certainly won't pull more than that many tuples from
2296 : * any child.
2297 : */
2298 44 : cost_sort(&sort_path,
2299 : NULL, /* doesn't currently need root */
2300 : pathkeys,
2301 : subpath->total_cost,
2302 : subpath->rows,
2303 44 : subpath->pathtarget->width,
2304 : 0.0,
2305 : work_mem,
2306 : apath->limit_tuples);
2307 44 : subpath = &sort_path;
2308 : }
2309 :
2310 5812 : apath->path.rows += subpath->rows;
2311 5812 : apath->path.startup_cost += subpath->startup_cost;
2312 5812 : apath->path.total_cost += subpath->total_cost;
2313 : }
2314 : }
2315 : }
2316 : else /* parallel-aware */
2317 : {
2318 15610 : int i = 0;
2319 15610 : double parallel_divisor = get_parallel_divisor(&apath->path);
2320 :
2321 : /* Parallel-aware Append never produces ordered output. */
2322 : Assert(apath->path.pathkeys == NIL);
2323 :
2324 : /* Calculate startup cost. */
2325 63754 : foreach(l, apath->subpaths)
2326 : {
2327 48144 : Path *subpath = (Path *) lfirst(l);
2328 :
2329 : /*
2330 : * Append will start returning tuples when the child node having the
2331 : * lowest startup cost is done setting up. We consider only the
2332 : * first few subplans that immediately get a worker assigned.
2333 : */
2334 48144 : if (i == 0)
2335 15610 : apath->path.startup_cost = subpath->startup_cost;
2336 32534 : else if (i < apath->path.parallel_workers)
2337 15076 : apath->path.startup_cost = Min(apath->path.startup_cost,
2338 : subpath->startup_cost);
2339 :
2340 : /*
2341 : * Apply parallel divisor to subpaths. Scale the number of rows
2342 : * for each partial subpath based on the ratio of the parallel
2343 : * divisor originally used for the subpath to the one we adopted.
2344 : * Also add the cost of partial paths to the total cost, but
2345 : * ignore non-partial paths for now.
2346 : */
2347 48144 : if (i < apath->first_partial_path)
2348 3094 : apath->path.rows += subpath->rows / parallel_divisor;
2349 : else
2350 : {
2351 : double subpath_parallel_divisor;
2352 :
2353 45050 : subpath_parallel_divisor = get_parallel_divisor(subpath);
2354 45050 : apath->path.rows += subpath->rows * (subpath_parallel_divisor /
2355 : parallel_divisor);
2356 45050 : apath->path.total_cost += subpath->total_cost;
2357 : }
2358 :
2359 48144 : apath->path.rows = clamp_row_est(apath->path.rows);
2360 :
2361 48144 : i++;
2362 : }
2363 :
2364 : /* Add cost for non-partial subpaths. */
2365 15610 : apath->path.total_cost +=
2366 15610 : append_nonpartial_cost(apath->subpaths,
2367 : apath->first_partial_path,
2368 : apath->path.parallel_workers);
2369 : }
2370 :
2371 : /*
2372 : * Although Append does not do any selection or projection, it's not free;
2373 : * add a small per-tuple overhead.
2374 : */
2375 45528 : apath->path.total_cost +=
2376 45528 : cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
2377 : }
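/*
 * Illustrative example for the parallel-aware case: a partial subpath
 * planned with 2 workers (subpath_parallel_divisor of about 2.4 with
 * default leader participation) that estimates 1000 rows per worker
 * contributes 1000 * (2.4 / parallel_divisor) rows here; if this Append
 * also uses 2 workers the ratio is 1.0 and the per-worker row count
 * carries over unchanged.  A non-partial subpath instead contributes
 * subpath->rows / parallel_divisor, since exactly one process will run it.
 */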
2378 :
2379 : /*
2380 : * cost_merge_append
2381 : * Determines and returns the cost of a MergeAppend node.
2382 : *
2383 : * MergeAppend merges several pre-sorted input streams, using a heap that
2384 : * at any given instant holds the next tuple from each stream. If there
2385 : * are N streams, we need about N*log2(N) tuple comparisons to construct
2386 : * the heap at startup, and then for each output tuple, about log2(N)
2387 : * comparisons to replace the top entry.
2388 : *
2389 : * (The effective value of N will drop once some of the input streams are
2390 : * exhausted, but it seems unlikely to be worth trying to account for that.)
2391 : *
2392 : * The heap is never spilled to disk, since we assume N is not very large.
2393 : * So this is much simpler than cost_sort.
2394 : *
2395 : * As in cost_sort, we charge two operator evals per tuple comparison.
2396 : *
2397 : * 'pathkeys' is a list of sort keys
2398 : * 'n_streams' is the number of input streams
2399 : * 'input_startup_cost' is the sum of the input streams' startup costs
2400 : * 'input_total_cost' is the sum of the input streams' total costs
2401 : * 'tuples' is the number of tuples in all the streams
2402 : */
2403 : void
2404 3776 : cost_merge_append(Path *path, PlannerInfo *root,
2405 : List *pathkeys, int n_streams,
2406 : Cost input_startup_cost, Cost input_total_cost,
2407 : double tuples)
2408 : {
2409 3776 : Cost startup_cost = 0;
2410 3776 : Cost run_cost = 0;
2411 : Cost comparison_cost;
2412 : double N;
2413 : double logN;
2414 :
2415 : /*
2416 : * Avoid log(0)...
2417 : */
2418 3776 : N = (n_streams < 2) ? 2.0 : (double) n_streams;
2419 3776 : logN = LOG2(N);
2420 :
2421 : /* Assumed cost per tuple comparison */
2422 3776 : comparison_cost = 2.0 * cpu_operator_cost;
2423 :
2424 : /* Heap creation cost */
2425 3776 : startup_cost += comparison_cost * N * logN;
2426 :
2427 : /* Per-tuple heap maintenance cost */
2428 3776 : run_cost += tuples * comparison_cost * logN;
2429 :
2430 : /*
2431 : * Although MergeAppend does not do any selection or projection, it's not
2432 : * free; add a small per-tuple overhead.
2433 : */
2434 3776 : run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2435 :
2436 3776 : path->startup_cost = startup_cost + input_startup_cost;
2437 3776 : path->total_cost = startup_cost + run_cost + input_total_cost;
2438 3776 : }
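/*
 * Worked example (illustrative): merging n_streams = 4 sorted inputs
 * totalling 10000 tuples gives logN = 2, so heap creation costs
 * 2 * cpu_operator_cost * 4 * 2 and each output tuple pays
 * 2 * cpu_operator_cost * 2 for heap maintenance, plus the small
 * APPEND_CPU_COST_MULTIPLIER overhead --- far cheaper than the roughly
 * t * log2(t) comparisons a fresh sort of all 10000 tuples would need.
 */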
2439 :
2440 : /*
2441 : * cost_material
2442 : * Determines and returns the cost of materializing a relation, including
2443 : * the cost of reading the input data.
2444 : *
2445 : * If the total volume of data to materialize exceeds work_mem, we will need
2446 : * to write it to disk, so the cost is much higher in that case.
2447 : *
2448 : * Note that here we are estimating the costs for the first scan of the
2449 : * relation, so the materialization is all overhead --- any savings will
2450 : * occur only on rescan, which is estimated in cost_rescan.
2451 : */
2452 : void
2453 424066 : cost_material(Path *path,
2454 : Cost input_startup_cost, Cost input_total_cost,
2455 : double tuples, int width)
2456 : {
2457 424066 : Cost startup_cost = input_startup_cost;
2458 424066 : Cost run_cost = input_total_cost - input_startup_cost;
2459 424066 : double nbytes = relation_byte_size(tuples, width);
2460 424066 : long work_mem_bytes = work_mem * 1024L;
2461 :
2462 424066 : path->rows = tuples;
2463 :
2464 : /*
2465 : * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2466 : * reflect bookkeeping overhead. (This rate must be more than what
2467 : * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2468 : * if it is exactly the same then there will be a cost tie between
2469 : * nestloop with A outer, materialized B inner and nestloop with B outer,
2470 : * materialized A inner. The extra cost ensures we'll prefer
2471 : * materializing the smaller rel.) Note that this is normally a good deal
2472 : * less than cpu_tuple_cost; which is OK because a Material plan node
2473 : * doesn't do qual-checking or projection, so it's got less overhead than
2474 : * most plan nodes.
2475 : */
2476 424066 : run_cost += 2 * cpu_operator_cost * tuples;
2477 :
2478 : /*
2479 : * If we will spill to disk, charge at the rate of seq_page_cost per page.
2480 : * This cost is assumed to be evenly spread through the plan run phase,
2481 : * which isn't exactly accurate but our cost model doesn't allow for
2482 : * nonuniform costs within the run phase.
2483 : */
2484 424066 : if (nbytes > work_mem_bytes)
2485 : {
2486 4328 : double npages = ceil(nbytes / BLCKSZ);
2487 :
2488 4328 : run_cost += seq_page_cost * npages;
2489 : }
2490 :
2491 424066 : path->startup_cost = startup_cost;
2492 424066 : path->total_cost = startup_cost + run_cost;
2493 424066 : }
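/*
 * Worked example (illustrative): materializing 100000 tuples of width 100
 * comes to roughly 12.8 MB once relation_byte_size() adds per-tuple
 * header overhead, so with work_mem = 4 MB it spills, and at the default
 * seq_page_cost of 1.0 that adds about ceil(nbytes / BLCKSZ), i.e. some
 * 1560 pages' worth, to run_cost on top of the 2 * cpu_operator_cost
 * bookkeeping charge per tuple.
 */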
2494 :
2495 : /*
2496 : * cost_memoize_rescan
2497 : * Determines the estimated cost of rescanning a Memoize node.
2498 : *
2499 : * In order to estimate this, we must gain knowledge of how often we expect to
2500 : * be called and how many distinct sets of parameters we are likely to be
2501 : * called with. If we expect a good cache hit ratio, then we can set our
2502 : * costs to account for that hit ratio, plus a little bit of cost for the
2503 : * caching itself. Caching will not work out well if we expect to be called
2504 : * with too many distinct parameter values. The worst case here is that we
2505 : * never see any parameter value twice, in which case we'd never get a cache
2506 : * hit and caching would be a complete waste of effort.
2507 : */
2508 : static void
2509 226788 : cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
2510 : Cost *rescan_startup_cost, Cost *rescan_total_cost)
2511 : {
2512 : EstimationInfo estinfo;
2513 : ListCell *lc;
2514 226788 : Cost input_startup_cost = mpath->subpath->startup_cost;
2515 226788 : Cost input_total_cost = mpath->subpath->total_cost;
2516 226788 : double tuples = mpath->subpath->rows;
2517 226788 : double calls = mpath->calls;
2518 226788 : int width = mpath->subpath->pathtarget->width;
2519 :
2520 : double hash_mem_bytes;
2521 : double est_entry_bytes;
2522 : double est_cache_entries;
2523 : double ndistinct;
2524 : double evict_ratio;
2525 : double hit_ratio;
2526 : Cost startup_cost;
2527 : Cost total_cost;
2528 :
2529 : /* available cache space */
2530 226788 : hash_mem_bytes = get_hash_memory_limit();
2531 :
2532 : /*
2533 : * Set the number of bytes each cache entry should consume in the cache.
2534 : * To provide us with better estimations on how many cache entries we can
2535 : * store at once, we make a call to the executor here to ask it what
2536 : * memory overheads there are for a single cache entry.
2537 : */
2538 226788 : est_entry_bytes = relation_byte_size(tuples, width) +
2539 226788 : ExecEstimateCacheEntryOverheadBytes(tuples);
2540 :
2541 : /* include the estimated width for the cache keys */
2542 479022 : foreach(lc, mpath->param_exprs)
2543 252234 : est_entry_bytes += get_expr_width(root, (Node *) lfirst(lc));
2544 :
2545 : /* estimate on the upper limit of cache entries we can hold at once */
2546 226788 : est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
2547 :
2548 : /* estimate on the distinct number of parameter values */
2549 226788 : ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
2550 : &estinfo);
2551 :
2552 : /*
2553 : * When the estimation fell back on using a default value, it's a bit too
2554 : * risky to assume that it's ok to use a Memoize node. The use of a
2555 : * default could cause us to use a Memoize node when it's really
2556 : * inappropriate to do so. If we see that this has been done, then we'll
2557 : * assume that every call will have unique parameters, which will almost
2558 : * certainly mean a MemoizePath will never survive add_path().
2559 : */
2560 226788 : if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
2561 11930 : ndistinct = calls;
2562 :
2563 : /*
2564 : * Since we've already estimated the maximum number of entries we can
2565 : * store at once and know the estimated number of distinct values we'll be
2566 : * called with, we'll take this opportunity to set the path's est_entries.
2567 : * This will ultimately determine the hash table size that the executor
2568 : * will use. If we leave this at zero, the executor will just choose the
2569 : * size itself. Really this is not the right place to do this, but it's
2570 : * convenient since everything is already calculated.
2571 : */
2572 226788 : mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
2573 : PG_UINT32_MAX);
2574 :
2575 : /*
2576 : * When the number of distinct parameter values is above the amount we can
2577 : * store in the cache, then we'll have to evict some entries from the
2578 : * cache. This is not free. Here we estimate how often we'll incur the
2579 : * cost of that eviction.
2580 : */
2581 226788 : evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
2582 :
2583 : /*
2584 : * In order to estimate how costly a single scan will be, we need to
2585 : * attempt to estimate what the cache hit ratio will be. To do that we
2586 : * must look at how many scans are estimated in total for this node and
2587 : * how many of those scans we expect to get a cache hit.
2588 : */
2589 453576 : hit_ratio = ((calls - ndistinct) / calls) *
2590 226788 : (est_cache_entries / Max(ndistinct, est_cache_entries));
2591 :
2592 : Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2593 :
2594 : /*
2595 : * Set the total_cost accounting for the expected cache hit ratio. We
2596 : * also add on a cpu_operator_cost to account for a cache lookup. This
2597 : * will happen regardless of whether it's a cache hit or not.
2598 : */
2599 226788 : total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2600 :
2601 : /* Now adjust the total cost to account for cache evictions */
2602 :
2603 : /* Charge a cpu_tuple_cost for evicting the actual cache entry */
2604 226788 : total_cost += cpu_tuple_cost * evict_ratio;
2605 :
2606 : /*
2607 : * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2608 : * The per-tuple eviction is really just a pfree, so charging a whole
2609 : * cpu_operator_cost seems a little excessive.
2610 : */
2611 226788 : total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
2612 :
2613 : /*
2614 : * Now adjust for storing things in the cache, since that's not free
2615 : * either. Everything must go in the cache. We don't proportion this
2616 : * over any ratio, just apply it once for the scan. We charge a
2617 : * cpu_tuple_cost for the creation of the cache entry and also a
2618 : * cpu_operator_cost for each tuple we expect to cache.
2619 : */
2620 226788 : total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2621 :
2622 : /*
2623 : * Getting the first row must also be proportioned according to the
2624 : * expected cache hit ratio.
2625 : */
2626 226788 : startup_cost = input_startup_cost * (1.0 - hit_ratio);
2627 :
2628 : /*
2629 : * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2630 : * which we'll do regardless of whether it was a cache hit or not.
2631 : */
2632 226788 : startup_cost += cpu_tuple_cost;
2633 :
2634 226788 : *rescan_startup_cost = startup_cost;
2635 226788 : *rescan_total_cost = total_cost;
2636 226788 : }
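/*
 * Worked examples of the formulas above (illustrative numbers): with
 * calls = 1000, ndistinct = 100 and est_cache_entries = 500, everything
 * fits, so hit_ratio = ((1000 - 100) / 1000) * (500 / 500) = 0.9 and the
 * rescan total cost is roughly 10% of the subpath's total cost plus the
 * lookup and caching charges.  With calls = 10000, ndistinct = 2000 and
 * the same cache size, hit_ratio = 0.8 * 0.25 = 0.2 and
 * evict_ratio = 1 - 500/2000 = 0.75, so eviction charges start to bite.
 */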
2637 :
2638 : /*
2639 : * cost_agg
2640 : * Determines and returns the cost of performing an Agg plan node,
2641 : * including the cost of its input.
2642 : *
2643 : * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2644 : * we are using a hashed Agg node just to do grouping).
2645 : *
2646 : * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2647 : * are for appropriately-sorted input.
2648 : */
2649 : void
2650 60144 : cost_agg(Path *path, PlannerInfo *root,
2651 : AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
2652 : int numGroupCols, double numGroups,
2653 : List *quals,
2654 : Cost input_startup_cost, Cost input_total_cost,
2655 : double input_tuples, double input_width)
2656 : {
2657 : double output_tuples;
2658 : Cost startup_cost;
2659 : Cost total_cost;
2660 : AggClauseCosts dummy_aggcosts;
2661 :
2662 : /* Use all-zero per-aggregate costs if NULL is passed */
2663 60144 : if (aggcosts == NULL)
2664 : {
2665 : Assert(aggstrategy == AGG_HASHED);
2666 56544 : MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
2667 9424 : aggcosts = &dummy_aggcosts;
2668 : }
2669 :
2670 : /*
2671 : * The transCost.per_tuple component of aggcosts should be charged once
2672 : * per input tuple, corresponding to the costs of evaluating the aggregate
2673 : * transfns and their input expressions. The finalCost.per_tuple component
2674 : * is charged once per output tuple, corresponding to the costs of
2675 : * evaluating the finalfns. Startup costs are of course charged but once.
2676 : *
2677 : * If we are grouping, we charge an additional cpu_operator_cost per
2678 : * grouping column per input tuple for grouping comparisons.
2679 : *
2680 : * We will produce a single output tuple if not grouping, and a tuple per
2681 : * group otherwise. We charge cpu_tuple_cost for each output tuple.
2682 : *
2683 : * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2684 : * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2685 : * input path is already sorted appropriately, AGG_SORTED should be
2686 : * preferred (since it has no risk of memory overflow). This will happen
2687 : * as long as the computed total costs are indeed exactly equal --- but if
2688 : * there's roundoff error we might do the wrong thing. So be sure that
2689 : * the computations below form the same intermediate values in the same
2690 : * order.
2691 : */
2692 60144 : if (aggstrategy == AGG_PLAIN)
2693 : {
2694 32582 : startup_cost = input_total_cost;
2695 32582 : startup_cost += aggcosts->transCost.startup;
2696 32582 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2697 32582 : startup_cost += aggcosts->finalCost.startup;
2698 32582 : startup_cost += aggcosts->finalCost.per_tuple;
2699 : /* we aren't grouping */
2700 32582 : total_cost = startup_cost + cpu_tuple_cost;
2701 32582 : output_tuples = 1;
2702 : }
2703 27562 : else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2704 : {
2705 : /* Here we are able to deliver output on-the-fly */
2706 10408 : startup_cost = input_startup_cost;
2707 10408 : total_cost = input_total_cost;
2708 10408 : if (aggstrategy == AGG_MIXED && !enable_hashagg)
2709 : {
2710 456 : startup_cost += disable_cost;
2711 456 : total_cost += disable_cost;
2712 : }
2713 : /* calcs phrased this way to match HASHED case, see note above */
2714 10408 : total_cost += aggcosts->transCost.startup;
2715 10408 : total_cost += aggcosts->transCost.per_tuple * input_tuples;
2716 10408 : total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2717 10408 : total_cost += aggcosts->finalCost.startup;
2718 10408 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2719 10408 : total_cost += cpu_tuple_cost * numGroups;
2720 10408 : output_tuples = numGroups;
2721 : }
2722 : else
2723 : {
2724 : /* must be AGG_HASHED */
2725 17154 : startup_cost = input_total_cost;
2726 17154 : if (!enable_hashagg)
2727 1482 : startup_cost += disable_cost;
2728 17154 : startup_cost += aggcosts->transCost.startup;
2729 17154 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2730 : /* cost of computing hash value */
2731 17154 : startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2732 17154 : startup_cost += aggcosts->finalCost.startup;
2733 :
2734 17154 : total_cost = startup_cost;
2735 17154 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2736 : /* cost of retrieving from hash table */
2737 17154 : total_cost += cpu_tuple_cost * numGroups;
2738 17154 : output_tuples = numGroups;
2739 : }
2740 :
2741 : /*
2742 : * Add the disk costs of hash aggregation that spills to disk.
2743 : *
2744 : * Groups that go into the hash table stay in memory until finalized, so
2745 : * spilling and reprocessing tuples doesn't incur additional invocations
2746 : * of transCost or finalCost. Furthermore, the computed hash value is
2747 : * stored with the spilled tuples, so we don't incur extra invocations of
2748 : * the hash function.
2749 : *
2750 : * Hash Agg begins returning tuples after the first batch is complete.
2751 : * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2752 : * accrue reads only to total_cost.
2753 : */
2754 60144 : if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
2755 : {
2756 : double pages;
2757 17998 : double pages_written = 0.0;
2758 17998 : double pages_read = 0.0;
2759 : double spill_cost;
2760 : double hashentrysize;
2761 : double nbatches;
2762 : Size mem_limit;
2763 : uint64 ngroups_limit;
2764 : int num_partitions;
2765 : int depth;
2766 :
2767 : /*
2768 : * Estimate number of batches based on the computed limits. If less
2769 : * than or equal to one, all groups are expected to fit in memory;
2770 : * otherwise we expect to spill.
2771 : */
2772 17998 : hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2773 : input_width,
2774 : aggcosts->transitionSpace);
2775 17998 : hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2776 : &ngroups_limit, &num_partitions);
2777 :
2778 17998 : nbatches = Max((numGroups * hashentrysize) / mem_limit,
2779 : numGroups / ngroups_limit);
2780 :
2781 17998 : nbatches = Max(ceil(nbatches), 1.0);
2782 17998 : num_partitions = Max(num_partitions, 2);
2783 :
2784 : /*
2785 : * The number of partitions can change at different levels of
2786 : * recursion; but for the purposes of this calculation assume it stays
2787 : * constant.
2788 : */
2789 17998 : depth = ceil(log(nbatches) / log(num_partitions));
2790 :
2791 : /*
2792 : * Estimate number of pages read and written. For each level of
2793 : * recursion, a tuple must be written and then later read.
2794 : */
2795 17998 : pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
2796 17998 : pages_written = pages_read = pages * depth;
2797 :
2798 : /*
2799 : * HashAgg has somewhat worse IO behavior than Sort on typical
2800 : * hardware/OS combinations. Account for this with a generic penalty.
2801 : */
2802 17998 : pages_read *= 2.0;
2803 17998 : pages_written *= 2.0;
2804 :
2805 17998 : startup_cost += pages_written * random_page_cost;
2806 17998 : total_cost += pages_written * random_page_cost;
2807 17998 : total_cost += pages_read * seq_page_cost;
2808 :
2809 : /* account for CPU cost of spilling a tuple and reading it back */
2810 17998 : spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2811 17998 : startup_cost += spill_cost;
2812 17998 : total_cost += spill_cost;
2813 : }
2814 :
2815 : /*
2816 : * If there are quals (HAVING quals), account for their cost and
2817 : * selectivity.
2818 : */
2819 60144 : if (quals)
2820 : {
2821 : QualCost qual_cost;
2822 :
2823 4192 : cost_qual_eval(&qual_cost, quals, root);
2824 4192 : startup_cost += qual_cost.startup;
2825 4192 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2826 :
2827 4192 : output_tuples = clamp_row_est(output_tuples *
2828 4192 : clauselist_selectivity(root,
2829 : quals,
2830 : 0,
2831 : JOIN_INNER,
2832 : NULL));
2833 : }
2834 :
2835 60144 : path->rows = output_tuples;
2836 60144 : path->startup_cost = startup_cost;
2837 60144 : path->total_cost = total_cost;
2838 60144 : }
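/*
 * Worked example of the spill estimate (illustrative): if
 * numGroups * hashentrysize is four times mem_limit, nbatches = 4, and
 * with num_partitions = 4 we get depth = ceil(log(4) / log(4)) = 1, so
 * each input tuple is written and read back once.  Those page counts are
 * then doubled by the penalty above and charged at random_page_cost for
 * writes and seq_page_cost for reads; the writes also accrue to
 * startup_cost because the first batch must complete before any tuple
 * can be returned.
 */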
2839 :
2840 : /*
2841 : * get_windowclause_startup_tuples
2842 : * Estimate how many tuples we'll need to fetch from a WindowAgg's
2843 : * subnode before we can output the first WindowAgg tuple.
2844 : *
2845 : * How many tuples need to be read depends on the WindowClause. For example,
2846 : * a WindowClause with no PARTITION BY and no ORDER BY requires that all
2847 : * subnode tuples are read and aggregated before the WindowAgg can output
2848 : * anything. If there's a PARTITION BY, then we only need to look at tuples
2849 : * in the first partition. Here we attempt to estimate just how many
2850 : * 'input_tuples' the WindowAgg will need to read for the given WindowClause
2851 : * before the first tuple can be output.
2852 : */
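/*
 * Illustrative example: for ROW_NUMBER() OVER (ORDER BY x), the default
 * frame (RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) means only
 * the first peer group, plus one look-ahead tuple, must be read before
 * the first row can go out; an empty OVER () has no ORDER BY, so every
 * row is a peer and the entire input must be read first.
 */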
2853 : static double
2854 2630 : get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc,
2855 : double input_tuples)
2856 : {
2857 2630 : int frameOptions = wc->frameOptions;
2858 : double partition_tuples;
2859 : double return_tuples;
2860 : double peer_tuples;
2861 :
2862 : /*
2863 : * First, figure out how many partitions there are likely to be and set
2864 : * partition_tuples according to that estimate.
2865 : */
2866 2630 : if (wc->partitionClause != NIL)
2867 : {
2868 : double num_partitions;
2869 644 : List *partexprs = get_sortgrouplist_exprs(wc->partitionClause,
2870 644 : root->parse->targetList);
2871 :
2872 644 : num_partitions = estimate_num_groups(root, partexprs, input_tuples,
2873 : NULL, NULL);
2874 644 : list_free(partexprs);
2875 :
2876 644 : partition_tuples = input_tuples / num_partitions;
2877 : }
2878 : else
2879 : {
2880 : /* all tuples belong to the same partition */
2881 1986 : partition_tuples = input_tuples;
2882 : }
2883 :
2884 : /* estimate the number of tuples in each peer group */
2885 2630 : if (wc->orderClause != NIL)
2886 : {
2887 : double num_groups;
2888 : List *orderexprs;
2889 :
2890 2210 : orderexprs = get_sortgrouplist_exprs(wc->orderClause,
2891 2210 : root->parse->targetList);
2892 :
2893 : /* estimate how many peer groups there are in the partition */
2894 2210 : num_groups = estimate_num_groups(root, orderexprs,
2895 : partition_tuples, NULL,
2896 : NULL);
2897 2210 : list_free(orderexprs);
2898 2210 : peer_tuples = partition_tuples / num_groups;
2899 : }
2900 : else
2901 : {
2902 : /* no ORDER BY so only 1 tuple belongs in each peer group */
2903 420 : peer_tuples = 1.0;
2904 : }
2905 :
2906 2630 : if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
2907 : {
2908 : /* include all partition rows */
2909 346 : return_tuples = partition_tuples;
2910 : }
2911 2284 : else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW)
2912 : {
2913 1294 : if (frameOptions & FRAMEOPTION_ROWS)
2914 : {
2915 : /* just count the current row */
2916 576 : return_tuples = 1.0;
2917 : }
2918 718 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
2919 : {
2920 : /*
2921 : * When in RANGE/GROUPS mode, it's more complex. If there's no
2922 : * ORDER BY, then all rows in the partition are peers, otherwise
2923 : * we'll need to read the first group of peers.
2924 : */
2925 718 : if (wc->orderClause == NIL)
2926 258 : return_tuples = partition_tuples;
2927 : else
2928 460 : return_tuples = peer_tuples;
2929 : }
2930 : else
2931 : {
2932 : /*
2933 : * Something new we don't support yet? This needs attention.
2934 : * We'll just return 1.0 in the meantime.
2935 : */
2936 : Assert(false);
2937 0 : return_tuples = 1.0;
2938 : }
2939 : }
2940 990 : else if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
2941 : {
2942 : /*
2943 : * BETWEEN ... AND N PRECEDING will only need to read the WindowAgg's
2944 : * subnode after N ROWS/RANGES/GROUPS. N can be 0, but not negative,
2945 : * so we'll just assume only the current row needs to be read to fetch
2946 : * the first WindowAgg row.
2947 : */
2948 108 : return_tuples = 1.0;
2949 : }
2950 882 : else if (frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
2951 : {
2952 882 : Const *endOffset = (Const *) wc->endOffset;
2953 : double end_offset_value;
2954 :
2955 : /* try to figure out the value specified in the endOffset. */
2956 882 : if (IsA(endOffset, Const))
2957 : {
2958 870 : if (endOffset->constisnull)
2959 : {
2960 : /*
2961 : * NULLs are not allowed, but currently, there's no code to
2962 : * error out if there's a NULL Const. We'll only discover
2963 : * this during execution. For now, just pretend everything is
2964 : * fine and assume that only the first row/range/group will be
2965 : * needed.
2966 : */
2967 0 : end_offset_value = 1.0;
2968 : }
2969 : else
2970 : {
2971 870 : switch (endOffset->consttype)
2972 : {
2973 24 : case INT2OID:
2974 24 : end_offset_value =
2975 24 : (double) DatumGetInt16(endOffset->constvalue);
2976 24 : break;
2977 132 : case INT4OID:
2978 132 : end_offset_value =
2979 132 : (double) DatumGetInt32(endOffset->constvalue);
2980 132 : break;
2981 372 : case INT8OID:
2982 372 : end_offset_value =
2983 372 : (double) DatumGetInt64(endOffset->constvalue);
2984 372 : break;
2985 342 : default:
2986 342 : end_offset_value =
2987 342 : partition_tuples / peer_tuples *
2988 : DEFAULT_INEQ_SEL;
2989 342 : break;
2990 : }
2991 : }
2992 : }
2993 : else
2994 : {
2995 : /*
2996 : * When the end bound is not a Const, we have no value to inspect,
2997 : * so we fall back on DEFAULT_INEQ_SEL as a guess.
2998 : */
2999 12 : end_offset_value =
3000 12 : partition_tuples / peer_tuples * DEFAULT_INEQ_SEL;
3001 : }
3002 :
3003 882 : if (frameOptions & FRAMEOPTION_ROWS)
3004 : {
3005 : /* include the N FOLLOWING and the current row */
3006 222 : return_tuples = end_offset_value + 1.0;
3007 : }
3008 660 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3009 : {
3010 : /* include N FOLLOWING ranges/groups and the initial range/group */
3011 660 : return_tuples = peer_tuples * (end_offset_value + 1.0);
3012 : }
3013 : else
3014 : {
3015 : /*
3016 : * Something new we don't support yet? This needs attention.
3017 : * We'll just return 1.0 in the meantime.
3018 : */
3019 : Assert(false);
3020 0 : return_tuples = 1.0;
3021 : }
3022 : }
3023 : else
3024 : {
3025 : /*
3026 : * Something new we don't support yet? This needs attention. We'll
3027 : * just return 1.0 in the meantime.
3028 : */
3029 : Assert(false);
3030 0 : return_tuples = 1.0;
3031 : }
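/*
 * Hypothetical example of the N FOLLOWING arithmetic above: for ROWS
 * BETWEEN CURRENT ROW AND 2 FOLLOWING, return_tuples = 2 + 1 = 3,
 * whereas in RANGE/GROUPS mode with peer_tuples = 5, return_tuples =
 * 5 * (2 + 1) = 15, since each of the N following groups is itself
 * expected to contain peer_tuples rows.
 */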
3032 :
3033 2630 : if (wc->partitionClause != NIL || wc->orderClause != NIL)
3034 : {
3035 : /*
3036 : * Cap the return value to the estimated partition tuples and account
3037 : * for the extra tuple WindowAgg will need to read to confirm the next
3038 : * tuple does not belong to the same partition or peer group.
3039 : */
3040 2372 : return_tuples = Min(return_tuples + 1.0, partition_tuples);
3041 : }
3042 : else
3043 : {
3044 : /*
3045 : * Cap the return value so it's never higher than the expected tuples
3046 : * in the partition.
3047 : */
3048 258 : return_tuples = Min(return_tuples, partition_tuples);
3049 : }
3050 :
3051 : /*
3052 : * We needn't worry about any EXCLUDE options as those only exclude rows
3053 : * from being aggregated, not from being read from the WindowAgg's
3054 : * subnode.
3055 : */
3056 :
3057 2630 : return clamp_row_est(return_tuples);
3058 : }
3059 :
3060 : /*
3061 : * cost_windowagg
3062 : * Determines and returns the cost of performing a WindowAgg plan node,
3063 : * including the cost of its input.
3064 : *
3065 : * Input is assumed already properly sorted.
3066 : */
3067 : void
3068 2630 : cost_windowagg(Path *path, PlannerInfo *root,
3069 : List *windowFuncs, WindowClause *winclause,
3070 : Cost input_startup_cost, Cost input_total_cost,
3071 : double input_tuples)
3072 : {
3073 : Cost startup_cost;
3074 : Cost total_cost;
3075 : double startup_tuples;
3076 : int numPartCols;
3077 : int numOrderCols;
3078 : ListCell *lc;
3079 :
3080 2630 : numPartCols = list_length(winclause->partitionClause);
3081 2630 : numOrderCols = list_length(winclause->orderClause);
3082 :
3083 2630 : startup_cost = input_startup_cost;
3084 2630 : total_cost = input_total_cost;
3085 :
3086 : /*
3087 : * Window functions are assumed to cost their stated execution cost, plus
3088 : * the cost of evaluating their input expressions, per tuple. Since they
3089 : * may in fact evaluate their inputs at multiple rows during each cycle,
3090 : * this could be a drastic underestimate; but without a way to know how
3091 : * many rows the window function will fetch, it's hard to do better. In
3092 : * any case, it's a good estimate for all the built-in window functions,
3093 : * so we'll just do this for now.
3094 : */
3095 5962 : foreach(lc, windowFuncs)
3096 : {
3097 3332 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
3098 : Cost wfunccost;
3099 : QualCost argcosts;
3100 :
3101 3332 : argcosts.startup = argcosts.per_tuple = 0;
3102 3332 : add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
3103 : &argcosts);
3104 3332 : startup_cost += argcosts.startup;
3105 3332 : wfunccost = argcosts.per_tuple;
3106 :
3107 : /* also add the input expressions' cost to per-input-row costs */
3108 3332 : cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
3109 3332 : startup_cost += argcosts.startup;
3110 3332 : wfunccost += argcosts.per_tuple;
3111 :
3112 : /*
3113 : * Add the filter's cost to per-input-row costs. XXX We should reduce
3114 : * input expression costs according to filter selectivity.
3115 : */
3116 3332 : cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
3117 3332 : startup_cost += argcosts.startup;
3118 3332 : wfunccost += argcosts.per_tuple;
3119 :
3120 3332 : total_cost += wfunccost * input_tuples;
3121 : }
3122 :
3123 : /*
3124 : * We also charge cpu_operator_cost per grouping column per tuple for
3125 : * grouping comparisons, plus cpu_tuple_cost per tuple for general
3126 : * overhead.
3127 : *
3128 : * XXX this neglects costs of spooling the data to disk when it overflows
3129 : * work_mem. Sooner or later that should get accounted for.
3130 : */
3131 2630 : total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
3132 2630 : total_cost += cpu_tuple_cost * input_tuples;
3133 :
3134 2630 : path->rows = input_tuples;
3135 2630 : path->startup_cost = startup_cost;
3136 2630 : path->total_cost = total_cost;
3137 :
3138 : /*
3139 : * Also, take into account how many tuples we need to read from the
3140 : * subnode in order to produce the first tuple from the WindowAgg. To do
3141 : * this we apportion the run cost (total cost not including startup cost)
3142 : * over the estimated startup tuples. We already included the startup
3143 : * cost of the subnode, so we only need to do this when the estimated
3144 : * startup tuple count is above 1.0.
3145 : */
3146 2630 : startup_tuples = get_windowclause_startup_tuples(root, winclause,
3147 : input_tuples);
3148 :
3149 2630 : if (startup_tuples > 1.0)
3150 2358 : path->startup_cost += (total_cost - startup_cost) / input_tuples *
3151 2358 : (startup_tuples - 1.0);
3152 2630 : }
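/*
 * Hypothetical example of the startup-cost adjustment above: with
 * input_tuples = 1000, a run cost (total_cost - startup_cost) of 500,
 * and startup_tuples estimated at 101, startup_cost is increased by
 * 500 / 1000 * (101 - 1) = 50, i.e. by the share of the run cost
 * attributable to the 100 extra input rows that must be read before
 * the first output row can be emitted.
 */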
3153 :
3154 : /*
3155 : * cost_group
3156 : * Determines and returns the cost of performing a Group plan node,
3157 : * including the cost of its input.
3158 : *
3159 : * Note: caller must ensure that input costs are for appropriately-sorted
3160 : * input.
3161 : */
3162 : void
3163 1564 : cost_group(Path *path, PlannerInfo *root,
3164 : int numGroupCols, double numGroups,
3165 : List *quals,
3166 : Cost input_startup_cost, Cost input_total_cost,
3167 : double input_tuples)
3168 : {
3169 : double output_tuples;
3170 : Cost startup_cost;
3171 : Cost total_cost;
3172 :
3173 1564 : output_tuples = numGroups;
3174 1564 : startup_cost = input_startup_cost;
3175 1564 : total_cost = input_total_cost;
3176 :
3177 : /*
3178 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3179 : * all columns get compared for most of the tuples.
3180 : */
3181 1564 : total_cost += cpu_operator_cost * input_tuples * numGroupCols;
3182 :
3183 : /*
3184 : * If there are quals (HAVING quals), account for their cost and
3185 : * selectivity.
3186 : */
3187 1564 : if (quals)
3188 : {
3189 : QualCost qual_cost;
3190 :
3191 0 : cost_qual_eval(&qual_cost, quals, root);
3192 0 : startup_cost += qual_cost.startup;
3193 0 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
3194 :
3195 0 : output_tuples = clamp_row_est(output_tuples *
3196 0 : clauselist_selectivity(root,
3197 : quals,
3198 : 0,
3199 : JOIN_INNER,
3200 : NULL));
3201 : }
3202 :
3203 1564 : path->rows = output_tuples;
3204 1564 : path->startup_cost = startup_cost;
3205 1564 : path->total_cost = total_cost;
3206 1564 : }
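/*
 * Hypothetical example of the comparison charge above: grouping 10000
 * input tuples on 2 columns with the default cpu_operator_cost of
 * 0.0025 adds 0.0025 * 10000 * 2 = 50 to total_cost.
 */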
3207 :
3208 : /*
3209 : * initial_cost_nestloop
3210 : * Preliminary estimate of the cost of a nestloop join path.
3211 : *
3212 : * This must quickly produce lower-bound estimates of the path's startup and
3213 : * total costs. If we are unable to eliminate the proposed path from
3214 : * consideration using the lower bounds, final_cost_nestloop will be called
3215 : * to obtain the final estimates.
3216 : *
3217 : * The exact division of labor between this function and final_cost_nestloop
3218 : * is private to them, and represents a tradeoff between speed of the initial
3219 : * estimate and getting a tight lower bound. We choose not to examine the
3220 : * join quals here, since that's by far the most expensive part of the
3221 : * calculations. The end result is that CPU-cost considerations must be
3222 : * left for the second phase; and for SEMI/ANTI joins, we must also postpone
3223 : * incorporation of the inner path's run cost.
3224 : *
3225 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3226 : * other data to be used by final_cost_nestloop
3227 : * 'jointype' is the type of join to be performed
3228 : * 'outer_path' is the outer input to the join
3229 : * 'inner_path' is the inner input to the join
3230 : * 'extra' contains miscellaneous information about the join
3231 : */
3232 : void
3233 2266580 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
3234 : JoinType jointype,
3235 : Path *outer_path, Path *inner_path,
3236 : JoinPathExtraData *extra)
3237 : {
3238 2266580 : Cost startup_cost = 0;
3239 2266580 : Cost run_cost = 0;
3240 2266580 : double outer_path_rows = outer_path->rows;
3241 : Cost inner_rescan_start_cost;
3242 : Cost inner_rescan_total_cost;
3243 : Cost inner_run_cost;
3244 : Cost inner_rescan_run_cost;
3245 :
3246 : /* estimate costs to rescan the inner relation */
3247 2266580 : cost_rescan(root, inner_path,
3248 : &inner_rescan_start_cost,
3249 : &inner_rescan_total_cost);
3250 :
3251 : /* cost of source data */
3252 :
3253 : /*
3254 : * NOTE: clearly, we must pay both outer and inner paths' startup_cost
3255 : * before we can start returning tuples, so the join's startup cost is
3256 : * their sum. We'll also pay the inner path's rescan startup cost
3257 : * multiple times.
3258 : */
3259 2266580 : startup_cost += outer_path->startup_cost + inner_path->startup_cost;
3260 2266580 : run_cost += outer_path->total_cost - outer_path->startup_cost;
3261 2266580 : if (outer_path_rows > 1)
3262 1535442 : run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
3263 :
3264 2266580 : inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
3265 2266580 : inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
3266 :
3267 2266580 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
3268 2226632 : extra->inner_unique)
3269 : {
3270 : /*
3271 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3272 : * executor will stop after the first match.
3273 : *
3274 : * Getting decent estimates requires inspection of the join quals,
3275 : * which we choose to postpone to final_cost_nestloop.
3276 : */
3277 :
3278 : /* Save private data for final_cost_nestloop */
3279 1046528 : workspace->inner_run_cost = inner_run_cost;
3280 1046528 : workspace->inner_rescan_run_cost = inner_rescan_run_cost;
3281 : }
3282 : else
3283 : {
3284 : /* Normal case; we'll scan whole input rel for each outer row */
3285 1220052 : run_cost += inner_run_cost;
3286 1220052 : if (outer_path_rows > 1)
3287 835536 : run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
3288 : }
3289 :
3290 : /* CPU costs left for later */
3291 :
3292 : /* Public result fields */
3293 2266580 : workspace->startup_cost = startup_cost;
3294 2266580 : workspace->total_cost = startup_cost + run_cost;
3295 : /* Save private data for final_cost_nestloop */
3296 2266580 : workspace->run_cost = run_cost;
3297 2266580 : }
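/*
 * Hypothetical example of the preliminary costing above, for the
 * normal (non-SEMI/ANTI, not inner_unique) case: with 100 outer rows,
 * an inner first-scan run cost of 20 and an inner rescan run cost of
 * 5, run_cost accumulates the outer run cost, plus 99 times the inner
 * rescan start cost, plus 20 + 99 * 5 = 515 for the repeated inner
 * scans.
 */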
3298 :
3299 : /*
3300 : * final_cost_nestloop
3301 : * Final estimate of the cost and result size of a nestloop join path.
3302 : *
3303 : * 'path' is already filled in except for the rows and cost fields
3304 : * 'workspace' is the result from initial_cost_nestloop
3305 : * 'extra' contains miscellaneous information about the join
3306 : */
3307 : void
3308 1134770 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
3309 : JoinCostWorkspace *workspace,
3310 : JoinPathExtraData *extra)
3311 : {
3312 1134770 : Path *outer_path = path->jpath.outerjoinpath;
3313 1134770 : Path *inner_path = path->jpath.innerjoinpath;
3314 1134770 : double outer_path_rows = outer_path->rows;
3315 1134770 : double inner_path_rows = inner_path->rows;
3316 1134770 : Cost startup_cost = workspace->startup_cost;
3317 1134770 : Cost run_cost = workspace->run_cost;
3318 : Cost cpu_per_tuple;
3319 : QualCost restrict_qual_cost;
3320 : double ntuples;
3321 :
3322 : /* Protect some assumptions below that rowcounts aren't zero */
3323 1134770 : if (outer_path_rows <= 0)
3324 0 : outer_path_rows = 1;
3325 1134770 : if (inner_path_rows <= 0)
3326 624 : inner_path_rows = 1;
3327 : /* Mark the path with the correct row estimate */
3328 1134770 : if (path->jpath.path.param_info)
3329 26292 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3330 : else
3331 1108478 : path->jpath.path.rows = path->jpath.path.parent->rows;
3332 :
3333 : /* For partial paths, scale row estimate. */
3334 1134770 : if (path->jpath.path.parallel_workers > 0)
3335 : {
3336 7826 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3337 :
3338 7826 : path->jpath.path.rows =
3339 7826 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3340 : }
3341 :
3342 : /*
3343 : * We could include disable_cost in the preliminary estimate, but that
3344 : * would amount to optimizing for the case where the join method is
3345 : * disabled, which doesn't seem like the way to bet.
3346 : */
3347 1134770 : if (!enable_nestloop)
3348 3252 : startup_cost += disable_cost;
3349 :
3350 : /* cost of inner-relation source data (we already dealt with outer rel) */
3351 :
3352 1134770 : if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
3353 1108224 : extra->inner_unique)
3354 721908 : {
3355 : /*
3356 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3357 : * executor will stop after the first match.
3358 : */
3359 721908 : Cost inner_run_cost = workspace->inner_run_cost;
3360 721908 : Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3361 : double outer_matched_rows;
3362 : double outer_unmatched_rows;
3363 : Selectivity inner_scan_frac;
3364 :
3365 : /*
3366 : * For an outer-rel row that has at least one match, we can expect the
3367 : * inner scan to stop after a fraction 1/(match_count+1) of the inner
3368 : * rows, if the matches are evenly distributed. Since they probably
3369 : * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3370 : * that fraction. (If we used a larger fuzz factor, we'd have to
3371 : * clamp inner_scan_frac to at most 1.0; but since match_count is at
3372 : * least 1, no such clamp is needed now.)
3373 : */
3374 721908 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3375 721908 : outer_unmatched_rows = outer_path_rows - outer_matched_rows;
3376 721908 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
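/*
 * Hypothetical example: with match_count = 3, inner_scan_frac =
 * 2.0 / 4.0 = 0.5, i.e. we charge for scanning half of the inner
 * rows per matched outer row, twice the 1/(match_count+1) = 0.25
 * that a perfectly even distribution of matches would suggest.
 */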
3377 :
3378 : /*
3379 : * Compute number of tuples processed (not number emitted!). First,
3380 : * account for successfully-matched outer rows.
3381 : */
3382 721908 : ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
3383 :
3384 : /*
3385 : * Now we need to estimate the actual costs of scanning the inner
3386 : * relation, which may be quite a bit less than N times inner_run_cost
3387 : * due to early scan stops. We consider two cases. If the inner path
3388 : * is an indexscan using all the joinquals as indexquals, then an
3389 : * unmatched outer row results in an indexscan returning no rows,
3390 : * which is probably quite cheap. Otherwise, the executor will have
3391 : * to scan the whole inner rel for an unmatched row; not so cheap.
3392 : */
3393 721908 : if (has_indexed_join_quals(path))
3394 : {
3395 : /*
3396 : * Successfully-matched outer rows will only require scanning
3397 : * inner_scan_frac of the inner relation. In this case, we don't
3398 : * need to charge the full inner_run_cost even when that's more
3399 : * than inner_rescan_run_cost, because we can assume that none of
3400 : * the inner scans ever scan the whole inner relation. So it's
3401 : * okay to assume that all the inner scan executions can be
3402 : * fractions of the full cost, even if materialization is reducing
3403 : * the rescan cost. At this writing, it's impossible to get here
3404 : * for a materialized inner scan, so inner_run_cost and
3405 : * inner_rescan_run_cost will be the same anyway; but just in
3406 : * case, use inner_run_cost for the first matched tuple and
3407 : * inner_rescan_run_cost for additional ones.
3408 : */
3409 131494 : run_cost += inner_run_cost * inner_scan_frac;
3410 131494 : if (outer_matched_rows > 1)
3411 14126 : run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3412 :
3413 : /*
3414 : * Add the cost of inner-scan executions for unmatched outer rows.
3415 : * We estimate this as the same cost as returning the first tuple
3416 : * of a nonempty scan. We consider that these are all rescans,
3417 : * since we used inner_run_cost once already.
3418 : */
3419 131494 : run_cost += outer_unmatched_rows *
3420 131494 : inner_rescan_run_cost / inner_path_rows;
3421 :
3422 : /*
3423 : * We won't be evaluating any quals at all for unmatched rows, so
3424 : * don't add them to ntuples.
3425 : */
3426 : }
3427 : else
3428 : {
3429 : /*
3430 : * Here, a complicating factor is that rescans may be cheaper than
3431 : * first scans. If we never scan all the way to the end of the
3432 : * inner rel, it might be (depending on the plan type) that we'd
3433 : * never pay the whole inner first-scan run cost. However it is
3434 : * difficult to estimate whether that will happen (and it could
3435 : * not happen if there are any unmatched outer rows!), so be
3436 : * conservative and always charge the whole first-scan cost once.
3437 : * We consider this charge to correspond to the first unmatched
3438 : * outer row, unless there isn't one in our estimate, in which
3439 : * case blame it on the first matched row.
3440 : */
3441 :
3442 : /* First, count all unmatched join tuples as being processed */
3443 590414 : ntuples += outer_unmatched_rows * inner_path_rows;
3444 :
3445 : /* Now add the forced full scan, and decrement appropriate count */
3446 590414 : run_cost += inner_run_cost;
3447 590414 : if (outer_unmatched_rows >= 1)
3448 575418 : outer_unmatched_rows -= 1;
3449 : else
3450 14996 : outer_matched_rows -= 1;
3451 :
3452 : /* Add inner run cost for additional outer tuples having matches */
3453 590414 : if (outer_matched_rows > 0)
3454 187494 : run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3455 :
3456 : /* Add inner run cost for additional unmatched outer tuples */
3457 590414 : if (outer_unmatched_rows > 0)
3458 380900 : run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3459 : }
3460 : }
3461 : else
3462 : {
3463 : /* Normal-case source costs were included in preliminary estimate */
3464 :
3465 : /* Compute number of tuples processed (not number emitted!) */
3466 412862 : ntuples = outer_path_rows * inner_path_rows;
3467 : }
3468 :
3469 : /* CPU costs */
3470 1134770 : cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
3471 1134770 : startup_cost += restrict_qual_cost.startup;
3472 1134770 : cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
3473 1134770 : run_cost += cpu_per_tuple * ntuples;
3474 :
3475 : /* tlist eval costs are paid per output row, not per tuple scanned */
3476 1134770 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3477 1134770 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3478 :
3479 1134770 : path->jpath.path.startup_cost = startup_cost;
3480 1134770 : path->jpath.path.total_cost = startup_cost + run_cost;
3481 1134770 : }
3482 :
3483 : /*
3484 : * initial_cost_mergejoin
3485 : * Preliminary estimate of the cost of a mergejoin path.
3486 : *
3487 : * This must quickly produce lower-bound estimates of the path's startup and
3488 : * total costs. If we are unable to eliminate the proposed path from
3489 : * consideration using the lower bounds, final_cost_mergejoin will be called
3490 : * to obtain the final estimates.
3491 : *
3492 : * The exact division of labor between this function and final_cost_mergejoin
3493 : * is private to them, and represents a tradeoff between speed of the initial
3494 : * estimate and getting a tight lower bound. We choose not to examine the
3495 : * join quals here, except for obtaining the scan selectivity estimate, which
3496 : * is really essential (but fortunately, use of caching keeps the cost of
3497 : * getting that down to something reasonable).
3498 : * We also assume that cost_sort is cheap enough to use here.
3499 : *
3500 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3501 : * other data to be used by final_cost_mergejoin
3502 : * 'jointype' is the type of join to be performed
3503 : * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3504 : * 'outer_path' is the outer input to the join
3505 : * 'inner_path' is the inner input to the join
3506 : * 'outersortkeys' is the list of sort keys for the outer path
3507 : * 'innersortkeys' is the list of sort keys for the inner path
3508 : * 'extra' contains miscellaneous information about the join
3509 : *
3510 : * Note: outersortkeys and innersortkeys should be NIL if no explicit
3511 : * sort is needed because the respective source path is already ordered.
3512 : */
3513 : void
3514 1009762 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3515 : JoinType jointype,
3516 : List *mergeclauses,
3517 : Path *outer_path, Path *inner_path,
3518 : List *outersortkeys, List *innersortkeys,
3519 : JoinPathExtraData *extra)
3520 : {
3521 1009762 : Cost startup_cost = 0;
3522 1009762 : Cost run_cost = 0;
3523 1009762 : double outer_path_rows = outer_path->rows;
3524 1009762 : double inner_path_rows = inner_path->rows;
3525 : Cost inner_run_cost;
3526 : double outer_rows,
3527 : inner_rows,
3528 : outer_skip_rows,
3529 : inner_skip_rows;
3530 : Selectivity outerstartsel,
3531 : outerendsel,
3532 : innerstartsel,
3533 : innerendsel;
3534 : Path sort_path; /* dummy for result of cost_sort */
3535 :
3536 : /* Protect some assumptions below that rowcounts aren't zero */
3537 1009762 : if (outer_path_rows <= 0)
3538 96 : outer_path_rows = 1;
3539 1009762 : if (inner_path_rows <= 0)
3540 126 : inner_path_rows = 1;
3541 :
3542 : /*
3543 : * A merge join will stop as soon as it exhausts either input stream
3544 : * (unless it's an outer join, in which case the outer side has to be
3545 : * scanned all the way anyway). Estimate fraction of the left and right
3546 : * inputs that will actually need to be scanned. Likewise, we can
3547 : * estimate the number of rows that will be skipped before the first join
3548 : * pair is found, which should be factored into startup cost. We use only
3549 : * the first (most significant) merge clause for this purpose. Since
3550 : * mergejoinscansel() is a fairly expensive computation, we cache the
3551 : * results in the merge clause RestrictInfo.
3552 : */
3553 1009762 : if (mergeclauses && jointype != JOIN_FULL)
3554 1003658 : {
3555 1003658 : RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3556 : List *opathkeys;
3557 : List *ipathkeys;
3558 : PathKey *opathkey;
3559 : PathKey *ipathkey;
3560 : MergeScanSelCache *cache;
3561 :
3562 : /* Get the input pathkeys to determine the sort-order details */
3563 1003658 : opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3564 1003658 : ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3565 : Assert(opathkeys);
3566 : Assert(ipathkeys);
3567 1003658 : opathkey = (PathKey *) linitial(opathkeys);
3568 1003658 : ipathkey = (PathKey *) linitial(ipathkeys);
3569 : /* debugging check */
3570 1003658 : if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
3571 1003658 : opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
3572 1003658 : opathkey->pk_strategy != ipathkey->pk_strategy ||
3573 1003658 : opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
3574 0 : elog(ERROR, "left and right pathkeys do not match in mergejoin");
3575 :
3576 : /* Get the selectivity with caching */
3577 1003658 : cache = cached_scansel(root, firstclause, opathkey);
3578 :
3579 1003658 : if (bms_is_subset(firstclause->left_relids,
3580 1003658 : outer_path->parent->relids))
3581 : {
3582 : /* left side of clause is outer */
3583 524460 : outerstartsel = cache->leftstartsel;
3584 524460 : outerendsel = cache->leftendsel;
3585 524460 : innerstartsel = cache->rightstartsel;
3586 524460 : innerendsel = cache->rightendsel;
3587 : }
3588 : else
3589 : {
3590 : /* left side of clause is inner */
3591 479198 : outerstartsel = cache->rightstartsel;
3592 479198 : outerendsel = cache->rightendsel;
3593 479198 : innerstartsel = cache->leftstartsel;
3594 479198 : innerendsel = cache->leftendsel;
3595 : }
3596 1003658 : if (jointype == JOIN_LEFT ||
3597 : jointype == JOIN_ANTI)
3598 : {
3599 200518 : outerstartsel = 0.0;
3600 200518 : outerendsel = 1.0;
3601 : }
3602 803140 : else if (jointype == JOIN_RIGHT ||
3603 : jointype == JOIN_RIGHT_ANTI)
3604 : {
3605 196152 : innerstartsel = 0.0;
3606 196152 : innerendsel = 1.0;
3607 : }
3608 : }
3609 : else
3610 : {
3611 : /* cope with clauseless or full mergejoin */
3612 6104 : outerstartsel = innerstartsel = 0.0;
3613 6104 : outerendsel = innerendsel = 1.0;
3614 : }
3615 :
3616 : /*
3617 : * Convert selectivities to row counts. We force outer_rows and
3618 : * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3619 : */
3620 1009762 : outer_skip_rows = rint(outer_path_rows * outerstartsel);
3621 1009762 : inner_skip_rows = rint(inner_path_rows * innerstartsel);
3622 1009762 : outer_rows = clamp_row_est(outer_path_rows * outerendsel);
3623 1009762 : inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3624 :
3625 : Assert(outer_skip_rows <= outer_rows);
3626 : Assert(inner_skip_rows <= inner_rows);
3627 :
3628 : /*
3629 : * Readjust scan selectivities to account for above rounding. This is
3630 : * normally an insignificant effect, but when there are only a few rows in
3631 : * the inputs, failing to do this makes for a large percentage error.
3632 : */
3633 1009762 : outerstartsel = outer_skip_rows / outer_path_rows;
3634 1009762 : innerstartsel = inner_skip_rows / inner_path_rows;
3635 1009762 : outerendsel = outer_rows / outer_path_rows;
3636 1009762 : innerendsel = inner_rows / inner_path_rows;
3637 :
3638 : Assert(outerstartsel <= outerendsel);
3639 : Assert(innerstartsel <= innerendsel);
3640 :
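/*
 * Hypothetical example of the rounding adjustment above: with
 * outer_path_rows = 100 and outerstartsel = 0.013, outer_skip_rows =
 * rint(100 * 0.013) = 1, and outerstartsel is then readjusted to
 * 1 / 100 = 0.01 so that the row counts and selectivities used below
 * stay mutually consistent.
 */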
3641 : /* cost of source data */
3642 :
3643 1009762 : if (outersortkeys) /* do we need to sort outer? */
3644 : {
3645 481008 : cost_sort(&sort_path,
3646 : root,
3647 : outersortkeys,
3648 : outer_path->total_cost,
3649 : outer_path_rows,
3650 481008 : outer_path->pathtarget->width,
3651 : 0.0,
3652 : work_mem,
3653 : -1.0);
3654 481008 : startup_cost += sort_path.startup_cost;
3655 481008 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3656 481008 : * outerstartsel;
3657 481008 : run_cost += (sort_path.total_cost - sort_path.startup_cost)
3658 481008 : * (outerendsel - outerstartsel);
3659 : }
3660 : else
3661 : {
3662 528754 : startup_cost += outer_path->startup_cost;
3663 528754 : startup_cost += (outer_path->total_cost - outer_path->startup_cost)
3664 528754 : * outerstartsel;
3665 528754 : run_cost += (outer_path->total_cost - outer_path->startup_cost)
3666 528754 : * (outerendsel - outerstartsel);
3667 : }
3668 :
3669 1009762 : if (innersortkeys) /* do we need to sort inner? */
3670 : {
3671 792314 : cost_sort(&sort_path,
3672 : root,
3673 : innersortkeys,
3674 : inner_path->total_cost,
3675 : inner_path_rows,
3676 792314 : inner_path->pathtarget->width,
3677 : 0.0,
3678 : work_mem,
3679 : -1.0);
3680 792314 : startup_cost += sort_path.startup_cost;
3681 792314 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3682 792314 : * innerstartsel;
3683 792314 : inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3684 792314 : * (innerendsel - innerstartsel);
3685 : }
3686 : else
3687 : {
3688 217448 : startup_cost += inner_path->startup_cost;
3689 217448 : startup_cost += (inner_path->total_cost - inner_path->startup_cost)
3690 217448 : * innerstartsel;
3691 217448 : inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
3692 217448 : * (innerendsel - innerstartsel);
3693 : }
3694 :
3695 : /*
3696 : * We can't yet determine whether rescanning occurs, or whether
3697 : * materialization of the inner input should be done. The minimum
3698 : * possible inner input cost, regardless of rescan and materialization
3699 : * considerations, is inner_run_cost. We include that in
3700 : * workspace->total_cost, but not yet in run_cost.
3701 : */
3702 :
3703 : /* CPU costs left for later */
3704 :
3705 : /* Public result fields */
3706 1009762 : workspace->startup_cost = startup_cost;
3707 1009762 : workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3708 : /* Save private data for final_cost_mergejoin */
3709 1009762 : workspace->run_cost = run_cost;
3710 1009762 : workspace->inner_run_cost = inner_run_cost;
3711 1009762 : workspace->outer_rows = outer_rows;
3712 1009762 : workspace->inner_rows = inner_rows;
3713 1009762 : workspace->outer_skip_rows = outer_skip_rows;
3714 1009762 : workspace->inner_skip_rows = inner_skip_rows;
3715 1009762 : }
3716 :
3717 : /*
3718 : * final_cost_mergejoin
3719 : * Final estimate of the cost and result size of a mergejoin path.
3720 : *
3721 : * Unlike other costsize functions, this routine makes two actual decisions:
3722 : * whether the executor will need to do mark/restore, and whether we should
3723 : * materialize the inner path. It would be logically cleaner to build
3724 : * separate paths testing these alternatives, but that would require repeating
3725 : * most of the cost calculations, which are not all that cheap. Since the
3726 : * choice will not affect output pathkeys or startup cost, only total cost,
3727 : * there is no possibility of wanting to keep more than one path. So it seems
3728 : * best to make the decisions here and record them in the path's
3729 : * skip_mark_restore and materialize_inner fields.
3730 : *
3731 : * Mark/restore overhead is usually required, but can be skipped if we know
3732 : * that the executor need find only one match per outer tuple, and that the
3733 : * mergeclauses are sufficient to identify a match.
3734 : *
3735 : * We materialize the inner path if we need mark/restore and either the inner
3736 : * path can't support mark/restore, or it's cheaper to use an interposed
3737 : * Material node to handle mark/restore.
3738 : *
3739 : * 'path' is already filled in except for the rows and cost fields and
3740 : * skip_mark_restore and materialize_inner
3741 : * 'workspace' is the result from initial_cost_mergejoin
3742 : * 'extra' contains miscellaneous information about the join
3743 : */
3744 : void
3745 252608 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3746 : JoinCostWorkspace *workspace,
3747 : JoinPathExtraData *extra)
3748 : {
3749 252608 : Path *outer_path = path->jpath.outerjoinpath;
3750 252608 : Path *inner_path = path->jpath.innerjoinpath;
3751 252608 : double inner_path_rows = inner_path->rows;
3752 252608 : List *mergeclauses = path->path_mergeclauses;
3753 252608 : List *innersortkeys = path->innersortkeys;
3754 252608 : Cost startup_cost = workspace->startup_cost;
3755 252608 : Cost run_cost = workspace->run_cost;
3756 252608 : Cost inner_run_cost = workspace->inner_run_cost;
3757 252608 : double outer_rows = workspace->outer_rows;
3758 252608 : double inner_rows = workspace->inner_rows;
3759 252608 : double outer_skip_rows = workspace->outer_skip_rows;
3760 252608 : double inner_skip_rows = workspace->inner_skip_rows;
3761 : Cost cpu_per_tuple,
3762 : bare_inner_cost,
3763 : mat_inner_cost;
3764 : QualCost merge_qual_cost;
3765 : QualCost qp_qual_cost;
3766 : double mergejointuples,
3767 : rescannedtuples;
3768 : double rescanratio;
3769 :
3770 : /* Protect some assumptions below that rowcounts aren't zero */
3771 252608 : if (inner_path_rows <= 0)
3772 90 : inner_path_rows = 1;
3773 :
3774 : /* Mark the path with the correct row estimate */
3775 252608 : if (path->jpath.path.param_info)
3776 654 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3777 : else
3778 251954 : path->jpath.path.rows = path->jpath.path.parent->rows;
3779 :
3780 : /* For partial paths, scale row estimate. */
3781 252608 : if (path->jpath.path.parallel_workers > 0)
3782 : {
3783 9054 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3784 :
3785 9054 : path->jpath.path.rows =
3786 9054 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3787 : }
3788 :
3789 : /*
3790 : * We could include disable_cost in the preliminary estimate, but that
3791 : * would amount to optimizing for the case where the join method is
3792 : * disabled, which doesn't seem like the way to bet.
3793 : */
3794 252608 : if (!enable_mergejoin)
3795 0 : startup_cost += disable_cost;
3796 :
3797 : /*
3798 : * Compute cost of the mergequals and qpquals (other restriction clauses)
3799 : * separately.
3800 : */
3801 252608 : cost_qual_eval(&merge_qual_cost, mergeclauses, root);
3802 252608 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3803 252608 : qp_qual_cost.startup -= merge_qual_cost.startup;
3804 252608 : qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
3805 :
3806 : /*
3807 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3808 : * executor will stop scanning for matches after the first match. When
3809 : * all the joinclauses are merge clauses, this means we don't ever need to
3810 : * back up the merge, and so we can skip mark/restore overhead.
3811 : */
3812 252608 : if ((path->jpath.jointype == JOIN_SEMI ||
3813 248858 : path->jpath.jointype == JOIN_ANTI ||
3814 362098 : extra->inner_unique) &&
3815 119380 : (list_length(path->jpath.joinrestrictinfo) ==
3816 119380 : list_length(path->path_mergeclauses)))
3817 102362 : path->skip_mark_restore = true;
3818 : else
3819 150246 : path->skip_mark_restore = false;
3820 :
3821 : /*
3822 : * Get approx # tuples passing the mergequals. We use approx_tuple_count
3823 : * here because we need an estimate done with JOIN_INNER semantics.
3824 : */
3825 252608 : mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
3826 :
3827 : /*
3828 : * When there are equal merge keys in the outer relation, the mergejoin
3829 : * must rescan any matching tuples in the inner relation. This means
3830 : * re-fetching inner tuples; we have to estimate how often that happens.
3831 : *
3832 : * For regular inner and outer joins, the number of re-fetches can be
3833 : * estimated approximately as size of merge join output minus size of
3834 : * inner relation. Assume that the distinct key values are 1, 2, ..., and
3835 : * denote the number of values of each key in the outer relation as m1,
3836 : * m2, ...; in the inner relation, n1, n2, ... Then we have
3837 : *
3838 : * size of join = m1 * n1 + m2 * n2 + ...
3839 : *
3840 : * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
3841 : * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
3842 : * relation
3843 : *
3844 : * This equation works correctly for outer tuples having no inner match
3845 : * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
3846 : * are effectively subtracting those from the number of rescanned tuples,
3847 : * when we should not. Can we do better without expensive selectivity
3848 : * computations?
3849 : *
3850 : * The whole issue is moot if we are working from a unique-ified outer
3851 : * input, or if we know we don't need to mark/restore at all.
3852 : */
3853 252608 : if (IsA(outer_path, UniquePath) || path->skip_mark_restore)
3854 103296 : rescannedtuples = 0;
3855 : else
3856 : {
3857 149312 : rescannedtuples = mergejointuples - inner_path_rows;
3858 : /* Must clamp because of possible underestimate */
3859 149312 : if (rescannedtuples < 0)
3860 59080 : rescannedtuples = 0;
3861 : }
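/*
 * Hypothetical example of the formula above: with outer-side key
 * counts m = (2, 3) and inner-side counts n = (4, 1), the join
 * produces 2*4 + 3*1 = 11 tuples while the inner relation holds
 * 4 + 1 = 5, giving rescannedtuples = 11 - 5 = 6, the same figure as
 * summing (m1 - 1)*n1 + (m2 - 1)*n2 = 1*4 + 2*1 = 6 directly.
 */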
3862 :
3863 : /*
3864 : * We'll inflate various costs this much to account for rescanning. Note
3865 : * that this is to be multiplied by something involving inner_rows, or
3866 : * another number related to the portion of the inner rel we'll scan.
3867 : */
3868 252608 : rescanratio = 1.0 + (rescannedtuples / inner_rows);
3869 :
3870 : /*
3871 : * Decide whether we want to materialize the inner input to shield it from
3872 : * mark/restore and performing re-fetches. Our cost model for regular
3873 : * re-fetches is that a re-fetch costs the same as an original fetch,
3874 : * which is probably an overestimate; but on the other hand we ignore the
3875 : * bookkeeping costs of mark/restore. Not clear if it's worth developing
3876 : * a more refined model. So we just need to inflate the inner run cost by
3877 : * rescanratio.
3878 : */
3879 252608 : bare_inner_cost = inner_run_cost * rescanratio;
3880 :
3881 : /*
3882 : * When we interpose a Material node the re-fetch cost is assumed to be
3883 : * just cpu_operator_cost per tuple, independently of the underlying
3884 : * plan's cost; and we charge an extra cpu_operator_cost per original
3885 : * fetch as well. Note that we're assuming the materialize node will
3886 : * never spill to disk, since it only has to remember tuples back to the
3887 : * last mark. (If there are a huge number of duplicates, our other cost
3888 : * factors will make the path so expensive that it probably won't get
3889 : * chosen anyway.) So we don't use cost_rescan here.
3890 : *
3891 : * Note: keep this estimate in sync with create_mergejoin_plan's labeling
3892 : * of the generated Material node.
3893 : */
3894 252608 : mat_inner_cost = inner_run_cost +
3895 252608 : cpu_operator_cost * inner_rows * rescanratio;
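/*
 * Hypothetical comparison of the two estimates: with inner_run_cost =
 * 100, inner_rows = 1000, rescanratio = 1.5 and the default
 * cpu_operator_cost of 0.0025, bare_inner_cost = 100 * 1.5 = 150
 * while mat_inner_cost = 100 + 0.0025 * 1000 * 1.5 = 103.75, so
 * materializing the inner input would look cheaper.
 */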
3896 :
3897 : /*
3898 : * If we don't need mark/restore at all, we don't need materialization.
3899 : */
3900 252608 : if (path->skip_mark_restore)
3901 102362 : path->materialize_inner = false;
3902 :
3903 : /*
3904 : * Prefer materializing if it looks cheaper, unless the user has asked to
3905 : * suppress materialization.
3906 : */
3907 150246 : else if (enable_material && mat_inner_cost < bare_inner_cost)
3908 2558 : path->materialize_inner = true;
3909 :
3910 : /*
3911 : * Even if materializing doesn't look cheaper, we *must* do it if the
3912 : * inner path is to be used directly (without sorting) and it doesn't
3913 : * support mark/restore.
3914 : *
3915 : * Since the inner side must be ordered, and only Sorts and IndexScans can
3916 : * create order to begin with, and they both support mark/restore, you
3917 : * might think there's no problem --- but you'd be wrong. Nestloop and
3918 : * merge joins can *preserve* the order of their inputs, so they can be
3919 : * selected as the input of a mergejoin, and they don't support
3920 : * mark/restore at present.
3921 : *
3922 : * We don't test the value of enable_material here, because
3923 : * materialization is required for correctness in this case, and turning
3924 : * it off does not entitle us to deliver an invalid plan.
3925 : */
3926 147688 : else if (innersortkeys == NIL &&
3927 6706 : !ExecSupportsMarkRestore(inner_path))
3928 998 : path->materialize_inner = true;
3929 :
3930 : /*
3931 : * Also, force materializing if the inner path is to be sorted and the
3932 : * sort is expected to spill to disk. This is because the final merge
3933 : * pass can be done on-the-fly if it doesn't have to support mark/restore.
3934 : * We don't try to adjust the cost estimates for this consideration,
3935 : * though.
3936 : *
3937 : * Since materialization is a performance optimization in this case,
3938 : * rather than necessary for correctness, we skip it if enable_material is
3939 : * off.
3940 : */
3941 146690 : else if (enable_material && innersortkeys != NIL &&
3942 140934 : relation_byte_size(inner_path_rows,
3943 140934 : inner_path->pathtarget->width) >
3944 140934 : (work_mem * 1024L))
3945 172 : path->materialize_inner = true;
3946 : else
3947 146518 : path->materialize_inner = false;
3948 :
3949 : /* Charge the right incremental cost for the chosen case */
3950 252608 : if (path->materialize_inner)
3951 3728 : run_cost += mat_inner_cost;
3952 : else
3953 248880 : run_cost += bare_inner_cost;
3954 :
3955 : /* CPU costs */
3956 :
3957 : /*
3958 : * The number of tuple comparisons needed is approximately number of outer
3959 : * rows plus number of inner rows plus number of rescanned tuples (can we
3960 : * refine this?). At each one, we need to evaluate the mergejoin quals.
3961 : */
3962 252608 : startup_cost += merge_qual_cost.startup;
3963 252608 : startup_cost += merge_qual_cost.per_tuple *
3964 252608 : (outer_skip_rows + inner_skip_rows * rescanratio);
3965 252608 : run_cost += merge_qual_cost.per_tuple *
3966 252608 : ((outer_rows - outer_skip_rows) +
3967 252608 : (inner_rows - inner_skip_rows) * rescanratio);
3968 :
3969 : /*
3970 : * For each tuple that gets through the mergejoin proper, we charge
3971 : * cpu_tuple_cost plus the cost of evaluating additional restriction
3972 : * clauses that are to be applied at the join. (This is pessimistic since
3973 : * not all of the quals may get evaluated at each tuple.)
3974 : *
3975 : * Note: we could adjust for SEMI/ANTI joins skipping some qual
3976 : * evaluations here, but it's probably not worth the trouble.
3977 : */
3978 252608 : startup_cost += qp_qual_cost.startup;
3979 252608 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
3980 252608 : run_cost += cpu_per_tuple * mergejointuples;
3981 :
3982 : /* tlist eval costs are paid per output row, not per tuple scanned */
3983 252608 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3984 252608 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3985 :
3986 252608 : path->jpath.path.startup_cost = startup_cost;
3987 252608 : path->jpath.path.total_cost = startup_cost + run_cost;
3988 252608 : }
3989 :
3990 : /*
3991 : * run mergejoinscansel() with caching
3992 : */
3993 : static MergeScanSelCache *
3994 1003658 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
3995 : {
3996 : MergeScanSelCache *cache;
3997 : ListCell *lc;
3998 : Selectivity leftstartsel,
3999 : leftendsel,
4000 : rightstartsel,
4001 : rightendsel;
4002 : MemoryContext oldcontext;
4003 :
4004 : /* Do we have this result already? */
4005 1003700 : foreach(lc, rinfo->scansel_cache)
4006 : {
4007 907178 : cache = (MergeScanSelCache *) lfirst(lc);
4008 907178 : if (cache->opfamily == pathkey->pk_opfamily &&
4009 907178 : cache->collation == pathkey->pk_eclass->ec_collation &&
4010 907178 : cache->strategy == pathkey->pk_strategy &&
4011 907136 : cache->nulls_first == pathkey->pk_nulls_first)
4012 907136 : return cache;
4013 : }
4014 :
4015 : /* Nope, do the computation */
4016 96522 : mergejoinscansel(root,
4017 96522 : (Node *) rinfo->clause,
4018 : pathkey->pk_opfamily,
4019 : pathkey->pk_strategy,
4020 96522 : pathkey->pk_nulls_first,
4021 : &leftstartsel,
4022 : &leftendsel,
4023 : &rightstartsel,
4024 : &rightendsel);
4025 :
4026 : /* Cache the result in suitably long-lived workspace */
4027 96522 : oldcontext = MemoryContextSwitchTo(root->planner_cxt);
4028 :
4029 96522 : cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
4030 96522 : cache->opfamily = pathkey->pk_opfamily;
4031 96522 : cache->collation = pathkey->pk_eclass->ec_collation;
4032 96522 : cache->strategy = pathkey->pk_strategy;
4033 96522 : cache->nulls_first = pathkey->pk_nulls_first;
4034 96522 : cache->leftstartsel = leftstartsel;
4035 96522 : cache->leftendsel = leftendsel;
4036 96522 : cache->rightstartsel = rightstartsel;
4037 96522 : cache->rightendsel = rightendsel;
4038 :
4039 96522 : rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
4040 :
4041 96522 : MemoryContextSwitchTo(oldcontext);
4042 :
4043 96522 : return cache;
4044 : }
4045 :
4046 : /*
4047 : * initial_cost_hashjoin
4048 : * Preliminary estimate of the cost of a hashjoin path.
4049 : *
4050 : * This must quickly produce lower-bound estimates of the path's startup and
4051 : * total costs. If we are unable to eliminate the proposed path from
4052 : * consideration using the lower bounds, final_cost_hashjoin will be called
4053 : * to obtain the final estimates.
4054 : *
4055 : * The exact division of labor between this function and final_cost_hashjoin
4056 : * is private to them, and represents a tradeoff between speed of the initial
4057 : * estimate and getting a tight lower bound. We choose not to examine the
4058 : * join quals here (other than by counting the number of hash clauses),
4059 : * so we can't do much with CPU costs. We do assume that
4060 : * ExecChooseHashTableSize is cheap enough to use here.
4061 : *
4062 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
4063 : * other data to be used by final_cost_hashjoin
4064 : * 'jointype' is the type of join to be performed
4065 : * 'hashclauses' is the list of joinclauses to be used as hash clauses
4066 : * 'outer_path' is the outer input to the join
4067 : * 'inner_path' is the inner input to the join
4068 : * 'extra' contains miscellaneous information about the join
4069 : * 'parallel_hash' indicates that inner_path is partial and that a shared
4070 : * hash table will be built in parallel
4071 : */
4072 : void
4073 538146 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
4074 : JoinType jointype,
4075 : List *hashclauses,
4076 : Path *outer_path, Path *inner_path,
4077 : JoinPathExtraData *extra,
4078 : bool parallel_hash)
4079 : {
4080 538146 : Cost startup_cost = 0;
4081 538146 : Cost run_cost = 0;
4082 538146 : double outer_path_rows = outer_path->rows;
4083 538146 : double inner_path_rows = inner_path->rows;
4084 538146 : double inner_path_rows_total = inner_path_rows;
4085 538146 : int num_hashclauses = list_length(hashclauses);
4086 : int numbuckets;
4087 : int numbatches;
4088 : int num_skew_mcvs;
4089 : size_t space_allowed; /* unused */
4090 :
4091 : /* cost of source data */
4092 538146 : startup_cost += outer_path->startup_cost;
4093 538146 : run_cost += outer_path->total_cost - outer_path->startup_cost;
4094 538146 : startup_cost += inner_path->total_cost;
4095 :
4096 : /*
4097 : * Cost of computing hash function: must do it once per input tuple. We
4098 : * charge one cpu_operator_cost for each column's hash function. Also,
4099 : * tack on one cpu_tuple_cost per inner row, to model the costs of
4100 : * inserting the row into the hashtable.
4101 : *
4102 : * XXX when a hashclause is more complex than a single operator, we really
4103 : * should charge the extra eval costs of the left or right side, as
4104 : * appropriate, here. This seems more work than it's worth at the moment.
4105 : */
4106 538146 : startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
4107 538146 : * inner_path_rows;
4108 538146 : run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
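/*
 * Hypothetical example with the default cpu_operator_cost (0.0025)
 * and cpu_tuple_cost (0.01): for 2 hash clauses, 1000 inner rows and
 * 10000 outer rows, startup_cost grows by (0.0025 * 2 + 0.01) * 1000
 * = 15 and run_cost by 0.0025 * 2 * 10000 = 50.
 */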
4109 :
4110 : /*
4111 : * If this is a parallel hash build, then the value we have for
4112 : * inner_rows_total currently refers only to the rows returned by each
4113 : * participant. For shared hash table size estimation, we need the total
4114 : * number, so we need to undo the division.
4115 : */
4116 538146 : if (parallel_hash)
4117 11574 : inner_path_rows_total *= get_parallel_divisor(inner_path);
4118 :
4119 : /*
4120 : * Get hash table size that executor would use for inner relation.
4121 : *
4122 : * XXX for the moment, always assume that skew optimization will be
4123 : * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
4124 : * trying to determine that for sure.
4125 : *
4126 : * XXX at some point it might be interesting to try to account for skew
4127 : * optimization in the cost estimate, but for now, we don't.
4128 : */
4129 538146 : ExecChooseHashTableSize(inner_path_rows_total,
4130 538146 : inner_path->pathtarget->width,
4131 : true, /* useskew */
4132 : parallel_hash, /* try_combined_hash_mem */
4133 : outer_path->parallel_workers,
4134 : &space_allowed,
4135 : &numbuckets,
4136 : &numbatches,
4137 : &num_skew_mcvs);
4138 :
4139 : /*
4140 : * If inner relation is too big then we will need to "batch" the join,
4141 : * which implies writing and reading most of the tuples to disk an extra
4142 : * time. Charge seq_page_cost per page, since the I/O should be nice and
4143 : * sequential. Writing the inner rel counts as startup cost, all the rest
4144 : * as run cost.
4145 : */
4146 538146 : if (numbatches > 1)
4147 : {
4148 4430 : double outerpages = page_size(outer_path_rows,
4149 4430 : outer_path->pathtarget->width);
4150 4430 : double innerpages = page_size(inner_path_rows,
4151 4430 : inner_path->pathtarget->width);
4152 :
4153 4430 : startup_cost += seq_page_cost * innerpages;
4154 4430 : run_cost += seq_page_cost * (innerpages + 2 * outerpages);
4155 : }
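/*
 * Hypothetical example of the batching charge above: with innerpages =
 * 100, outerpages = 400 and the default seq_page_cost of 1.0,
 * startup_cost grows by 100 for writing out the inner rel, and
 * run_cost by 100 + 2 * 400 = 900 for rereading the inner rel plus
 * writing and rereading the outer rel.
 */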
4156 :
4157 : /* CPU costs left for later */
4158 :
4159 : /* Public result fields */
4160 538146 : workspace->startup_cost = startup_cost;
4161 538146 : workspace->total_cost = startup_cost + run_cost;
4162 : /* Save private data for final_cost_hashjoin */
4163 538146 : workspace->run_cost = run_cost;
4164 538146 : workspace->numbuckets = numbuckets;
4165 538146 : workspace->numbatches = numbatches;
4166 538146 : workspace->inner_rows_total = inner_path_rows_total;
4167 538146 : }
4168 :
4169 : /*
4170 : * final_cost_hashjoin
4171 : * Final estimate of the cost and result size of a hashjoin path.
4172 : *
4173 : * Note: the numbatches estimate is also saved into 'path' for use later
4174 : *
4175 : * 'path' is already filled in except for the rows and cost fields and
4176 : * num_batches
4177 : * 'workspace' is the result from initial_cost_hashjoin
4178 : * 'extra' contains miscellaneous information about the join
4179 : */
4180 : void
4181 221494 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4182 : JoinCostWorkspace *workspace,
4183 : JoinPathExtraData *extra)
4184 : {
4185 221494 : Path *outer_path = path->jpath.outerjoinpath;
4186 221494 : Path *inner_path = path->jpath.innerjoinpath;
4187 221494 : double outer_path_rows = outer_path->rows;
4188 221494 : double inner_path_rows = inner_path->rows;
4189 221494 : double inner_path_rows_total = workspace->inner_rows_total;
4190 221494 : List *hashclauses = path->path_hashclauses;
4191 221494 : Cost startup_cost = workspace->startup_cost;
4192 221494 : Cost run_cost = workspace->run_cost;
4193 221494 : int numbuckets = workspace->numbuckets;
4194 221494 : int numbatches = workspace->numbatches;
4195 : Cost cpu_per_tuple;
4196 : QualCost hash_qual_cost;
4197 : QualCost qp_qual_cost;
4198 : double hashjointuples;
4199 : double virtualbuckets;
4200 : Selectivity innerbucketsize;
4201 : Selectivity innermcvfreq;
4202 : ListCell *hcl;
4203 :
4204 : /* Mark the path with the correct row estimate */
4205 221494 : if (path->jpath.path.param_info)
4206 1246 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4207 : else
4208 220248 : path->jpath.path.rows = path->jpath.path.parent->rows;
4209 :
4210 : /* For partial paths, scale row estimate. */
4211 221494 : if (path->jpath.path.parallel_workers > 0)
4212 : {
4213 10562 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
4214 :
4215 10562 : path->jpath.path.rows =
4216 10562 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
4217 : }
4218 :
4219 : /*
4220 : * We could include disable_cost in the preliminary estimate, but that
4221 : * would amount to optimizing for the case where the join method is
4222 : * disabled, which doesn't seem like the way to bet.
4223 : */
4224 221494 : if (!enable_hashjoin)
4225 204 : startup_cost += disable_cost;
4226 :
4227 : /* mark the path with estimated # of batches */
4228 221494 : path->num_batches = numbatches;
4229 :
4230 : /* store the total number of tuples (sum of partial row estimates) */
4231 221494 : path->inner_rows_total = inner_path_rows_total;
4232 :
4233 : /* and compute the number of "virtual" buckets in the whole join */
4234 221494 : virtualbuckets = (double) numbuckets * (double) numbatches;
4235 :
4236 : /*
4237 : * Determine bucketsize fraction and MCV frequency for the inner relation.
4238 : * We use the smallest bucketsize or MCV frequency estimated for any
4239 : * individual hashclause; this is undoubtedly conservative.
4240 : *
4241 : * BUT: if inner relation has been unique-ified, we can assume it's good
4242 : * for hashing. This is important both because it's the right answer, and
4243 : * because we avoid contaminating the cache with a value that's wrong for
4244 : * non-unique-ified paths.
4245 : */
4246 221494 : if (IsA(inner_path, UniquePath))
4247 : {
4248 2014 : innerbucketsize = 1.0 / virtualbuckets;
4249 2014 : innermcvfreq = 0.0;
4250 : }
4251 : else
4252 : {
4253 219480 : innerbucketsize = 1.0;
4254 219480 : innermcvfreq = 1.0;
4255 460908 : foreach(hcl, hashclauses)
4256 : {
4257 241428 : RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
4258 : Selectivity thisbucketsize;
4259 : Selectivity thismcvfreq;
4260 :
4261 : /*
4262 : * First we have to figure out which side of the hashjoin clause
4263 : * is the inner side.
4264 : *
4265 : * Since we tend to visit the same clauses over and over when
4266 : * planning a large query, we cache the bucket stats estimates in
4267 : * the RestrictInfo node to avoid repeated lookups of statistics.
4268 : */
4269 241428 : if (bms_is_subset(restrictinfo->right_relids,
4270 241428 : inner_path->parent->relids))
4271 : {
4272 : /* righthand side is inner */
4273 130046 : thisbucketsize = restrictinfo->right_bucketsize;
4274 130046 : if (thisbucketsize < 0)
4275 : {
4276 : /* not cached yet */
4277 71088 : estimate_hash_bucket_stats(root,
4278 71088 : get_rightop(restrictinfo->clause),
4279 : virtualbuckets,
4280 : &restrictinfo->right_mcvfreq,
4281 : &restrictinfo->right_bucketsize);
4282 71088 : thisbucketsize = restrictinfo->right_bucketsize;
4283 : }
4284 130046 : thismcvfreq = restrictinfo->right_mcvfreq;
4285 : }
4286 : else
4287 : {
4288 : Assert(bms_is_subset(restrictinfo->left_relids,
4289 : inner_path->parent->relids));
4290 : /* lefthand side is inner */
4291 111382 : thisbucketsize = restrictinfo->left_bucketsize;
4292 111382 : if (thisbucketsize < 0)
4293 : {
4294 : /* not cached yet */
4295 60096 : estimate_hash_bucket_stats(root,
4296 60096 : get_leftop(restrictinfo->clause),
4297 : virtualbuckets,
4298 : &restrictinfo->left_mcvfreq,
4299 : &restrictinfo->left_bucketsize);
4300 60096 : thisbucketsize = restrictinfo->left_bucketsize;
4301 : }
4302 111382 : thismcvfreq = restrictinfo->left_mcvfreq;
4303 : }
4304 :
4305 241428 : if (innerbucketsize > thisbucketsize)
4306 157272 : innerbucketsize = thisbucketsize;
4307 241428 : if (innermcvfreq > thismcvfreq)
4308 224192 : innermcvfreq = thismcvfreq;
4309 : }
4310 : }
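     : /*
     :  * Worked example (illustrative numbers only): given two hashclauses
     :  * whose cached estimates are bucketsize fractions {0.10, 0.02} and
     :  * MCV frequencies {0.05, 0.01}, the loop above leaves
     :  * innerbucketsize = 0.02 and innermcvfreq = 0.01, i.e. the most
     :  * selective clause determines the expected bucket load.
     :  */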
4311 :
4312 : /*
4313 : * If the bucket holding the inner MCV would exceed hash_mem, we don't
4314 : * want to hash unless there is really no other alternative, so apply
4315 : * disable_cost. (The executor normally copes with excessive memory usage
4316 : * by splitting batches, but obviously it cannot separate equal values
4317 : * that way, so it will be unable to drive the batch size below hash_mem
4318 : * when this is true.)
4319 : */
4320 221494 : if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
4321 442988 : inner_path->pathtarget->width) > get_hash_memory_limit())
4322 0 : startup_cost += disable_cost;
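     : /*
     :  * Rough illustration (assumed numbers): if innermcvfreq = 0.2 and
     :  * inner_path_rows = 1,000,000, the most common value alone
     :  * contributes ~200,000 rows that must share one in-memory bucket;
     :  * at ~100 bytes per tuple plus overhead that is tens of megabytes,
     :  * far beyond a typical few-megabyte get_hash_memory_limit(), so the
     :  * disable_cost penalty above would apply.
     :  */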
4323 :
4324 : /*
4325 : * Compute cost of the hashquals and qpquals (other restriction clauses)
4326 : * separately.
4327 : */
4328 221494 : cost_qual_eval(&hash_qual_cost, hashclauses, root);
4329 221494 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4330 221494 : qp_qual_cost.startup -= hash_qual_cost.startup;
4331 221494 : qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4332 :
4333 : /* CPU costs */
4334 :
4335 221494 : if (path->jpath.jointype == JOIN_SEMI ||
4336 218574 : path->jpath.jointype == JOIN_ANTI ||
4337 214422 : extra->inner_unique)
4338 95584 : {
4339 : double outer_matched_rows;
4340 : Selectivity inner_scan_frac;
4341 :
4342 : /*
4343 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
4344 : * executor will stop after the first match.
4345 : *
4346 : * For an outer-rel row that has at least one match, we can expect the
4347 : * bucket scan to stop after a fraction 1/(match_count+1) of the
4348 : * bucket's rows, if the matches are evenly distributed. Since they
4349 : * probably aren't quite evenly distributed, we apply a fuzz factor of
4350 : * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4351 : * to clamp inner_scan_frac to at most 1.0; but since match_count is
4352 : * at least 1, no such clamp is needed now.)
4353 : */
4354 95584 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4355 95584 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
4356 :
4357 95584 : startup_cost += hash_qual_cost.startup;
4358 191168 : run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4359 95584 : clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
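     : /*
     :  * Worked example (illustrative numbers only): with match_count = 3,
     :  * inner_scan_frac = 2.0 / (3 + 1) = 0.5.  If inner_path_rows = 10000
     :  * and innerbucketsize = 0.01, a matched outer row is expected to
     :  * visit clamp_row_est(10000 * 0.01 * 0.5) = 50 bucket entries, and
     :  * with the 0.5 fuzz factor the charge comes to
     :  * hash_qual_cost.per_tuple * 25 per matched outer row.
     :  */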
4360 :
4361 : /*
4362 : * For unmatched outer-rel rows, the picture is quite a lot different.
4363 : * In the first place, there is no reason to assume that these rows
4364 : * preferentially hit heavily-populated buckets; instead assume they
4365 : * are uncorrelated with the inner distribution and so they see an
4366 : * average bucket size of inner_path_rows / virtualbuckets. In the
4367 : * second place, it seems likely that they will have few if any exact
4368 : * hash-code matches and so very few of the tuples in the bucket will
4369 : * actually require eval of the hash quals. We don't have any good
4370 : * way to estimate how many will, but for the moment assume that the
4371 : * effective cost per bucket entry is one-tenth what it is for
4372 : * matchable tuples.
4373 : */
4374 191168 : run_cost += hash_qual_cost.per_tuple *
4375 191168 : (outer_path_rows - outer_matched_rows) *
4376 95584 : clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
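     : /*
     :  * Continuing the example above (illustrative numbers only): with
     :  * virtualbuckets = 1024, an unmatched outer row sees an average
     :  * bucket of clamp_row_est(10000 / 1024) = 10 entries, charged at
     :  * per_tuple * 10 * 0.05 = per_tuple * 0.5, one-tenth of the rate
     :  * used for matchable tuples.
     :  */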
4377 :
4378 : /* Get # of tuples that will pass the basic join */
4379 95584 : if (path->jpath.jointype == JOIN_ANTI)
4380 4152 : hashjointuples = outer_path_rows - outer_matched_rows;
4381 : else
4382 91432 : hashjointuples = outer_matched_rows;
4383 : }
4384 : else
4385 : {
4386 : /*
4387 : * The number of tuple comparisons needed is the number of outer
4388 : * tuples times the typical number of tuples in a hash bucket, which
4389 : * is the inner relation size times its bucketsize fraction. At each
4390 : * one, we need to evaluate the hashjoin quals. But actually,
4391 : * charging the full qual eval cost at each tuple is pessimistic,
4392 : * since we don't evaluate the quals unless the hash values match
4393 : * exactly. For lack of a better idea, halve the cost estimate to
4394 : * allow for that.
4395 : */
4396 125910 : startup_cost += hash_qual_cost.startup;
4397 251820 : run_cost += hash_qual_cost.per_tuple * outer_path_rows *
4398 125910 : clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
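     : /*
     :  * Worked example (illustrative numbers only): with
     :  * outer_path_rows = 1000, inner_path_rows = 10000 and
     :  * innerbucketsize = 0.01, each outer tuple probes an expected
     :  * bucket of 100 entries, or 100,000 comparisons in all; the 0.5
     :  * factor reduces the charge to hash_qual_cost.per_tuple * 50,000.
     :  */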
4399 :
4400 : /*
4401 : * Get approx # tuples passing the hashquals. We use
4402 : * approx_tuple_count here because we need an estimate done with
4403 : * JOIN_INNER semantics.
4404 : */
4405 125910 : hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4406 : }
4407 :
4408 : /*
4409 : * For each tuple that gets through the hashjoin proper, we charge
4410 : * cpu_tuple_cost plus the cost of evaluating additional restriction
4411 : * clauses that are to be applied at the join. (This is pessimistic since
4412 : * not all of the quals may get evaluated at each tuple.)
4413 : */
4414 221494 : startup_cost += qp_qual_cost.startup;
4415 221494 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4416 221494 : run_cost += cpu_per_tuple * hashjointuples;
4417 :
4418 : /* tlist eval costs are paid per output row, not per tuple scanned */
4419 221494 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4420 221494 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4421 :
4422 221494 : path->jpath.path.startup_cost = startup_cost;
4423 221494 : path->jpath.path.total_cost = startup_cost + run_cost;
4424 221494 : }
4425 :
4426 :
4427 : /*
4428 : * cost_subplan
4429 : * Figure the costs for a SubPlan (or initplan).
4430 : *
4431 : * Note: we could dig the subplan's Plan out of the root list, but in practice
4432 : * all callers have it handy already, so we make them pass it.
4433 : */
4434 : void
4435 36050 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
4436 : {
4437 : QualCost sp_cost;
4438 :
4439 : /* Figure any cost for evaluating the testexpr */
4440 36050 : cost_qual_eval(&sp_cost,
4441 36050 : make_ands_implicit((Expr *) subplan->testexpr),
4442 : root);
4443 :
4444 36050 : if (subplan->useHashTable)
4445 : {
4446 : /*
4447 : * If we are using a hash table for the subquery outputs, then the
4448 : * cost of evaluating the query is a one-time cost. We charge one
4449 : * cpu_operator_cost per tuple for the work of loading the hashtable,
4450 : * too.
4451 : */
4452 1886 : sp_cost.startup += plan->total_cost +
4453 1886 : cpu_operator_cost * plan->plan_rows;
4454 :
4455 : /*
4456 : * The per-tuple costs include the cost of evaluating the lefthand
4457 : * expressions, plus the cost of probing the hashtable. We already
4458 : * accounted for the lefthand expressions as part of the testexpr, and
4459 : * will also have counted one cpu_operator_cost for each comparison
4460 : * operator. That is probably too low for the probing cost, but it's
4461 : * hard to make a better estimate, so live with it for now.
4462 : */
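     : /*
     :  * Worked example (illustrative numbers only): for a subplan with
     :  * total_cost = 1000 and plan_rows = 500, the one-time charge is
     :  * 1000 + 500 * cpu_operator_cost, i.e. 1001.25 at the default
     :  * cpu_operator_cost of 0.0025; each later probe then costs only
     :  * the testexpr evaluation counted above.
     :  */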
4463 : }
4464 : else
4465 : {
4466 : /*
4467 : * Otherwise we will be rescanning the subplan output on each
4468 : * evaluation. We need to estimate how much of the output we will
4469 : * actually need to scan. NOTE: this logic should agree with the
4470 : * tuple_fraction estimates used by make_subplan() in
4471 : * plan/subselect.c.
4472 : */
4473 34164 : Cost plan_run_cost = plan->total_cost - plan->startup_cost;
4474 :
4475 34164 : if (subplan->subLinkType == EXISTS_SUBLINK)
4476 : {
4477 : /* we only need to fetch 1 tuple; clamp to avoid zero divide */
4478 2040 : sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4479 : }
4480 32124 : else if (subplan->subLinkType == ALL_SUBLINK ||
4481 32106 : subplan->subLinkType == ANY_SUBLINK)
4482 : {
4483 : /* assume we need 50% of the tuples */
4484 100 : sp_cost.per_tuple += 0.50 * plan_run_cost;
4485 : /* also charge a cpu_operator_cost per row examined */
4486 100 : sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4487 : }
4488 : else
4489 : {
4490 : /* assume we need all tuples */
4491 32024 : sp_cost.per_tuple += plan_run_cost;
4492 : }
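     : /*
     :  * Worked example (illustrative numbers only): with
     :  * plan_run_cost = 100 and plan_rows = 50, EXISTS charges
     :  * 100 / 50 = 2 per call; ANY/ALL charges 0.5 * 100 plus
     :  * 0.5 * 50 * cpu_operator_cost, about 50.06 at default settings;
     :  * any other sublink type charges the full 100.
     :  */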
4493 :
4494 : /*
4495 : * Also account for subplan's startup cost. If the subplan is
4496 : * uncorrelated or undirect correlated, AND its topmost node is one
4497 : * that materializes its output, assume that we'll only need to pay
4498 : * its startup cost once; otherwise assume we pay the startup cost
4499 : * every time.
4500 : */
4501 45290 : if (subplan->parParam == NIL &&
4502 11126 : ExecMaterializesOutput(nodeTag(plan)))
4503 498 : sp_cost.startup += plan->startup_cost;
4504 : else
4505 33666 : sp_cost.per_tuple += plan->startup_cost;
4506 : }
4507 :
4508 36050 : subplan->startup_cost = sp_cost.startup;
4509 36050 : subplan->per_call_cost = sp_cost.per_tuple;
4510 36050 : }
4511 :
4512 :
4513 : /*
4514 : * cost_rescan
4515 : * Given a finished Path, estimate the costs of rescanning it after
4516 : * having done so the first time. For some Path types a rescan is
4517 : * cheaper than an original scan (if no parameters change), and this
4518 : * function embodies knowledge about that. The default is to return
4519 : * the same costs stored in the Path. (Note that the cost estimates
4520 : * actually stored in Paths are always for first scans.)
4521 : *
4522 : * This function is not currently intended to model effects such as rescans
4523 : * being cheaper due to disk block caching; what we are concerned with is
4524 : * plan types wherein the executor caches results explicitly, or doesn't
4525 : * redo startup calculations, etc.
4526 : */
4527 : static void
4528 2266580 : cost_rescan(PlannerInfo *root, Path *path,
4529 : Cost *rescan_startup_cost, /* output parameters */
4530 : Cost *rescan_total_cost)
4531 : {
4532 2266580 : switch (path->pathtype)
4533 : {
4534 39950 : case T_FunctionScan:
4535 :
4536 : /*
4537 : * Currently, nodeFunctionscan.c always executes the function to
4538 : * completion before returning any rows, and caches the results in
4539 : * a tuplestore. So the function eval cost is all startup cost
4540 : * and isn't paid over again on rescans. However, all run costs
4541 : * will be paid over again.
4542 : */
4543 39950 : *rescan_startup_cost = 0;
4544 39950 : *rescan_total_cost = path->total_cost - path->startup_cost;
4545 39950 : break;
4546 100920 : case T_HashJoin:
4547 :
4548 : /*
4549 : * If it's a single-batch join, we don't need to rebuild the hash
4550 : * table during a rescan.
4551 : */
4552 100920 : if (((HashPath *) path)->num_batches == 1)
4553 : {
4554 : /* Startup cost is exactly the cost of hash table building */
4555 100920 : *rescan_startup_cost = 0;
4556 100920 : *rescan_total_cost = path->total_cost - path->startup_cost;
4557 : }
4558 : else
4559 : {
4560 : /* Otherwise, no special treatment */
4561 0 : *rescan_startup_cost = path->startup_cost;
4562 0 : *rescan_total_cost = path->total_cost;
4563 : }
4564 100920 : break;
4565 6604 : case T_CteScan:
4566 : case T_WorkTableScan:
4567 : {
4568 : /*
4569 : * These plan types materialize their final result in a
4570 : * tuplestore or tuplesort object. So the rescan cost is only
4571 : * cpu_tuple_cost per tuple, unless the result is large enough
4572 : * to spill to disk.
4573 : */
4574 6604 : Cost run_cost = cpu_tuple_cost * path->rows;
4575 6604 : double nbytes = relation_byte_size(path->rows,
4576 6604 : path->pathtarget->width);
4577 6604 : long work_mem_bytes = work_mem * 1024L;
4578 :
4579 6604 : if (nbytes > work_mem_bytes)
4580 : {
4581 : /* It will spill, so account for re-read cost */
4582 160 : double npages = ceil(nbytes / BLCKSZ);
4583 :
4584 160 : run_cost += seq_page_cost * npages;
4585 : }
4586 6604 : *rescan_startup_cost = 0;
4587 6604 : *rescan_total_cost = run_cost;
4588 : }
4589 6604 : break;
4590 765176 : case T_Material:
4591 : case T_Sort:
4592 : {
4593 : /*
4594 : * These plan types not only materialize their results, but do
4595 : * not implement qual filtering or projection. So they are
4596 : * even cheaper to rescan than the ones above. We charge only
4597 : * cpu_operator_cost per tuple. (Note: keep that in sync with
4598 : * the run_cost charge in cost_sort, and also see comments in
4599 : * cost_material before you change it.)
4600 : */
4601 765176 : Cost run_cost = cpu_operator_cost * path->rows;
4602 765176 : double nbytes = relation_byte_size(path->rows,
4603 765176 : path->pathtarget->width);
4604 765176 : long work_mem_bytes = work_mem * 1024L;
4605 :
4606 765176 : if (nbytes > work_mem_bytes)
4607 : {
4608 : /* It will spill, so account for re-read cost */
4609 9004 : double npages = ceil(nbytes / BLCKSZ);
4610 :
4611 9004 : run_cost += seq_page_cost * npages;
4612 : }
4613 765176 : *rescan_startup_cost = 0;
4614 765176 : *rescan_total_cost = run_cost;
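     : /*
     :  * Worked example (illustrative numbers only): rescanning a
     :  * materialized result of 1,000,000 rows costs
     :  * cpu_operator_cost * 1e6 = 2500 at default settings; if the
     :  * result is, say, ~120 MB against a 4 MB work_mem, it spills,
     :  * adding seq_page_cost * ceil(125829120 / 8192) = 15360 with the
     :  * default 8 kB BLCKSZ.
     :  */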
4615 : }
4616 765176 : break;
4617 226788 : case T_Memoize:
4618 : /* All the hard work is done by cost_memoize_rescan */
4619 226788 : cost_memoize_rescan(root, (MemoizePath *) path,
4620 : rescan_startup_cost, rescan_total_cost);
4621 226788 : break;
4622 1127142 : default:
4623 1127142 : *rescan_startup_cost = path->startup_cost;
4624 1127142 : *rescan_total_cost = path->total_cost;
4625 1127142 : break;
4626 : }
4627 2266580 : }
4628 :
4629 :
4630 : /*
4631 : * cost_qual_eval
4632 : * Estimate the CPU costs of evaluating a WHERE clause.
4633 : * The input can be either an implicitly-ANDed list of boolean
4634 : * expressions, or a list of RestrictInfo nodes. (The latter is
4635 : * preferred since it allows caching of the results.)
4636 : * The result includes both a one-time (startup) component,
4637 : * and a per-evaluation component.
4638 : */
4639 : void
4640 3199214 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4641 : {
4642 : cost_qual_eval_context context;
4643 : ListCell *l;
4644 :
4645 3199214 : context.root = root;
4646 3199214 : context.total.startup = 0;
4647 3199214 : context.total.per_tuple = 0;
4648 :
4649 : /* We don't charge any cost for the implicit ANDing at top level ... */
4650 :
4651 5957532 : foreach(l, quals)
4652 : {
4653 2758318 : Node *qual = (Node *) lfirst(l);
4654 :
4655 2758318 : cost_qual_eval_walker(qual, &context);
4656 : }
4657 :
4658 3199214 : *cost = context.total;
4659 3199214 : }
4660 :
4661 : /*
4662 : * cost_qual_eval_node
4663 : * As above, for a single RestrictInfo or expression.
4664 : */
4665 : void
4666 1516438 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
4667 : {
4668 : cost_qual_eval_context context;
4669 :
4670 1516438 : context.root = root;
4671 1516438 : context.total.startup = 0;
4672 1516438 : context.total.per_tuple = 0;
4673 :
4674 1516438 : cost_qual_eval_walker(qual, &context);
4675 :
4676 1516438 : *cost = context.total;
4677 1516438 : }
4678 :
4679 : static bool
4680 7250000 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4681 : {
4682 7250000 : if (node == NULL)
4683 94962 : return false;
4684 :
4685 : /*
4686 : * RestrictInfo nodes contain an eval_cost field reserved for this
4687 : * routine's use, so that it's not necessary to evaluate the qual clause's
4688 : * cost more than once. If the clause's cost hasn't been computed yet,
4689 : * the field's startup value will contain -1.
4690 : */
4691 7155038 : if (IsA(node, RestrictInfo))
4692 : {
4693 2893986 : RestrictInfo *rinfo = (RestrictInfo *) node;
4694 :
4695 2893986 : if (rinfo->eval_cost.startup < 0)
4696 : {
4697 : cost_qual_eval_context locContext;
4698 :
4699 489978 : locContext.root = context->root;
4700 489978 : locContext.total.startup = 0;
4701 489978 : locContext.total.per_tuple = 0;
4702 :
4703 : /*
4704 : * For an OR clause, recurse into the marked-up tree so that we
4705 : * set the eval_cost for contained RestrictInfos too.
4706 : */
4707 489978 : if (rinfo->orclause)
4708 7798 : cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4709 : else
4710 482180 : cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4711 :
4712 : /*
4713 : * If the RestrictInfo is marked pseudoconstant, it will be tested
4714 : * only once, so treat its cost as all startup cost.
4715 : */
4716 489978 : if (rinfo->pseudoconstant)
4717 : {
4718 : /* count one execution during startup */
4719 8648 : locContext.total.startup += locContext.total.per_tuple;
4720 8648 : locContext.total.per_tuple = 0;
4721 : }
4722 489978 : rinfo->eval_cost = locContext.total;
4723 : }
4724 2893986 : context->total.startup += rinfo->eval_cost.startup;
4725 2893986 : context->total.per_tuple += rinfo->eval_cost.per_tuple;
4726 : /* do NOT recurse into children */
4727 2893986 : return false;
4728 : }
4729 :
4730 : /*
4731 : * For each operator or function node in the given tree, we charge the
4732 : * estimated execution cost given by pg_proc.procost (remember to multiply
4733 : * this by cpu_operator_cost).
4734 : *
4735 : * Vars and Consts are charged zero, and so are boolean operators (AND,
4736 : * OR, NOT). Simplistic, but a lot better than no model at all.
4737 : *
4738 : * Should we try to account for the possibility of short-circuit
4739 : * evaluation of AND/OR? Probably *not*, because that would make the
4740 : * results depend on the clause ordering, and we are not in any position
4741 : * to expect that the current ordering of the clauses is the one that's
4742 : * going to end up being used. The above per-RestrictInfo caching would
4743 : * not mix well with trying to re-order clauses anyway.
4744 : *
4745 : * Another issue that is entirely ignored here is that if a set-returning
4746 : * function is below top level in the tree, the functions/operators above
4747 : * it will need to be evaluated multiple times. In practical use, such
4748 : * cases arise so seldom as to not be worth the added complexity needed;
4749 : * moreover, since our rowcount estimates for functions tend to be pretty
4750 : * phony, the results would also be pretty phony.
4751 : */
4752 4261052 : if (IsA(node, FuncExpr))
4753 : {
4754 298394 : add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
4755 : &context->total);
4756 : }
4757 3962658 : else if (IsA(node, OpExpr) ||
4758 3393532 : IsA(node, DistinctExpr) ||
4759 3392674 : IsA(node, NullIfExpr))
4760 : {
4761 : /* rely on struct equivalence to treat these all alike */
4762 570090 : set_opfuncid((OpExpr *) node);
4763 570090 : add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
4764 : &context->total);
4765 : }
4766 3392568 : else if (IsA(node, ScalarArrayOpExpr))
4767 : {
4768 37104 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
4769 37104 : Node *arraynode = (Node *) lsecond(saop->args);
4770 : QualCost sacosts;
4771 : QualCost hcosts;
4772 37104 : double estarraylen = estimate_array_length(context->root, arraynode);
4773 :
4774 37104 : set_sa_opfuncid(saop);
4775 37104 : sacosts.startup = sacosts.per_tuple = 0;
4776 37104 : add_function_cost(context->root, saop->opfuncid, NULL,
4777 : &sacosts);
4778 :
4779 37104 : if (OidIsValid(saop->hashfuncid))
4780 : {
4781 : /* Handle costs for hashed ScalarArrayOpExpr */
4782 266 : hcosts.startup = hcosts.per_tuple = 0;
4783 :
4784 266 : add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
4785 266 : context->total.startup += sacosts.startup + hcosts.startup;
4786 :
4787 : /* Estimate the cost of building the hashtable. */
4788 266 : context->total.startup += estarraylen * hcosts.per_tuple;
4789 :
4790 : /*
4791 : * XXX should we charge a little bit for sacosts.per_tuple when
4792 : * building the hashtable, or is it ok to assume there will be
4793 : * zero hash collisions?
4794 : */
4795 :
4796 : /*
4797 : * Charge for hashtable lookups. Charge a single hash and a
4798 : * single comparison.
4799 : */
4800 266 : context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
4801 : }
4802 : else
4803 : {
4804 : /*
4805 : * Estimate that the operator will be applied to about half of the
4806 : * array elements before the answer is determined.
4807 : */
4808 36838 : context->total.startup += sacosts.startup;
4809 73676 : context->total.per_tuple += sacosts.per_tuple *
4810 36838 : estimate_array_length(context->root, arraynode) * 0.5;
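     : /*
     :  * Worked example (illustrative numbers only): for "x = ANY(arr)"
     :  * with a 100-element array, this non-hashed path charges
     :  * sacosts.per_tuple * 50 per input row (half the elements); the
     :  * hashed path above instead pays ~100 hash computations once at
     :  * startup to build the table, then one hash plus one comparison
     :  * per input row.
     :  */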
4811 : }
4812 : }
4813 3355464 : else if (IsA(node, Aggref) ||
4814 3305766 : IsA(node, WindowFunc))
4815 : {
4816 : /*
4817 : * Aggref and WindowFunc nodes are (and should be) treated like Vars,
4818 : * ie, zero execution cost in the current model, because they behave
4819 : * essentially like Vars at execution. We disregard the costs of
4820 : * their input expressions for the same reason. The actual execution
4821 : * costs of the aggregate/window functions and their arguments have to
4822 : * be factored into plan-node-specific costing of the Agg or WindowAgg
4823 : * plan node.
4824 : */
4825 53048 : return false; /* don't recurse into children */
4826 : }
4827 3302416 : else if (IsA(node, GroupingFunc))
4828 : {
4829 : /* Treat this as having cost 1 */
4830 350 : context->total.per_tuple += cpu_operator_cost;
4831 350 : return false; /* don't recurse into children */
4832 : }
4833 3302066 : else if (IsA(node, CoerceViaIO))
4834 : {
4835 20054 : CoerceViaIO *iocoerce = (CoerceViaIO *) node;
4836 : Oid iofunc;
4837 : Oid typioparam;
4838 : bool typisvarlena;
4839 :
4840 : /* check the result type's input function */
4841 20054 : getTypeInputInfo(iocoerce->resulttype,
4842 : &iofunc, &typioparam);
4843 20054 : add_function_cost(context->root, iofunc, NULL,
4844 : &context->total);
4845 : /* check the input type's output function */
4846 20054 : getTypeOutputInfo(exprType((Node *) iocoerce->arg),
4847 : &iofunc, &typisvarlena);
4848 20054 : add_function_cost(context->root, iofunc, NULL,
4849 : &context->total);
4850 : }
4851 3282012 : else if (IsA(node, ArrayCoerceExpr))
4852 : {
4853 4506 : ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
4854 : QualCost perelemcost;
4855 :
4856 4506 : cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
4857 : context->root);
4858 4506 : context->total.startup += perelemcost.startup;
4859 4506 : if (perelemcost.per_tuple > 0)
4860 58 : context->total.per_tuple += perelemcost.per_tuple *
4861 58 : estimate_array_length(context->root, (Node *) acoerce->arg);
4862 : }
4863 3277506 : else if (IsA(node, RowCompareExpr))
4864 : {
4865 : /* Conservatively assume we will check all the columns */
4866 156 : RowCompareExpr *rcexpr = (RowCompareExpr *) node;
4867 : ListCell *lc;
4868 :
4869 522 : foreach(lc, rcexpr->opnos)
4870 : {
4871 366 : Oid opid = lfirst_oid(lc);
4872 :
4873 366 : add_function_cost(context->root, get_opcode(opid), NULL,
4874 : &context->total);
4875 : }
4876 : }
4877 3277350 : else if (IsA(node, MinMaxExpr) ||
4878 3277166 : IsA(node, SQLValueFunction) ||
4879 3272826 : IsA(node, XmlExpr) ||
4880 3272136 : IsA(node, CoerceToDomain) ||
4881 3263432 : IsA(node, NextValueExpr) ||
4882 3262974 : IsA(node, JsonExpr))
4883 : {
4884 : /* Treat all these as having cost 1 */
4885 16618 : context->total.per_tuple += cpu_operator_cost;
4886 : }
4887 3260732 : else if (IsA(node, CurrentOfExpr))
4888 : {
4889 : /* Report high cost to prevent selection of anything but TID scan */
4890 394 : context->total.startup += disable_cost;
4891 : }
4892 3260338 : else if (IsA(node, SubLink))
4893 : {
4894 : /* This routine should not be applied to un-planned expressions */
4895 0 : elog(ERROR, "cannot handle unplanned sub-select");
4896 : }
4897 3260338 : else if (IsA(node, SubPlan))
4898 : {
4899 : /*
4900 : * A subplan node in an expression typically indicates that the
4901 : * subplan will be executed on each evaluation, so charge accordingly.
4902 : * (Sub-selects that can be executed as InitPlans have already been
4903 : * removed from the expression.)
4904 : */
4905 35794 : SubPlan *subplan = (SubPlan *) node;
4906 :
4907 35794 : context->total.startup += subplan->startup_cost;
4908 35794 : context->total.per_tuple += subplan->per_call_cost;
4909 :
4910 : /*
4911 : * We don't want to recurse into the testexpr, because it was already
4912 : * counted in the SubPlan node's costs. So we're done.
4913 : */
4914 35794 : return false;
4915 : }
4916 3224544 : else if (IsA(node, AlternativeSubPlan))
4917 : {
4918 : /*
4919 : * Arbitrarily use the first alternative plan for costing. (We should
4920 : * certainly only include one alternative, and we don't yet have
4921 : * enough information to know which one the executor is most likely to
4922 : * use.)
4923 : */
4924 1614 : AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
4925 :
4926 1614 : return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
4927 : context);
4928 : }
4929 3222930 : else if (IsA(node, PlaceHolderVar))
4930 : {
4931 : /*
4932 : * A PlaceHolderVar should be given cost zero when considering general
4933 : * expression evaluation costs. The expense of doing the contained
4934 : * expression is charged as part of the tlist eval costs of the scan
4935 : * or join where the PHV is first computed (see set_rel_width and
4936 : * add_placeholders_to_joinrel). If we charged it again here, we'd be
4937 : * double-counting the cost for each level of plan that the PHV
4938 : * bubbles up through. Hence, return without recursing into the
4939 : * phexpr.
4940 : */
4941 2514 : return false;
4942 : }
4943 :
4944 : /* recurse into children */
4945 4167732 : return expression_tree_walker(node, cost_qual_eval_walker,
4946 : (void *) context);
4947 : }
4948 :
4949 : /*
4950 : * get_restriction_qual_cost
4951 : * Compute evaluation costs of a baserel's restriction quals, plus any
4952 : * movable join quals that have been pushed down to the scan.
4953 : * Results are returned into *qpqual_cost.
4954 : *
4955 : * This is a convenience subroutine that works for seqscans and other cases
4956 : * where all the given quals will be evaluated the hard way. It's not useful
4957 : * for cost_index(), for example, where the index machinery takes care of
4958 : * some of the quals. We assume baserestrictcost was previously set by
4959 : * set_baserel_size_estimates().
4960 : */
4961 : static void
4962 885352 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
4963 : ParamPathInfo *param_info,
4964 : QualCost *qpqual_cost)
4965 : {
4966 885352 : if (param_info)
4967 : {
4968 : /* Include costs of pushed-down clauses */
4969 186958 : cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
4970 :
4971 186958 : qpqual_cost->startup += baserel->baserestrictcost.startup;
4972 186958 : qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
4973 : }
4974 : else
4975 698394 : *qpqual_cost = baserel->baserestrictcost;
4976 885352 : }
4977 :
4978 :
4979 : /*
4980 : * compute_semi_anti_join_factors
4981 : * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
4982 : * can be expected to scan.
4983 : *
4984 : * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
4985 : * inner rows as soon as it finds a match to the current outer row.
4986 : * The same happens if we have detected the inner rel is unique.
4987 : * We should therefore adjust some of the cost components for this effect.
4988 : * This function computes some estimates needed for these adjustments.
4989 : * These estimates will be the same regardless of the particular paths used
4990 : * for the outer and inner relation, so we compute these once and then pass
4991 : * them to all the join cost estimation functions.
4992 : *
4993 : * Input parameters:
4994 : * joinrel: join relation under consideration
4995 : * outerrel: outer relation under consideration
4996 : * innerrel: inner relation under consideration
4997 : * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
4998 : * sjinfo: SpecialJoinInfo relevant to this join
4999 : * restrictlist: join quals
5000 : * Output parameters:
5001 : * *semifactors is filled in (see pathnodes.h for field definitions)
5002 : */
5003 : void
5004 179164 : compute_semi_anti_join_factors(PlannerInfo *root,
5005 : RelOptInfo *joinrel,
5006 : RelOptInfo *outerrel,
5007 : RelOptInfo *innerrel,
5008 : JoinType jointype,
5009 : SpecialJoinInfo *sjinfo,
5010 : List *restrictlist,
5011 : SemiAntiJoinFactors *semifactors)
5012 : {
5013 : Selectivity jselec;
5014 : Selectivity nselec;
5015 : Selectivity avgmatch;
5016 : SpecialJoinInfo norm_sjinfo;
5017 : List *joinquals;
5018 : ListCell *l;
5019 :
5020 : /*
5021 : * In an ANTI join, we must ignore clauses that are "pushed down", since
5022 : * those won't affect the match logic. In a SEMI join, we do not
5023 : * distinguish joinquals from "pushed down" quals, so just use the whole
5024 : * restrictinfo list. For other outer join types, we should consider only
5025 : * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
5026 : */
5027 179164 : if (IS_OUTER_JOIN(jointype))
5028 : {
5029 76928 : joinquals = NIL;
5030 165192 : foreach(l, restrictlist)
5031 : {
5032 88264 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5033 :
5034 88264 : if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5035 84438 : joinquals = lappend(joinquals, rinfo);
5036 : }
5037 : }
5038 : else
5039 102236 : joinquals = restrictlist;
5040 :
5041 : /*
5042 : * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
5043 : */
5044 179164 : jselec = clauselist_selectivity(root,
5045 : joinquals,
5046 : 0,
5047 : (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
5048 : sjinfo);
5049 :
5050 : /*
5051 : * Also get the normal inner-join selectivity of the join clauses.
5052 : */
5053 179164 : init_dummy_sjinfo(&norm_sjinfo, outerrel->relids, innerrel->relids);
5054 :
5055 179164 : nselec = clauselist_selectivity(root,
5056 : joinquals,
5057 : 0,
5058 : JOIN_INNER,
5059 : &norm_sjinfo);
5060 :
5061 : /* Avoid leaking a lot of ListCells */
5062 179164 : if (IS_OUTER_JOIN(jointype))
5063 76928 : list_free(joinquals);
5064 :
5065 : /*
5066 : * jselec can be interpreted as the fraction of outer-rel rows that have
5067 : * any matches (this is true for both SEMI and ANTI cases). And nselec is
5068 : * the fraction of the Cartesian product that matches. So, the average
5069 : * number of matches for each outer-rel row that has at least one match is
5070 : * nselec * inner_rows / jselec.
5071 : *
5072 : * Note: it is correct to use the inner rel's "rows" count here, even
5073 : * though we might later be considering a parameterized inner path with
5074 : * fewer rows. This is because we have included all the join clauses in
5075 : * the selectivity estimate.
5076 : */
5077 179164 : if (jselec > 0) /* protect against zero divide */
5078 : {
5079 178828 : avgmatch = nselec * innerrel->rows / jselec;
5080 : /* Clamp to sane range */
5081 178828 : avgmatch = Max(1.0, avgmatch);
5082 : }
5083 : else
5084 336 : avgmatch = 1.0;
5085 :
5086 179164 : semifactors->outer_match_frac = jselec;
5087 179164 : semifactors->match_count = avgmatch;
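     : /*
     :  * Worked example (illustrative numbers only): if jselec = 0.2 (20%
     :  * of outer rows have at least one match), nselec = 0.001 and the
     :  * inner rel has 1000 rows, then avgmatch = 0.001 * 1000 / 0.2 = 5
     :  * expected matches per matched outer row (clamped to at least 1.0).
     :  */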
5088 179164 : }
5089 :
5090 : /*
5091 : * has_indexed_join_quals
5092 : * Check whether all the joinquals of a nestloop join are used as
5093 : * inner index quals.
5094 : *
5095 : * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
5096 : * indexscan) that uses all the joinquals as indexquals, we can assume that an
5097 : * unmatched outer tuple is cheap to process, whereas otherwise it's probably
5098 : * expensive.
5099 : */
5100 : static bool
5101 721908 : has_indexed_join_quals(NestPath *path)
5102 : {
5103 721908 : JoinPath *joinpath = &path->jpath;
5104 721908 : Relids joinrelids = joinpath->path.parent->relids;
5105 721908 : Path *innerpath = joinpath->innerjoinpath;
5106 : List *indexclauses;
5107 : bool found_one;
5108 : ListCell *lc;
5109 :
5110 : /* If join still has quals to evaluate, it's not fast */
5111 721908 : if (joinpath->joinrestrictinfo != NIL)
5112 504262 : return false;
5113 : /* Nor if the inner path isn't parameterized at all */
5114 217646 : if (innerpath->param_info == NULL)
5115 4764 : return false;
5116 :
5117 : /* Find the indexclauses list for the inner scan */
5118 212882 : switch (innerpath->pathtype)
5119 : {
5120 134590 : case T_IndexScan:
5121 : case T_IndexOnlyScan:
5122 134590 : indexclauses = ((IndexPath *) innerpath)->indexclauses;
5123 134590 : break;
5124 282 : case T_BitmapHeapScan:
5125 : {
5126 : /* Accept only a simple bitmap scan, not AND/OR cases */
5127 282 : Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
5128 :
5129 282 : if (IsA(bmqual, IndexPath))
5130 234 : indexclauses = ((IndexPath *) bmqual)->indexclauses;
5131 : else
5132 48 : return false;
5133 234 : break;
5134 : }
5135 78010 : default:
5136 :
5137 : /*
5138 : * If it's not a simple indexscan, it probably doesn't run quickly
5139 : * for zero rows out, even if it's a parameterized path using all
5140 : * the joinquals.
5141 : */
5142 78010 : return false;
5143 : }
5144 :
5145 : /*
5146 : * Examine the inner path's param clauses. Any that are from the outer
5147 : * path must be found in the indexclauses list, either exactly or in an
5148 : * equivalent form generated by equivclass.c. Also, we must find at least
5149 : * one such clause, else it's a clauseless join which isn't fast.
5150 : */
5151 134824 : found_one = false;
5152 269054 : foreach(lc, innerpath->param_info->ppi_clauses)
5153 : {
5154 137092 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
5155 :
5156 137092 : if (join_clause_is_movable_into(rinfo,
5157 137092 : innerpath->parent->relids,
5158 : joinrelids))
5159 : {
5160 136624 : if (!is_redundant_with_indexclauses(rinfo, indexclauses))
5161 2862 : return false;
5162 133762 : found_one = true;
5163 : }
5164 : }
5165 131962 : return found_one;
5166 : }
5167 :
5168 :
5169 : /*
5170 : * approx_tuple_count
5171 : * Quick-and-dirty estimation of the number of join rows passing
5172 : * a set of qual conditions.
5173 : *
5174 : * The quals can be either an implicitly-ANDed list of boolean expressions,
5175 : * or a list of RestrictInfo nodes (typically the latter).
5176 : *
5177 : * We intentionally compute the selectivity under JOIN_INNER rules, even
5178 : * if it's some type of outer join. This is appropriate because we are
5179 : * trying to figure out how many tuples pass the initial merge or hash
5180 : * join step.
5181 : *
5182 : * This is quick-and-dirty because we bypass clauselist_selectivity, and
5183 : * simply multiply the independent clause selectivities together. Now
5184 : * clauselist_selectivity often can't do any better than that anyhow, but
5185 : * for some situations (such as range constraints) it is smarter. However,
5186 : * we can't effectively cache the results of clauselist_selectivity, whereas
5187 : * the individual clause selectivities can be and are cached.
5188 : *
5189 : * Since we are only using the results to estimate how many potential
5190 : * output tuples are generated and passed through qpqual checking, it
5191 : * seems OK to live with the approximation.
5192 : */
5193 : static double
5194 378518 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
5195 : {
5196 : double tuples;
5197 378518 : double outer_tuples = path->outerjoinpath->rows;
5198 378518 : double inner_tuples = path->innerjoinpath->rows;
5199 : SpecialJoinInfo sjinfo;
5200 378518 : Selectivity selec = 1.0;
5201 : ListCell *l;
5202 :
5203 : /*
5204 : * Make up a SpecialJoinInfo for JOIN_INNER semantics.
5205 : */
5206 378518 : init_dummy_sjinfo(&sjinfo, path->outerjoinpath->parent->relids,
5207 378518 : path->innerjoinpath->parent->relids);
5208 :
5209 : /* Get the approximate selectivity */
5210 810444 : foreach(l, quals)
5211 : {
5212 431926 : Node *qual = (Node *) lfirst(l);
5213 :
5214 : /* Note that clause_selectivity will be able to cache its result */
5215 431926 : selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
5216 : }
5217 :
5218 : /* Apply it to the input relation sizes */
5219 378518 : tuples = selec * outer_tuples * inner_tuples;
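     : /*
     :  * Worked example (illustrative numbers only): two quals with cached
     :  * selectivities 0.1 and 0.05 give selec = 0.005; with 1000 outer
     :  * and 2000 inner rows the estimate is 0.005 * 1000 * 2000 = 10,000
     :  * tuples out of the initial merge/hash step.
     :  */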
5220 :
5221 378518 : return clamp_row_est(tuples);
5222 : }
5223 :
5224 :
5225 : /*
5226 : * set_baserel_size_estimates
5227 : * Set the size estimates for the given base relation.
5228 : *
5229 : * The rel's targetlist and restrictinfo list must have been constructed
5230 : * already, and rel->tuples must be set.
5231 : *
5232 : * We set the following fields of the rel node:
5233 : * rows: the estimated number of output tuples (after applying
5234 : * restriction clauses).
5235 : * width: the estimated average output tuple width in bytes.
5236 : * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
5237 : */
5238 : void
5239 426456 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5240 : {
5241 : double nrows;
5242 :
5243 : /* Should only be applied to base relations */
5244 : Assert(rel->relid > 0);
5245 :
5246 852888 : nrows = rel->tuples *
5247 426456 : clauselist_selectivity(root,
5248 : rel->baserestrictinfo,
5249 : 0,
5250 : JOIN_INNER,
5251 : NULL);
5252 :
5253 426432 : rel->rows = clamp_row_est(nrows);
5254 :
5255 426432 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5256 :
5257 426432 : set_rel_width(root, rel);
5258 426432 : }
5259 :
5260 : /*
5261 : * get_parameterized_baserel_size
5262 : * Make a size estimate for a parameterized scan of a base relation.
5263 : *
5264 : * 'param_clauses' lists the additional join clauses to be used.
5265 : *
5266 : * set_baserel_size_estimates must have been applied already.
5267 : */
5268 : double
5269 123700 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
5270 : List *param_clauses)
5271 : {
5272 : List *allclauses;
5273 : double nrows;
5274 :
5275 : /*
5276 : * Estimate the number of rows returned by the parameterized scan, knowing
5277 : * that it will apply all the extra join clauses as well as the rel's own
5278 : * restriction clauses. Note that we force the clauses to be treated as
5279 : * non-join clauses during selectivity estimation.
5280 : */
5281 123700 : allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
5282 247400 : nrows = rel->tuples *
5283 123700 : clauselist_selectivity(root,
5284 : allclauses,
5285 123700 : rel->relid, /* do not use 0! */
5286 : JOIN_INNER,
5287 : NULL);
5288 123700 : nrows = clamp_row_est(nrows);
5289 : /* For safety, make sure result is not more than the base estimate */
5290 123700 : if (nrows > rel->rows)
5291 0 : nrows = rel->rows;
5292 123700 : return nrows;
5293 : }
5294 :
5295 : /*
5296 : * set_joinrel_size_estimates
5297 : * Set the size estimates for the given join relation.
5298 : *
5299 : * The rel's targetlist must have been constructed already, and a
5300 : * restriction clause list that matches the given component rels must
5301 : * be provided.
5302 : *
5303 : * Since there is more than one way to make a joinrel for more than two
5304 : * base relations, the results we get here could depend on which component
5305 : * rel pair is provided. In theory we should get the same answers no matter
5306 : * which pair is provided; in practice, since the selectivity estimation
5307 : * routines don't handle all cases equally well, we might not. But there's
5308 : * not much to be done about it. (Would it make sense to repeat the
5309 : * calculations for each pair of input rels that's encountered, and somehow
5310 : * average the results? Probably way more trouble than it's worth, and
5311 : * anyway we must keep the rowcount estimate the same for all paths for the
5312 : * joinrel.)
5313 : *
5314 : * We set only the rows field here. The reltarget field was already set by
5315 : * build_joinrel_tlist, and baserestrictcost is not used for join rels.
5316 : */
5317 : void
5318 182678 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5319 : RelOptInfo *outer_rel,
5320 : RelOptInfo *inner_rel,
5321 : SpecialJoinInfo *sjinfo,
5322 : List *restrictlist)
5323 : {
5324 182678 : rel->rows = calc_joinrel_size_estimate(root,
5325 : rel,
5326 : outer_rel,
5327 : inner_rel,
5328 : outer_rel->rows,
5329 : inner_rel->rows,
5330 : sjinfo,
5331 : restrictlist);
5332 182678 : }
5333 :
5334 : /*
5335 : * get_parameterized_joinrel_size
5336 : * Make a size estimate for a parameterized scan of a join relation.
5337 : *
5338 : * 'rel' is the joinrel under consideration.
5339 : * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5340 : * produce the relations being joined.
5341 : * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5342 : * 'restrict_clauses' lists the join clauses that need to be applied at the
5343 : * join node (including any movable clauses that were moved down to this join,
5344 : * and not including any movable clauses that were pushed down into the
5345 : * child paths).
5346 : *
5347 : * set_joinrel_size_estimates must have been applied already.
5348 : */
5349 : double
5350 7240 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5351 : Path *outer_path,
5352 : Path *inner_path,
5353 : SpecialJoinInfo *sjinfo,
5354 : List *restrict_clauses)
5355 : {
5356 : double nrows;
5357 :
5358 : /*
5359 : * Estimate the number of rows returned by the parameterized join as the
5360 : * sizes of the input paths times the selectivity of the clauses that have
5361 : * ended up at this join node.
5362 : *
5363 : * As with set_joinrel_size_estimates, the rowcount estimate could depend
5364 : * on the pair of input paths provided, though ideally we'd get the same
5365 : * estimate for any pair with the same parameterization.
5366 : */
5367 7240 : nrows = calc_joinrel_size_estimate(root,
5368 : rel,
5369 : outer_path->parent,
5370 : inner_path->parent,
5371 : outer_path->rows,
5372 : inner_path->rows,
5373 : sjinfo,
5374 : restrict_clauses);
5375 : /* For safety, make sure result is not more than the base estimate */
5376 7240 : if (nrows > rel->rows)
5377 12 : nrows = rel->rows;
5378 7240 : return nrows;
5379 : }
5380 :
5381 : /*
5382 : * calc_joinrel_size_estimate
5383 : * Workhorse for set_joinrel_size_estimates and
5384 : * get_parameterized_joinrel_size.
5385 : *
5386 : * outer_rel/inner_rel are the relations being joined, but they should be
5387 : * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5388 : * than what rel->rows says, when we are considering parameterized paths.
5389 : */
5390 : static double
5391 189918 : calc_joinrel_size_estimate(PlannerInfo *root,
5392 : RelOptInfo *joinrel,
5393 : RelOptInfo *outer_rel,
5394 : RelOptInfo *inner_rel,
5395 : double outer_rows,
5396 : double inner_rows,
5397 : SpecialJoinInfo *sjinfo,
5398 : List *restrictlist)
5399 : {
5400 189918 : JoinType jointype = sjinfo->jointype;
5401 : Selectivity fkselec;
5402 : Selectivity jselec;
5403 : Selectivity pselec;
5404 : double nrows;
5405 :
5406 : /*
5407 : * Compute joinclause selectivity. Note that we are only considering
5408 : * clauses that become restriction clauses at this join level; we are not
5409 : * double-counting them because they were not considered in estimating the
5410 : * sizes of the component rels.
5411 : *
5412 : * First, see whether any of the joinclauses can be matched to known FK
5413 : * constraints. If so, drop those clauses from the restrictlist, and
5414 : * instead estimate their selectivity using FK semantics. (We do this
5415 : * without regard to whether said clauses are local or "pushed down".
5416 : * Probably, an FK-matching clause could never be seen as pushed down at
5417 : * an outer join, since it would be strict and hence would be grounds for
5418 : * join strength reduction.) fkselec gets the net selectivity for
5419 : * FK-matching clauses, or 1.0 if there are none.
5420 : */
5421 189918 : fkselec = get_foreign_key_join_selectivity(root,
5422 : outer_rel->relids,
5423 : inner_rel->relids,
5424 : sjinfo,
5425 : &restrictlist);
5426 :
5427 : /*
5428 : * For an outer join, we have to distinguish the selectivity of the join's
5429 : * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5430 : * down". For inner joins we just count them all as joinclauses.
5431 : */
5432 189918 : if (IS_OUTER_JOIN(jointype))
5433 : {
5434 74848 : List *joinquals = NIL;
5435 74848 : List *pushedquals = NIL;
5436 : ListCell *l;
5437 :
5438 : /* Grovel through the clauses to separate into two lists */
5439 166520 : foreach(l, restrictlist)
5440 : {
5441 91672 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5442 :
5443 91672 : if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5444 3722 : pushedquals = lappend(pushedquals, rinfo);
5445 : else
5446 87950 : joinquals = lappend(joinquals, rinfo);
5447 : }
5448 :
5449 : /* Get the separate selectivities */
5450 74848 : jselec = clauselist_selectivity(root,
5451 : joinquals,
5452 : 0,
5453 : jointype,
5454 : sjinfo);
5455 74848 : pselec = clauselist_selectivity(root,
5456 : pushedquals,
5457 : 0,
5458 : jointype,
5459 : sjinfo);
5460 :
5461 : /* Avoid leaking a lot of ListCells */
5462 74848 : list_free(joinquals);
5463 74848 : list_free(pushedquals);
5464 : }
5465 : else
5466 : {
5467 115070 : jselec = clauselist_selectivity(root,
5468 : restrictlist,
5469 : 0,
5470 : jointype,
5471 : sjinfo);
5472 115070 : pselec = 0.0; /* not used, keep compiler quiet */
5473 : }
5474 :
5475 : /*
5476 : * Basically, we multiply size of Cartesian product by selectivity.
5477 : *
5478 : * If we are doing an outer join, take that into account: the joinqual
5479 : * selectivity has to be clamped using the knowledge that the output must
5480 : * be at least as large as the non-nullable input. However, any
5481 : * pushed-down quals are applied after the outer join, so their
5482 : * selectivity applies fully.
5483 : *
5484 : * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5485 : * of LHS rows that have matches, and we apply that straightforwardly.
5486 : */
5487 189918 : switch (jointype)
5488 : {
5489 110336 : case JOIN_INNER:
5490 110336 : nrows = outer_rows * inner_rows * fkselec * jselec;
5491 : /* pselec not used */
5492 110336 : break;
5493 68778 : case JOIN_LEFT:
5494 68778 : nrows = outer_rows * inner_rows * fkselec * jselec;
5495 68778 : if (nrows < outer_rows)
5496 24326 : nrows = outer_rows;
5497 68778 : nrows *= pselec;
5498 68778 : break;
5499 1690 : case JOIN_FULL:
5500 1690 : nrows = outer_rows * inner_rows * fkselec * jselec;
5501 1690 : if (nrows < outer_rows)
5502 1092 : nrows = outer_rows;
5503 1690 : if (nrows < inner_rows)
5504 120 : nrows = inner_rows;
5505 1690 : nrows *= pselec;
5506 1690 : break;
5507 4734 : case JOIN_SEMI:
5508 4734 : nrows = outer_rows * fkselec * jselec;
5509 : /* pselec not used */
5510 4734 : break;
5511 4380 : case JOIN_ANTI:
5512 4380 : nrows = outer_rows * (1.0 - fkselec * jselec);
5513 4380 : nrows *= pselec;
5514 4380 : break;
5515 0 : default:
5516 : /* other values not expected here */
5517 0 : elog(ERROR, "unrecognized join type: %d", (int) jointype);
5518 : nrows = 0; /* keep compiler quiet */
5519 : break;
5520 : }
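     : /*
     :  * Worked example (illustrative numbers only): with outer_rows =
     :  * 1000, inner_rows = 500 and fkselec * jselec = 0.002, JOIN_INNER
     :  * yields 1000 * 500 * 0.002 = 1000 rows; JOIN_LEFT yields the same
     :  * product clamped to at least outer_rows, then scaled by pselec;
     :  * JOIN_SEMI yields 1000 * 0.002 = 2 rows; and JOIN_ANTI yields
     :  * 1000 * (1 - 0.002) = 998 rows.
     :  */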
5521 :
5522 189918 : return clamp_row_est(nrows);
5523 : }
5524 :
5525 : /*
5526 : * get_foreign_key_join_selectivity
5527 : * Estimate join selectivity for foreign-key-related clauses.
5528 : *
5529 : * Remove any clauses that can be matched to FK constraints from *restrictlist,
5530 : * and return a substitute estimate of their selectivity. 1.0 is returned
5531 : * when there are no such clauses.
5532 : *
5533 : * The reason for treating such clauses specially is that we can get better
5534 : * estimates this way than by relying on clauselist_selectivity(), especially
5535 : * for multi-column FKs where that function's assumption that the clauses are
5536 : * independent falls down badly. But even with single-column FKs, we may be
5537 : * able to get a better answer when the pg_statistic stats are missing or out
5538 : * of date.
5539 : */
5540 : static Selectivity
5541 189918 : get_foreign_key_join_selectivity(PlannerInfo *root,
5542 : Relids outer_relids,
5543 : Relids inner_relids,
5544 : SpecialJoinInfo *sjinfo,
5545 : List **restrictlist)
5546 : {
5547 189918 : Selectivity fkselec = 1.0;
5548 189918 : JoinType jointype = sjinfo->jointype;
5549 189918 : List *worklist = *restrictlist;
5550 : ListCell *lc;
5551 :
5552 : /* Consider each FK constraint that is known to match the query */
5553 191800 : foreach(lc, root->fkey_list)
5554 : {
5555 1882 : ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5556 : bool ref_is_outer;
5557 : List *removedlist;
5558 : ListCell *cell;
5559 :
5560 : /*
5561 : * This FK is not relevant unless it connects a baserel on one side of
5562 : * this join to a baserel on the other side.
5563 : */
5564 3418 : if (bms_is_member(fkinfo->con_relid, outer_relids) &&
5565 1536 : bms_is_member(fkinfo->ref_relid, inner_relids))
5566 1368 : ref_is_outer = false;
5567 842 : else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5568 328 : bms_is_member(fkinfo->con_relid, inner_relids))
5569 130 : ref_is_outer = true;
5570 : else
5571 384 : continue;
5572 :
5573 : /*
5574 : * If we're dealing with a semi/anti join, and the FK's referenced
5575 : * relation is on the outside, then knowledge of the FK doesn't help
5576 : * us figure out what we need to know (which is the fraction of outer
5577 : * rows that have matches). On the other hand, if the referenced rel
5578 : * is on the inside, then all outer rows must have matches in the
5579 : * referenced table (ignoring nulls). But any restriction or join
5580 : * clauses that filter that table will reduce the fraction of matches.
5581 : * We can account for restriction clauses, but it's too hard to guess
5582 : * how many table rows would get through a join that's inside the RHS.
5583 : * Hence, if either case applies, punt and ignore the FK.
5584 : */
5585 1498 : if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
5586 976 : (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
5587 12 : continue;
5588 :
5589 : /*
5590 : * Modify the restrictlist by removing clauses that match the FK (and
5591 : * putting them into removedlist instead). It seems unsafe to modify
5592 : * the originally-passed List structure, so we make a shallow copy the
5593 : * first time through.
5594 : */
5595 1486 : if (worklist == *restrictlist)
5596 1262 : worklist = list_copy(worklist);
5597 :
5598 1486 : removedlist = NIL;
5599 3048 : foreach(cell, worklist)
5600 : {
5601 1562 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5602 1562 : bool remove_it = false;
5603 : int i;
5604 :
5605 : /* Drop this clause if it matches any column of the FK */
5606 1948 : for (i = 0; i < fkinfo->nkeys; i++)
5607 : {
5608 1918 : if (rinfo->parent_ec)
5609 : {
5610 : /*
5611 : * EC-derived clauses can only match by EC. It is okay to
5612 : * consider any clause derived from the same EC as
5613 : * matching the FK: even if equivclass.c chose to generate
5614 : * a clause equating some other pair of Vars, it could
5615 : * have generated one equating the FK's Vars. So for
5616 : * purposes of estimation, we can act as though it did so.
5617 : *
5618 : * Note: checking parent_ec is a bit of a cheat because
5619 : * there are EC-derived clauses that don't have parent_ec
5620 : * set; but such clauses must compare expressions that
5621 : * aren't just Vars, so they cannot match the FK anyway.
5622 : */
5623 304 : if (fkinfo->eclass[i] == rinfo->parent_ec)
5624 : {
5625 298 : remove_it = true;
5626 298 : break;
5627 : }
5628 : }
5629 : else
5630 : {
5631 : /*
5632 : * Otherwise, see if rinfo was previously matched to FK as
5633 : * a "loose" clause.
5634 : */
5635 1614 : if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5636 : {
5637 1234 : remove_it = true;
5638 1234 : break;
5639 : }
5640 : }
5641 : }
5642 1562 : if (remove_it)
5643 : {
5644 1532 : worklist = foreach_delete_current(worklist, cell);
5645 1532 : removedlist = lappend(removedlist, rinfo);
5646 : }
5647 : }
5648 :
5649 : /*
5650 : * If we failed to remove all the matching clauses we expected to
5651 : * find, chicken out and ignore this FK; applying its selectivity
5652 : * might result in double-counting. Put any clauses we did manage to
5653 : * remove back into the worklist.
5654 : *
5655 : * Since the matching clauses are known not outerjoin-delayed, they
5656 : * would normally have appeared in the initial joinclause list. If we
5657 : * didn't find them, there are two possibilities:
5658 : *
5659 : * 1. If the FK match is based on an EC that is ec_has_const, it won't
5660 : * have generated any join clauses at all. We discount such ECs while
5661 : * checking to see if we have "all" the clauses. (Below, we'll adjust
5662 : * the selectivity estimate for this case.)
5663 : *
5664 : * 2. The clauses were matched to some other FK in a previous
5665 : * iteration of this loop, and thus removed from worklist. (A likely
5666 : * case is that two FKs are matched to the same EC; there will be only
5667 : * one EC-derived clause in the initial list, so the first FK will
5668 : * consume it.) Applying both FKs' selectivity independently risks
5669 : * underestimating the join size; in particular, this would undo one
5670 : * of the main things that ECs were invented for, namely to avoid
5671 : * double-counting the selectivity of redundant equality conditions.
5672 : * Later we might think of a reasonable way to combine the estimates,
5673 : * but for now, just punt, since this is a fairly uncommon situation.
5674 : */
5675 1486 : if (removedlist == NIL ||
5676 1200 : list_length(removedlist) !=
5677 1200 : (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5678 : {
5679 286 : worklist = list_concat(worklist, removedlist);
5680 286 : continue;
5681 : }
5682 :
5683 : /*
5684 : * Finally we get to the payoff: estimate selectivity using the
5685 : * knowledge that each referencing row will match exactly one row in
5686 : * the referenced table.
5687 : *
5688 : * XXX that's not true in the presence of nulls in the referencing
5689 : * column(s), so in principle we should derate the estimate for those.
5690 : * However (1) if there are any strict restriction clauses for the
5691 : * referencing column(s) elsewhere in the query, derating here would
5692 : * be double-counting the null fraction, and (2) it's not very clear
5693 : * how to combine null fractions for multiple referencing columns. So
5694 : * we do nothing for now about correcting for nulls.
5695 : *
5696 : * XXX another point here is that if either side of an FK constraint
5697 : * is an inheritance parent, we estimate as though the constraint
5698 : * covers all its children as well. This is not an unreasonable
5699 : * assumption for a referencing table, ie the user probably applied
5700 : * identical constraints to all child tables (though perhaps we ought
5701 : * to check that). But it's not possible to have done that for a
5702 : * referenced table. Fortunately, precisely because that doesn't
5703 : * work, it is uncommon in practice to have an FK referencing a parent
5704 : * table. So, at least for now, disregard inheritance here.
5705 : */
5706 1200 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
5707 752 : {
5708 : /*
5709 : * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5710 : * referenced table is exactly the inside of the join. The join
5711 : * selectivity is defined as the fraction of LHS rows that have
5712 : * matches. The FK implies that every LHS row has a match *in the
5713 : * referenced table*; but any restriction clauses on it will
5714 : * reduce the number of matches. Hence we take the join
5715 : * selectivity as equal to the selectivity of the table's
5716 : * restriction clauses, which is rows / tuples; but we must guard
5717 : * against tuples == 0.
5718 : */
5719 752 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5720 752 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5721 :
5722 752 : fkselec *= ref_rel->rows / ref_tuples;
5723 : }
5724 : else
5725 : {
5726 : /*
5727 : * Otherwise, selectivity is exactly 1/referenced-table-size; but
5728 : * guard against tuples == 0. Note we should use the raw table
5729 : * tuple count, not any estimate of its filtered or joined size.
5730 : */
5731 448 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5732 448 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5733 :
5734 448 : fkselec *= 1.0 / ref_tuples;
5735 : }
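     : /*
     :  * Worked example (illustrative numbers only): for a plain join
     :  * against a referenced table of 100,000 tuples, fkselec is
     :  * multiplied by 1/100000; for a SEMI/ANTI join where the
     :  * referenced table's restriction clauses pass 5,000 of those rows,
     :  * it is multiplied by 5000 / 100000 = 0.05, i.e. 5% of outer rows
     :  * find a match.
     :  */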
5736 :
5737 : /*
5738 : * If any of the FK columns participated in ec_has_const ECs, then
5739 : * equivclass.c will have generated "var = const" restrictions for
5740 : * each side of the join, thus reducing the sizes of both input
5741 : * relations. Taking the fkselec at face value would amount to
5742 : * double-counting the selectivity of the constant restriction for the
5743 : * referencing Var. Hence, look for the restriction clause(s) that
5744 : * were applied to the referencing Var(s), and divide out their
5745 : * selectivity to correct for this.
5746 : */
5747 1200 : if (fkinfo->nconst_ec > 0)
5748 : {
5749 24 : for (int i = 0; i < fkinfo->nkeys; i++)
5750 : {
5751 18 : EquivalenceClass *ec = fkinfo->eclass[i];
5752 :
5753 18 : if (ec && ec->ec_has_const)
5754 : {
5755 6 : EquivalenceMember *em = fkinfo->fk_eclass_member[i];
5756 6 : RestrictInfo *rinfo = find_derived_clause_for_ec_member(ec,
5757 : em);
5758 :
5759 6 : if (rinfo)
5760 : {
5761 : Selectivity s0;
5762 :
5763 6 : s0 = clause_selectivity(root,
5764 : (Node *) rinfo,
5765 : 0,
5766 : jointype,
5767 : sjinfo);
5768 6 : if (s0 > 0)
5769 6 : fkselec /= s0;
5770 : }
5771 : }
5772 : }
5773 : }
5774 : }
5775 :
5776 189918 : *restrictlist = worklist;
5777 189918 : CLAMP_PROBABILITY(fkselec);
5778 189918 : return fkselec;
5779 : }
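
To make the two rules above concrete, here is a minimal standalone sketch (not part of costsize.c; the table sizes are hypothetical). For SEMI/ANTI joins the FK contributes the referenced rel's restriction selectivity rows/tuples; for all other join types it contributes 1/tuples:

/* Illustrative sketch only: the two FK selectivity rules, with
 * hypothetical inputs. */
#include <stdio.h>

int
main(void)
{
    double ref_tuples = 10000.0;  /* raw size of referenced table */
    double ref_rows = 500.0;      /* rows surviving its restriction quals */

    if (ref_tuples < 1.0)         /* guard against tuples == 0 */
        ref_tuples = 1.0;

    /* JOIN_SEMI / JOIN_ANTI: fraction of LHS rows that find a match */
    printf("semi/anti fkselec = %g\n", ref_rows / ref_tuples);  /* 0.05 */

    /* other join types: each referencing row matches exactly one row */
    printf("other fkselec = %g\n", 1.0 / ref_tuples);           /* 0.0001 */
    return 0;
}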
5780 :
5781 : /*
5782 : * set_subquery_size_estimates
5783 : * Set the size estimates for a base relation that is a subquery.
5784 : *
5785 : * The rel's targetlist and restrictinfo list must have been constructed
5786 : * already, and the Paths for the subquery must have been completed.
5787 : * We look at the subquery's PlannerInfo to extract data.
5788 : *
5789 : * We set the same fields as set_baserel_size_estimates.
5790 : */
5791 : void
5792 21364 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5793 : {
5794 21364 : PlannerInfo *subroot = rel->subroot;
5795 : RelOptInfo *sub_final_rel;
5796 : ListCell *lc;
5797 :
5798 : /* Should only be applied to base relations that are subqueries */
5799 : Assert(rel->relid > 0);
5800 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
5801 :
5802 : /*
5803 : * Copy raw number of output rows from subquery. All of its paths should
5804 : * have the same output rowcount, so just look at cheapest-total.
5805 : */
5806 21364 : sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
5807 21364 : rel->tuples = sub_final_rel->cheapest_total_path->rows;
5808 :
5809 : /*
5810 : * Compute per-output-column width estimates by examining the subquery's
5811 : * targetlist. For any output that is a plain Var, get the width estimate
5812 : * that was made while planning the subquery. Otherwise, we leave it to
5813 : * set_rel_width to fill in a datatype-based default estimate.
5814 : */
5815 85532 : foreach(lc, subroot->parse->targetList)
5816 : {
5817 64168 : TargetEntry *te = lfirst_node(TargetEntry, lc);
5818 64168 : Node *texpr = (Node *) te->expr;
5819 64168 : int32 item_width = 0;
5820 :
5821 : /* junk columns aren't visible to upper query */
5822 64168 : if (te->resjunk)
5823 1134 : continue;
5824 :
5825 : /*
5826 : * The subquery could be an expansion of a view that's had columns
5827 : * added to it since the current query was parsed, so that there are
5828 : * non-junk tlist columns in it that don't correspond to any column
5829 : * visible at our query level. Ignore such columns.
5830 : */
5831 63034 : if (te->resno < rel->min_attr || te->resno > rel->max_attr)
5832 0 : continue;
5833 :
5834 : /*
5835 : * XXX This currently doesn't work for subqueries containing set
5836 : * operations, because the Vars in their tlists are bogus references
5837 : * to the first leaf subquery, which wouldn't give the right answer
5838 : * even if we could still get to its PlannerInfo.
5839 : *
5840 : * Also, the subquery could be an appendrel for which all branches are
5841 : * known empty due to constraint exclusion, in which case
5842 : * set_append_rel_pathlist will have left the attr_widths set to zero.
5843 : *
5844 : * In either case, we just leave the width estimate zero until
5845 : * set_rel_width fixes it.
5846 : */
5847 63034 : if (IsA(texpr, Var) &&
5848 28356 : subroot->parse->setOperations == NULL)
5849 : {
5850 26936 : Var *var = (Var *) texpr;
5851 26936 : RelOptInfo *subrel = find_base_rel(subroot, var->varno);
5852 :
5853 26936 : item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
5854 : }
5855 63034 : rel->attr_widths[te->resno - rel->min_attr] = item_width;
5856 : }
5857 :
5858 : /* Now estimate number of output rows, etc */
5859 21364 : set_baserel_size_estimates(root, rel);
5860 21364 : }
5861 :
5862 : /*
5863 : * set_function_size_estimates
5864 : * Set the size estimates for a base relation that is a function call.
5865 : *
5866 : * The rel's targetlist and restrictinfo list must have been constructed
5867 : * already.
5868 : *
5869 : * We set the same fields as set_baserel_size_estimates.
5870 : */
5871 : void
5872 39980 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5873 : {
5874 : RangeTblEntry *rte;
5875 : ListCell *lc;
5876 :
5877 : /* Should only be applied to base relations that are functions */
5878 : Assert(rel->relid > 0);
5879 39980 : rte = planner_rt_fetch(rel->relid, root);
5880 : Assert(rte->rtekind == RTE_FUNCTION);
5881 :
5882 : /*
5883 : * Estimate number of rows the functions will return. The rowcount of the
5884 : * node is that of the largest function result.
5885 : */
5886 39980 : rel->tuples = 0;
5887 80272 : foreach(lc, rte->functions)
5888 : {
5889 40292 : RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
5890 40292 : double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
5891 :
5892 40292 : if (ntup > rel->tuples)
5893 40004 : rel->tuples = ntup;
5894 : }
5895 :
5896 : /* Now estimate number of output rows, etc */
5897 39980 : set_baserel_size_estimates(root, rel);
5898 39980 : }
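
For instance, an RTE containing two set-returning functions estimated at 10 and 1000 rows is sized at 1000 tuples. A trivial standalone sketch of the max-over-functions rule (the per-function estimates are made up):

/* Illustrative sketch only: rel->tuples is the largest per-function
 * row estimate; the estimates here are hypothetical. */
#include <stdio.h>

int
main(void)
{
    double per_func_rows[] = {10.0, 1000.0};
    double tuples = 0.0;

    for (int i = 0; i < 2; i++)
        if (per_func_rows[i] > tuples)
            tuples = per_func_rows[i];

    printf("rel->tuples = %g\n", tuples);   /* 1000 */
    return 0;
}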
5899 :
5900 : /*
5901 : * set_tablefunc_size_estimates
5902 : * Set the size estimates for a base relation that is a table function call.
5903 : *
5904 : * The rel's targetlist and restrictinfo list must have been constructed
5905 : * already.
5906 : *
5907 : * We set the same fields as set_baserel_size_estimates.
5908 : */
5909 : void
5910 548 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5911 : {
5912 : /* Should only be applied to base relations that are functions */
5913 : /* Should only be applied to base relations that are table functions */
5914 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
5915 :
5916 548 : rel->tuples = 100;
5917 :
5918 : /* Now estimate number of output rows, etc */
5919 548 : set_baserel_size_estimates(root, rel);
5920 548 : }
5921 :
5922 : /*
5923 : * set_values_size_estimates
5924 : * Set the size estimates for a base relation that is a values list.
5925 : *
5926 : * The rel's targetlist and restrictinfo list must have been constructed
5927 : * already.
5928 : *
5929 : * We set the same fields as set_baserel_size_estimates.
5930 : */
5931 : void
5932 7736 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5933 : {
5934 : RangeTblEntry *rte;
5935 :
5936 : /* Should only be applied to base relations that are values lists */
5937 : Assert(rel->relid > 0);
5938 7736 : rte = planner_rt_fetch(rel->relid, root);
5939 : Assert(rte->rtekind == RTE_VALUES);
5940 :
5941 : /*
5942 : * Estimate number of rows the values list will return. We know this
5943 : * precisely based on the list length (well, barring set-returning
5944 : * functions in list items, but that's a refinement not catered for
5945 : * anywhere else either).
5946 : */
5947 7736 : rel->tuples = list_length(rte->values_lists);
5948 :
5949 : /* Now estimate number of output rows, etc */
5950 7736 : set_baserel_size_estimates(root, rel);
5951 7736 : }
5952 :
5953 : /*
5954 : * set_cte_size_estimates
5955 : * Set the size estimates for a base relation that is a CTE reference.
5956 : *
5957 : * The rel's targetlist and restrictinfo list must have been constructed
5958 : * already, and we need an estimate of the number of rows returned by the CTE
5959 : * (if a regular CTE) or the non-recursive term (if a self-reference).
5960 : *
5961 : * We set the same fields as set_baserel_size_estimates.
5962 : */
5963 : void
5964 4026 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
5965 : {
5966 : RangeTblEntry *rte;
5967 :
5968 : /* Should only be applied to base relations that are CTE references */
5969 : Assert(rel->relid > 0);
5970 4026 : rte = planner_rt_fetch(rel->relid, root);
5971 : Assert(rte->rtekind == RTE_CTE);
5972 :
5973 4026 : if (rte->self_reference)
5974 : {
5975 : /*
5976 : * In a self-reference, we assume the average worktable size is a
5977 : * multiple of the nonrecursive term's size. The best multiplier will
5978 : * vary depending on query "fan-out", so make its value adjustable.
5979 : */
5980 806 : rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
5981 : }
5982 : else
5983 : {
5984 : /* Otherwise just believe the CTE's rowcount estimate */
5985 3220 : rel->tuples = cte_rows;
5986 : }
5987 :
5988 : /* Now estimate number of output rows, etc */
5989 4026 : set_baserel_size_estimates(root, rel);
5990 4026 : }
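
Worked numbers for the self-reference branch: assuming recursive_worktable_factor is at its default of 10.0 and the nonrecursive term is estimated at 100 rows, the worktable is sized at 1000 rows. A standalone sketch that approximates clamp_row_est:

/* Illustrative sketch only: assumes the default
 * recursive_worktable_factor of 10.0 and approximates clamp_row_est. */
#include <math.h>
#include <stdio.h>

static double
clamp_row_est_sketch(double nrows)
{
    /* force at least one row, else round to the nearest integer */
    return (nrows <= 1.0) ? 1.0 : rint(nrows);
}

int
main(void)
{
    double recursive_worktable_factor = 10.0;  /* assumed default */
    double cte_rows = 100.0;      /* nonrecursive term's estimate */

    printf("worktable rows = %g\n",
           clamp_row_est_sketch(recursive_worktable_factor * cte_rows));
    return 0;                     /* prints 1000 */
}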
5991 :
5992 : /*
5993 : * set_namedtuplestore_size_estimates
5994 : * Set the size estimates for a base relation that is a tuplestore reference.
5995 : *
5996 : * The rel's targetlist and restrictinfo list must have been constructed
5997 : * already.
5998 : *
5999 : * We set the same fields as set_baserel_size_estimates.
6000 : */
6001 : void
6002 438 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6003 : {
6004 : RangeTblEntry *rte;
6005 :
6006 : /* Should only be applied to base relations that are tuplestore references */
6007 : Assert(rel->relid > 0);
6008 438 : rte = planner_rt_fetch(rel->relid, root);
6009 : Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
6010 :
6011 : /*
6012 : * Use the estimate provided by the code that generates the named
6013 : * tuplestore. In some cases the actual row count may be known; in
6014 : * others, the same plan will be re-used repeatedly, so only a
6015 : * "typical" value can be estimated and used.
6016 : */
6017 438 : rel->tuples = rte->enrtuples;
6018 438 : if (rel->tuples < 0)
6019 0 : rel->tuples = 1000;
6020 :
6021 : /* Now estimate number of output rows, etc */
6022 438 : set_baserel_size_estimates(root, rel);
6023 438 : }
6024 :
6025 : /*
6026 : * set_result_size_estimates
6027 : * Set the size estimates for an RTE_RESULT base relation
6028 : *
6029 : * The rel's targetlist and restrictinfo list must have been constructed
6030 : * already.
6031 : *
6032 : * We set the same fields as set_baserel_size_estimates.
6033 : */
6034 : void
6035 1562 : set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6036 : {
6037 : /* Should only be applied to RTE_RESULT base relations */
6038 : Assert(rel->relid > 0);
6039 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
6040 :
6041 : /* RTE_RESULT always generates a single row, natively */
6042 1562 : rel->tuples = 1;
6043 :
6044 : /* Now estimate number of output rows, etc */
6045 1562 : set_baserel_size_estimates(root, rel);
6046 1562 : }
6047 :
6048 : /*
6049 : * set_foreign_size_estimates
6050 : * Set the size estimates for a base relation that is a foreign table.
6051 : *
6052 : * There is not a whole lot that we can do here; the foreign-data wrapper
6053 : * is responsible for producing useful estimates. We can do a decent job
6054 : * of estimating baserestrictcost, so we set that, and we also set up width
6055 : * using what will be purely datatype-driven estimates from the targetlist.
6056 : * There is no way to do anything sane with the rows value, so we just put
6057 : * a default estimate and hope that the wrapper can improve on it. The
6058 : * wrapper's GetForeignRelSize function will be called momentarily.
6059 : *
6060 : * The rel's targetlist and restrictinfo list must have been constructed
6061 : * already.
6062 : */
6063 : void
6064 2348 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6065 : {
6066 : /* Should only be applied to base relations */
6067 : Assert(rel->relid > 0);
6068 :
6069 2348 : rel->rows = 1000; /* entirely bogus default estimate */
6070 :
6071 2348 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
6072 :
6073 2348 : set_rel_width(root, rel);
6074 2348 : }
6075 :
6076 :
6077 : /*
6078 : * set_rel_width
6079 : * Set the estimated output width of a base relation.
6080 : *
6081 : * The estimated output width is the sum of the per-attribute width estimates
6082 : * for the actually-referenced columns, plus any PHVs or other expressions
6083 : * that have to be calculated at this relation. This is the amount of data
6084 : * we'd need to pass upwards in case of a sort, hash, etc.
6085 : *
6086 : * This function also sets reltarget->cost, so it's a bit misnamed now.
6087 : *
6088 : * NB: this works best on plain relations because it prefers to look at
6089 : * real Vars. For subqueries, set_subquery_size_estimates will already have
6090 : * copied up whatever per-column estimates were made within the subquery,
6091 : * and for other types of rels there isn't much we can do anyway. We fall
6092 : * back on (fairly stupid) datatype-based width estimates if we can't get
6093 : * any better number.
6094 : *
6095 : * The per-attribute width estimates are cached for possible re-use while
6096 : * building join relations or post-scan/join pathtargets.
6097 : */
6098 : static void
6099 428780 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
6100 : {
6101 428780 : Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
6102 428780 : int64 tuple_width = 0;
6103 428780 : bool have_wholerow_var = false;
6104 : ListCell *lc;
6105 :
6106 : /* Vars are assumed to have cost zero, but other exprs do not */
6107 428780 : rel->reltarget->cost.startup = 0;
6108 428780 : rel->reltarget->cost.per_tuple = 0;
6109 :
6110 1494812 : foreach(lc, rel->reltarget->exprs)
6111 : {
6112 1066032 : Node *node = (Node *) lfirst(lc);
6113 :
6114 : /*
6115 : * Ordinarily, a Var in a rel's targetlist must belong to that rel;
6116 : * but there are corner cases involving LATERAL references where that
6117 : * isn't so. If the Var has the wrong varno, fall through to the
6118 : * generic case (it doesn't seem worth the trouble to be any smarter).
6119 : */
6120 1066032 : if (IsA(node, Var) &&
6121 1046242 : ((Var *) node)->varno == rel->relid)
6122 283234 : {
6123 1046176 : Var *var = (Var *) node;
6124 : int ndx;
6125 : int32 item_width;
6126 :
6127 : Assert(var->varattno >= rel->min_attr);
6128 : Assert(var->varattno <= rel->max_attr);
6129 :
6130 1046176 : ndx = var->varattno - rel->min_attr;
6131 :
6132 : /*
6133 : * If it's a whole-row Var, we'll deal with it below, after we've
6134 : * cached as many attr widths as possible.
6135 : */
6136 1046176 : if (var->varattno == 0)
6137 : {
6138 2670 : have_wholerow_var = true;
6139 2670 : continue;
6140 : }
6141 :
6142 : /*
6143 : * The width may have been cached already (especially if it's a
6144 : * subquery), so don't duplicate effort.
6145 : */
6146 1043506 : if (rel->attr_widths[ndx] > 0)
6147 : {
6148 218064 : tuple_width += rel->attr_widths[ndx];
6149 218064 : continue;
6150 : }
6151 :
6152 : /* Try to get column width from statistics */
6153 825442 : if (reloid != InvalidOid && var->varattno > 0)
6154 : {
6155 641158 : item_width = get_attavgwidth(reloid, var->varattno);
6156 641158 : if (item_width > 0)
6157 : {
6158 542208 : rel->attr_widths[ndx] = item_width;
6159 542208 : tuple_width += item_width;
6160 542208 : continue;
6161 : }
6162 : }
6163 :
6164 : /*
6165 : * Not a plain relation, or can't find statistics for it. Estimate
6166 : * using just the type info.
6167 : */
6168 283234 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
6169 : Assert(item_width > 0);
6170 283234 : rel->attr_widths[ndx] = item_width;
6171 283234 : tuple_width += item_width;
6172 : }
6173 19856 : else if (IsA(node, PlaceHolderVar))
6174 : {
6175 : /*
6176 : * We will need to evaluate the PHV's contained expression while
6177 : * scanning this rel, so be sure to include it in reltarget->cost.
6178 : */
6179 1156 : PlaceHolderVar *phv = (PlaceHolderVar *) node;
6180 1156 : PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
6181 : QualCost cost;
6182 :
6183 1156 : tuple_width += phinfo->ph_width;
6184 1156 : cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
6185 1156 : rel->reltarget->cost.startup += cost.startup;
6186 1156 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6187 : }
6188 : else
6189 : {
6190 : /*
6191 : * We could be looking at an expression pulled up from a subquery,
6192 : * or a ROW() representing a whole-row child Var, etc. Do what we
6193 : * can using the expression type information.
6194 : */
6195 : int32 item_width;
6196 : QualCost cost;
6197 :
6198 18700 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
6199 : Assert(item_width > 0);
6200 18700 : tuple_width += item_width;
6201 : /* Not entirely clear if we need to account for cost, but do so */
6202 18700 : cost_qual_eval_node(&cost, node, root);
6203 18700 : rel->reltarget->cost.startup += cost.startup;
6204 18700 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6205 : }
6206 : }
6207 :
6208 : /*
6209 : * If we have a whole-row reference, estimate its width as the sum of
6210 : * per-column widths plus heap tuple header overhead.
6211 : */
6212 428780 : if (have_wholerow_var)
6213 : {
6214 2670 : int64 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
6215 :
6216 2670 : if (reloid != InvalidOid)
6217 : {
6218 : /* Real relation, so estimate true tuple width */
6219 2084 : wholerow_width += get_relation_data_width(reloid,
6220 2084 : rel->attr_widths - rel->min_attr);
6221 : }
6222 : else
6223 : {
6224 : /* Do what we can with info for a phony rel */
6225 : AttrNumber i;
6226 :
6227 1512 : for (i = 1; i <= rel->max_attr; i++)
6228 926 : wholerow_width += rel->attr_widths[i - rel->min_attr];
6229 : }
6230 :
6231 2670 : rel->attr_widths[0 - rel->min_attr] = clamp_width_est(wholerow_width);
6232 :
6233 : /*
6234 : * Include the whole-row Var as part of the output tuple. Yes, that
6235 : * really is what happens at runtime.
6236 : */
6237 2670 : tuple_width += wholerow_width;
6238 : }
6239 :
6240 428780 : rel->reltarget->width = clamp_width_est(tuple_width);
6241 428780 : }
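
The per-column estimate above is a three-step fallback: a previously cached width wins; otherwise consult the statistics (get_attavgwidth); otherwise settle for a datatype-based default (get_typavgwidth). A standalone sketch of that cascade with the catalog lookups stubbed out (all values hypothetical):

/* Illustrative sketch only: the width-lookup cascade, with stubbed
 * catalog lookups and hypothetical values. */
#include <stdio.h>

static int
stub_get_attavgwidth(void)
{
    return 0;                     /* pretend no statistics are available */
}

static int
stub_get_typavgwidth(void)
{
    return 32;                    /* datatype-based default guess */
}

static int
column_width(int *cached)
{
    int width;

    if (*cached > 0)              /* 1: previously cached width */
        return *cached;

    width = stub_get_attavgwidth();
    if (width > 0)                /* 2: width from statistics */
        return *cached = width;

    return *cached = stub_get_typavgwidth();    /* 3: type-based default */
}

int
main(void)
{
    int attr_width = 0;           /* like rel->attr_widths[ndx], unset */

    printf("width = %d\n", column_width(&attr_width));  /* 32 */
    return 0;
}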
6242 :
6243 : /*
6244 : * set_pathtarget_cost_width
6245 : * Set the estimated eval cost and output width of a PathTarget tlist.
6246 : *
6247 : * As a notational convenience, returns the same PathTarget pointer passed in.
6248 : *
6249 : * Most, though not quite all, uses of this function occur after we've run
6250 : * set_rel_width() for base relations; so we can usually obtain cached width
6251 : * estimates for Vars. If we can't, fall back on datatype-based width
6252 : * estimates. Present early-planning uses of PathTargets don't need accurate
6253 : * widths badly enough to justify going to the catalogs for better data.
6254 : */
6255 : PathTarget *
6256 554258 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6257 : {
6258 554258 : int64 tuple_width = 0;
6259 : ListCell *lc;
6260 :
6261 : /* Vars are assumed to have cost zero, but other exprs do not */
6262 554258 : target->cost.startup = 0;
6263 554258 : target->cost.per_tuple = 0;
6264 :
6265 1840468 : foreach(lc, target->exprs)
6266 : {
6267 1286210 : Node *node = (Node *) lfirst(lc);
6268 :
6269 1286210 : tuple_width += get_expr_width(root, node);
6270 :
6271 : /* For non-Vars, account for evaluation cost */
6272 1286210 : if (!IsA(node, Var))
6273 : {
6274 : QualCost cost;
6275 :
6276 573672 : cost_qual_eval_node(&cost, node, root);
6277 573672 : target->cost.startup += cost.startup;
6278 573672 : target->cost.per_tuple += cost.per_tuple;
6279 : }
6280 : }
6281 :
6282 554258 : target->width = clamp_width_est(tuple_width);
6283 :
6284 554258 : return target;
6285 : }
6286 :
6287 : /*
6288 : * get_expr_width
6289 : * Estimate the width of the given expr, preferring the width cached
6290 : * in the Var's owning RelOptInfo; fall back on the type's average
6291 : * width when no cached value is available or the node is not a Var.
6292 : */
6293 : static int32
6294 1538444 : get_expr_width(PlannerInfo *root, const Node *expr)
6295 : {
6296 : int32 width;
6297 :
6298 1538444 : if (IsA(expr, Var))
6299 : {
6300 952750 : const Var *var = (const Var *) expr;
6301 :
6302 : /* We should not see any upper-level Vars here */
6303 : Assert(var->varlevelsup == 0);
6304 :
6305 : /* Try to get data from RelOptInfo cache */
6306 952750 : if (!IS_SPECIAL_VARNO(var->varno) &&
6307 947648 : var->varno < root->simple_rel_array_size)
6308 : {
6309 947648 : RelOptInfo *rel = root->simple_rel_array[var->varno];
6310 :
6311 947648 : if (rel != NULL &&
6312 922550 : var->varattno >= rel->min_attr &&
6313 922550 : var->varattno <= rel->max_attr)
6314 : {
6315 922550 : int ndx = var->varattno - rel->min_attr;
6316 :
6317 922550 : if (rel->attr_widths[ndx] > 0)
6318 896102 : return rel->attr_widths[ndx];
6319 : }
6320 : }
6321 :
6322 : /*
6323 : * No cached data available, so estimate using just the type info.
6324 : */
6325 56648 : width = get_typavgwidth(var->vartype, var->vartypmod);
6326 : Assert(width > 0);
6327 :
6328 56648 : return width;
6329 : }
6330 :
6331 585694 : width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6332 : Assert(width > 0);
6333 585694 : return width;
6334 : }
6335 :
6336 : /*
6337 : * relation_byte_size
6338 : * Estimate the storage space in bytes for a given number of tuples
6339 : * of a given width (size in bytes).
6340 : */
6341 : static double
6342 3189430 : relation_byte_size(double tuples, int width)
6343 : {
6344 3189430 : return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
6345 : }
6346 :
6347 : /*
6348 : * page_size
6349 : * Returns an estimate of the number of pages covered by a given
6350 : * number of tuples of a given width (size in bytes).
6351 : */
6352 : static double
6353 8860 : page_size(double tuples, int width)
6354 : {
6355 8860 : return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6356 : }
6357 :
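
Concrete arithmetic for the two helpers above, under the common assumptions of 8-byte MAXALIGN, a 23-byte heap tuple header, and the default BLCKSZ of 8192: 1000 tuples of width 100 occupy 1000 * (104 + 24) = 128000 bytes, i.e. ceil(128000 / 8192) = 16 pages. A standalone sketch:

/* Illustrative sketch only: assumes 8-byte alignment, a 23-byte
 * heap tuple header, and BLCKSZ = 8192. */
#include <math.h>
#include <stdio.h>

#define MAXALIGN_SKETCH(x) (((x) + 7) / 8 * 8)

int
main(void)
{
    double tuples = 1000.0;
    int    width = 100;
    double bytes = tuples * (MAXALIGN_SKETCH(width) + MAXALIGN_SKETCH(23));

    printf("bytes = %.0f, pages = %.0f\n",
           bytes, ceil(bytes / 8192.0));    /* 128000, 16 */
    return 0;
}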
6358 : /*
6359 : * Estimate the effective number of participants that a parallel path's
6360 : * work will be divided among, given the number of workers budgeted for it.
6361 : */
6362 : static double
6363 139752 : get_parallel_divisor(Path *path)
6364 : {
6365 139752 : double parallel_divisor = path->parallel_workers;
6366 :
6367 : /*
6368 : * Early experience with parallel query suggests that when there is only
6369 : * one worker, the leader often makes a very substantial contribution to
6370 : * executing the parallel portion of the plan, but as more workers are
6371 : * added, it does less and less, because it's busy reading tuples from the
6372 : * workers and doing whatever non-parallel post-processing is needed. By
6373 : * the time we reach 4 workers, the leader no longer makes a meaningful
6374 : * contribution. Thus, for now, estimate that the leader spends 30% of
6375 : * its time servicing each worker, and the remainder executing the
6376 : * parallel plan.
6377 : */
6378 139752 : if (parallel_leader_participation)
6379 : {
6380 : double leader_contribution;
6381 :
6382 138984 : leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6383 138984 : if (leader_contribution > 0)
6384 138006 : parallel_divisor += leader_contribution;
6385 : }
6386 :
6387 139752 : return parallel_divisor;
6388 : }
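
Worked numbers for the 30%-per-worker rule, assuming parallel_leader_participation is enabled: 1 worker gives a divisor of 1 + 0.7 = 1.7; 2 workers give 2.4; 3 give 3.1; from 4 workers on, the leader term is nonpositive and the divisor is just the worker count. A standalone sketch:

/* Illustrative sketch only: reproduces the leader-contribution rule. */
#include <stdio.h>

static double
parallel_divisor_sketch(int workers, int leader_participates)
{
    double divisor = workers;

    if (leader_participates)
    {
        double leader_contribution = 1.0 - 0.3 * workers;

        if (leader_contribution > 0)
            divisor += leader_contribution;
    }
    return divisor;
}

int
main(void)
{
    for (int w = 1; w <= 4; w++)
        printf("%d worker(s) -> divisor %.1f\n",
               w, parallel_divisor_sketch(w, 1));   /* 1.7 2.4 3.1 4.0 */
    return 0;
}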
6389 :
6390 : /*
6391 : * compute_bitmap_pages
6392 : * Estimate number of pages fetched from heap in a bitmap heap scan.
6393 : *
6394 : * 'baserel' is the relation to be scanned
6395 : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
6396 : * 'loop_count' is the number of repetitions of the indexscan to factor into
6397 : * estimates of caching behavior
6398 : *
6399 : * If cost_p isn't NULL, the indexTotalCost estimate is returned in *cost_p.
6400 : * If tuples_p isn't NULL, the tuples_fetched estimate is returned in *tuples_p.
6401 : */
6402 : double
6403 559756 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
6404 : Path *bitmapqual, double loop_count,
6405 : Cost *cost_p, double *tuples_p)
6406 : {
6407 : Cost indexTotalCost;
6408 : Selectivity indexSelectivity;
6409 : double T;
6410 : double pages_fetched;
6411 : double tuples_fetched;
6412 : double heap_pages;
6413 : long maxentries;
6414 :
6415 : /*
6416 : * Fetch total cost of obtaining the bitmap, as well as its total
6417 : * selectivity.
6418 : */
6419 559756 : cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6420 :
6421 : /*
6422 : * Estimate number of main-table pages fetched.
6423 : */
6424 559756 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6425 :
6426 559756 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
6427 :
6428 : /*
6429 : * For a single scan, the number of heap pages that need to be fetched is
6430 : * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6431 : * re-reads needed).
6432 : */
6433 559756 : pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
6434 :
6435 : /*
6436 : * Calculate the number of pages fetched from the heap. Then, based on
6437 : * the current work_mem setting, estimate the maxentries the bitmap can hold.
6438 : * (Note that we always do this calculation based on the number of pages
6439 : * that would be fetched in a single iteration, even if loop_count > 1.
6440 : * That's correct, because only that number of entries will be stored in
6441 : * the bitmap at one time.)
6442 : */
6443 559756 : heap_pages = Min(pages_fetched, baserel->pages);
6444 559756 : maxentries = tbm_calculate_entries(work_mem * 1024L);
6445 :
6446 559756 : if (loop_count > 1)
6447 : {
6448 : /*
6449 : * For repeated bitmap scans, scale up the number of tuples fetched in
6450 : * the Mackert and Lohman formula by the number of scans, so that we
6451 : * estimate the number of pages fetched by all the scans. Then
6452 : * pro-rate for one scan.
6453 : */
6454 106680 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6455 : baserel->pages,
6456 : get_indexpath_pages(bitmapqual),
6457 : root);
6458 106680 : pages_fetched /= loop_count;
6459 : }
6460 :
6461 559756 : if (pages_fetched >= T)
6462 50270 : pages_fetched = T;
6463 : else
6464 509486 : pages_fetched = ceil(pages_fetched);
6465 :
6466 559756 : if (maxentries < heap_pages)
6467 : {
6468 : double exact_pages;
6469 : double lossy_pages;
6470 :
6471 : /*
6472 : * Crude approximation of the number of lossy pages. Because of the
6473 : * way tbm_lossify() is coded, the number of lossy pages increases
6474 : * very sharply as soon as we run short of memory; this formula has
6475 : * that property and seems to perform adequately in testing, but it's
6476 : * possible we could do better somehow.
6477 : */
6478 18 : lossy_pages = Max(0, heap_pages - maxentries / 2);
6479 18 : exact_pages = heap_pages - lossy_pages;
6480 :
6481 : /*
6482 : * If there are lossy pages then recompute the number of tuples
6483 : * processed by the bitmap heap node. We assume here that the chance
6484 : * of a given tuple coming from an exact page is the same as the
6485 : * chance that a given page is exact. This might not be true, but
6486 : * it's not clear how we can do any better.
6487 : */
6488 18 : if (lossy_pages > 0)
6489 : tuples_fetched =
6490 18 : clamp_row_est(indexSelectivity *
6491 18 : (exact_pages / heap_pages) * baserel->tuples +
6492 18 : (lossy_pages / heap_pages) * baserel->tuples);
6493 : }
6494 :
6495 559756 : if (cost_p)
6496 431960 : *cost_p = indexTotalCost;
6497 559756 : if (tuples_p)
6498 431960 : *tuples_p = tuples_fetched;
6499 :
6500 559756 : return pages_fetched;
6501 : }
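
Plugging hypothetical numbers into the formulas above: with T = 1000 heap pages and 500 tuples fetched in a single scan, pages_fetched = (2 * 1000 * 500) / (2 * 1000 + 500) = 400. If the bitmap could hold only 100 entries, the lossy-page approximation would mark max(0, 400 - 100/2) = 350 of those pages lossy, leaving 50 exact. A standalone sketch of both formulas:

/* Illustrative sketch only: the single-scan Mackert-Lohman page count
 * and the lossy-page approximation, with hypothetical inputs. */
#include <stdio.h>

int
main(void)
{
    double T = 1000.0;            /* heap pages in the relation */
    double tuples_fetched = 500.0;
    double maxentries = 100.0;    /* assumed bitmap capacity */

    /* Mackert-Lohman, T <= b case: no page is re-read */
    double pages_fetched = (2.0 * T * tuples_fetched) /
                           (2.0 * T + tuples_fetched);      /* 400 */

    double heap_pages = (pages_fetched < T) ? pages_fetched : T;
    double lossy_pages = heap_pages - maxentries / 2.0;

    if (lossy_pages < 0)
        lossy_pages = 0;

    printf("pages = %g, lossy = %g, exact = %g\n",
           pages_fetched, lossy_pages, heap_pages - lossy_pages);
    return 0;
}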