Line : Execution count : Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * costsize.c
4 : * Routines to compute (and set) relation sizes and path costs
5 : *
6 : * Path costs are measured in arbitrary units established by these basic
7 : * parameters:
8 : *
9 : * seq_page_cost Cost of a sequential page fetch
10 : * random_page_cost Cost of a non-sequential page fetch
11 : * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 : * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 : * cpu_operator_cost Cost of CPU time to execute an operator or function
14 : * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
15 : * parallel_setup_cost Cost of setting up shared memory for parallelism
16 : *
17 : * We expect that the kernel will typically do some amount of read-ahead
18 : * optimization; this in conjunction with seek costs means that seq_page_cost
19 : * is normally considerably less than random_page_cost. (However, if the
20 : * database is fully cached in RAM, it is reasonable to set them equal.)
21 : *
22 : * We also use a rough estimate "effective_cache_size" of the number of
23 : * disk pages in Postgres + OS-level disk cache. (We can't simply use
24 : * NBuffers for this purpose because that would ignore the effects of
25 : * the kernel's disk cache.)
26 : *
27 : * Obviously, taking constants for these values is an oversimplification,
28 : * but it's tough enough to get any useful estimates even at this level of
29 : * detail. Note that all of these parameters are user-settable, in case
30 : * the default values are drastically off for a particular platform.
31 : *
32 : * seq_page_cost and random_page_cost can also be overridden for an individual
33 : * tablespace, in case some data is on a fast disk and other data is on a slow
34 : * disk. Per-tablespace overrides never apply to temporary work files such as
35 : * an external sort or a materialize node that overflows work_mem.
36 : *
37 : * We compute two separate costs for each path:
38 : * total_cost: total estimated cost to fetch all tuples
39 : * startup_cost: cost that is expended before first tuple is fetched
40 : * In some scenarios, such as when there is a LIMIT or we are implementing
41 : * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42 : * path's result. A caller can estimate the cost of fetching a partial
43 : * result by interpolating between startup_cost and total_cost. In detail:
44 : * actual_cost = startup_cost +
45 : * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46 : * Note that a base relation's rows count (and, by extension, plan_rows for
47 : * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
48 : * that this equation works properly. (Note: while path->rows is never zero
49 : * for ordinary relations, it is zero for paths for provably-empty relations,
50 : * so beware of division-by-zero.) The LIMIT is applied as a top-level
51 : * plan node.
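 *
 * For example (purely illustrative numbers): with startup_cost = 0,
 * total_cost = 1000 and path->rows = 10000, a caller wanting only the
 * first 100 tuples would estimate 0 + (1000 - 0) * 100 / 10000 = 10
 * cost units for the partial fetch.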
52 : *
53 : * Each path stores the total number of disabled nodes that exist at or
54 : * below that point in the plan tree. This is regarded as a component of
55 : * the cost, and paths with fewer disabled nodes should be regarded as
56 : * cheaper than those with more. Disabled nodes occur when the user sets
57 : * a GUC like enable_seqscan=false. We can't necessarily respect such a
58 : * setting in every part of the plan tree, but we want to respect it in as many
59 : * parts of the plan tree as possible. Simpler schemes like storing a Boolean
60 : * here rather than a count fail to do that. We used to disable nodes by
61 : * adding a large constant to the startup cost, but that distorted planning
62 : * in other ways.
63 : *
64 : * For largely historical reasons, most of the routines in this module use
65 : * the passed result Path only to store their results (rows, startup_cost and
66 : * total_cost) into. All the input data they need is passed as separate
67 : * parameters, even though much of it could be extracted from the Path.
68 : * An exception is made for the cost_XXXjoin() routines, which expect all
69 : * the other fields of the passed XXXPath to be filled in, and similarly
70 : * cost_index() assumes the passed IndexPath is valid except for its output
71 : * values.
72 : *
73 : *
74 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
75 : * Portions Copyright (c) 1994, Regents of the University of California
76 : *
77 : * IDENTIFICATION
78 : * src/backend/optimizer/path/costsize.c
79 : *
80 : *-------------------------------------------------------------------------
81 : */
82 :
83 : #include "postgres.h"
84 :
85 : #include <limits.h>
86 : #include <math.h>
87 :
88 : #include "access/amapi.h"
89 : #include "access/htup_details.h"
90 : #include "access/tsmapi.h"
91 : #include "executor/executor.h"
92 : #include "executor/nodeAgg.h"
93 : #include "executor/nodeHash.h"
94 : #include "executor/nodeMemoize.h"
95 : #include "miscadmin.h"
96 : #include "nodes/makefuncs.h"
97 : #include "nodes/nodeFuncs.h"
98 : #include "optimizer/clauses.h"
99 : #include "optimizer/cost.h"
100 : #include "optimizer/optimizer.h"
101 : #include "optimizer/pathnode.h"
102 : #include "optimizer/paths.h"
103 : #include "optimizer/placeholder.h"
104 : #include "optimizer/plancat.h"
105 : #include "optimizer/restrictinfo.h"
106 : #include "parser/parsetree.h"
107 : #include "utils/lsyscache.h"
108 : #include "utils/selfuncs.h"
109 : #include "utils/spccache.h"
110 : #include "utils/tuplesort.h"
111 :
112 :
113 : #define LOG2(x) (log(x) / 0.693147180559945)
114 :
115 : /*
116 : * Append and MergeAppend nodes are less expensive than some other operations
117 : * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
118 : * per-tuple cost as cpu_tuple_cost multiplied by this value.
119 : */
120 : #define APPEND_CPU_COST_MULTIPLIER 0.5
121 :
122 : /*
123 : * Maximum value for row estimates. We cap row estimates to this to help
124 : * ensure that costs based on these estimates remain within the range of what
125 : * double can represent. add_path() wouldn't act sanely given infinite or NaN
126 : * cost values.
127 : */
128 : #define MAXIMUM_ROWCOUNT 1e100
129 :
130 : double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
131 : double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
132 : double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
133 : double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
134 : double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
135 : double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
136 : double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
137 : double recursive_worktable_factor = DEFAULT_RECURSIVE_WORKTABLE_FACTOR;
138 :
139 : int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
140 :
141 : Cost disable_cost = 1.0e10;
142 :
143 : int max_parallel_workers_per_gather = 2;
144 :
145 : bool enable_seqscan = true;
146 : bool enable_indexscan = true;
147 : bool enable_indexonlyscan = true;
148 : bool enable_bitmapscan = true;
149 : bool enable_tidscan = true;
150 : bool enable_sort = true;
151 : bool enable_incremental_sort = true;
152 : bool enable_hashagg = true;
153 : bool enable_nestloop = true;
154 : bool enable_material = true;
155 : bool enable_memoize = true;
156 : bool enable_mergejoin = true;
157 : bool enable_hashjoin = true;
158 : bool enable_gathermerge = true;
159 : bool enable_partitionwise_join = false;
160 : bool enable_partitionwise_aggregate = false;
161 : bool enable_parallel_append = true;
162 : bool enable_parallel_hash = true;
163 : bool enable_partition_pruning = true;
164 : bool enable_presorted_aggregate = true;
165 : bool enable_async_append = true;
166 :
167 : typedef struct
168 : {
169 : PlannerInfo *root;
170 : QualCost total;
171 : } cost_qual_eval_context;
172 :
173 : static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
174 : static MergeScanSelCache *cached_scansel(PlannerInfo *root,
175 : RestrictInfo *rinfo,
176 : PathKey *pathkey);
177 : static void cost_rescan(PlannerInfo *root, Path *path,
178 : Cost *rescan_startup_cost, Cost *rescan_total_cost);
179 : static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
180 : static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
181 : ParamPathInfo *param_info,
182 : QualCost *qpqual_cost);
183 : static bool has_indexed_join_quals(NestPath *path);
184 : static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
185 : List *quals);
186 : static double calc_joinrel_size_estimate(PlannerInfo *root,
187 : RelOptInfo *joinrel,
188 : RelOptInfo *outer_rel,
189 : RelOptInfo *inner_rel,
190 : double outer_rows,
191 : double inner_rows,
192 : SpecialJoinInfo *sjinfo,
193 : List *restrictlist);
194 : static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
195 : Relids outer_relids,
196 : Relids inner_relids,
197 : SpecialJoinInfo *sjinfo,
198 : List **restrictlist);
199 : static Cost append_nonpartial_cost(List *subpaths, int numpaths,
200 : int parallel_workers);
201 : static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
202 : static int32 get_expr_width(PlannerInfo *root, const Node *expr);
203 : static double relation_byte_size(double tuples, int width);
204 : static double page_size(double tuples, int width);
205 : static double get_parallel_divisor(Path *path);
206 :
207 :
208 : /*
209 : * clamp_row_est
210 : * Force a row-count estimate to a sane value.
211 : */
212 : double
213 10575862 : clamp_row_est(double nrows)
214 : {
215 : /*
216 : * Avoid infinite and NaN row estimates. Costs derived from such values
217 : * are going to be useless. Also force the estimate to be at least one
218 : * row, to make explain output look better and to avoid possible
219 : * divide-by-zero when interpolating costs. Make it an integer, too.
220 : */
221 10575862 : if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
222 0 : nrows = MAXIMUM_ROWCOUNT;
223 10575862 : else if (nrows <= 1.0)
224 3373522 : nrows = 1.0;
225 : else
226 7202340 : nrows = rint(nrows);
227 :
228 10575862 : return nrows;
229 : }
230 :
231 : /*
232 : * clamp_width_est
233 : * Force a tuple-width estimate to a sane value.
234 : *
235 : * The planner represents datatype width and tuple width estimates as int32.
236 : * When summing column width estimates to create a tuple width estimate,
237 : * it's possible to reach integer overflow in edge cases. To ensure sane
238 : * behavior, we form such sums in int64 arithmetic and then apply this routine
239 : * to clamp to int32 range.
240 : */
241 : int32
242 2001956 : clamp_width_est(int64 tuple_width)
243 : {
244 : /*
245 : * Anything more than MaxAllocSize is clearly bogus, since we could not
246 : * create a tuple that large.
247 : */
248 2001956 : if (tuple_width > MaxAllocSize)
249 0 : return (int32) MaxAllocSize;
250 :
251 : /*
252 : * Unlike clamp_row_est, we just Assert that the value isn't negative,
253 : * rather than masking such errors.
254 : */
255 : Assert(tuple_width >= 0);
256 :
257 2001956 : return (int32) tuple_width;
258 : }
259 :
260 :
261 : /*
262 : * cost_seqscan
263 : * Determines and returns the cost of scanning a relation sequentially.
264 : *
265 : * 'baserel' is the relation to be scanned
266 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
267 : */
268 : void
269 445154 : cost_seqscan(Path *path, PlannerInfo *root,
270 : RelOptInfo *baserel, ParamPathInfo *param_info)
271 : {
272 445154 : Cost startup_cost = 0;
273 : Cost cpu_run_cost;
274 : Cost disk_run_cost;
275 : double spc_seq_page_cost;
276 : QualCost qpqual_cost;
277 : Cost cpu_per_tuple;
278 :
279 : /* Should only be applied to base relations */
280 : Assert(baserel->relid > 0);
281 : Assert(baserel->rtekind == RTE_RELATION);
282 :
283 : /* Mark the path with the correct row estimate */
284 445154 : if (param_info)
285 840 : path->rows = param_info->ppi_rows;
286 : else
287 444314 : path->rows = baserel->rows;
288 :
289 : /* fetch estimated page cost for tablespace containing table */
290 445154 : get_tablespace_page_costs(baserel->reltablespace,
291 : NULL,
292 : &spc_seq_page_cost);
293 :
294 : /*
295 : * disk costs
296 : */
297 445154 : disk_run_cost = spc_seq_page_cost * baserel->pages;
298 :
299 : /* CPU costs */
300 445154 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
301 :
302 445154 : startup_cost += qpqual_cost.startup;
303 445154 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
304 445154 : cpu_run_cost = cpu_per_tuple * baserel->tuples;
305 : /* tlist eval costs are paid per output row, not per tuple scanned */
306 445154 : startup_cost += path->pathtarget->cost.startup;
307 445154 : cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
308 :
309 : /* Adjust costing for parallelism, if used. */
310 445154 : if (path->parallel_workers > 0)
311 : {
312 28408 : double parallel_divisor = get_parallel_divisor(path);
313 :
314 : /* The CPU cost is divided among all the workers. */
315 28408 : cpu_run_cost /= parallel_divisor;
316 :
317 : /*
318 : * It may be possible to amortize some of the I/O cost, but probably
319 : * not very much, because most operating systems already do aggressive
320 : * prefetching. For now, we assume that the disk run cost can't be
321 : * amortized at all.
322 : */
323 :
324 : /*
325 : * In the case of a parallel plan, the row count needs to represent
326 : * the number of tuples processed per worker.
327 : */
328 28408 : path->rows = clamp_row_est(path->rows / parallel_divisor);
329 : }
330 :
331 445154 : path->disabled_nodes = enable_seqscan ? 0 : 1;
332 445154 : path->startup_cost = startup_cost;
333 445154 : path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
334 445154 : }
335 :
336 : /*
337 : * cost_samplescan
338 : * Determines and returns the cost of scanning a relation using sampling.
339 : *
340 : * 'baserel' is the relation to be scanned
341 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
342 : */
343 : void
344 306 : cost_samplescan(Path *path, PlannerInfo *root,
345 : RelOptInfo *baserel, ParamPathInfo *param_info)
346 : {
347 306 : Cost startup_cost = 0;
348 306 : Cost run_cost = 0;
349 : RangeTblEntry *rte;
350 : TableSampleClause *tsc;
351 : TsmRoutine *tsm;
352 : double spc_seq_page_cost,
353 : spc_random_page_cost,
354 : spc_page_cost;
355 : QualCost qpqual_cost;
356 : Cost cpu_per_tuple;
357 :
358 : /* Should only be applied to base relations with tablesample clauses */
359 : Assert(baserel->relid > 0);
360 306 : rte = planner_rt_fetch(baserel->relid, root);
361 : Assert(rte->rtekind == RTE_RELATION);
362 306 : tsc = rte->tablesample;
363 : Assert(tsc != NULL);
364 306 : tsm = GetTsmRoutine(tsc->tsmhandler);
365 :
366 : /* Mark the path with the correct row estimate */
367 306 : if (param_info)
368 72 : path->rows = param_info->ppi_rows;
369 : else
370 234 : path->rows = baserel->rows;
371 :
372 : /* fetch estimated page cost for tablespace containing table */
373 306 : get_tablespace_page_costs(baserel->reltablespace,
374 : &spc_random_page_cost,
375 : &spc_seq_page_cost);
376 :
377 : /* if NextSampleBlock is used, assume random access, else sequential */
378 612 : spc_page_cost = (tsm->NextSampleBlock != NULL) ?
379 306 : spc_random_page_cost : spc_seq_page_cost;
380 :
381 : /*
382 : * disk costs (recall that baserel->pages has already been set to the
383 : * number of pages the sampling method will visit)
384 : */
385 306 : run_cost += spc_page_cost * baserel->pages;
386 :
387 : /*
388 : * CPU costs (recall that baserel->tuples has already been set to the
389 : * number of tuples the sampling method will select). Note that we ignore
390 : * execution cost of the TABLESAMPLE parameter expressions; they will be
391 : * evaluated only once per scan, and in most usages they'll likely be
392 : * simple constants anyway. We also don't charge anything for the
393 : * calculations the sampling method might do internally.
394 : */
395 306 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
396 :
397 306 : startup_cost += qpqual_cost.startup;
398 306 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
399 306 : run_cost += cpu_per_tuple * baserel->tuples;
400 : /* tlist eval costs are paid per output row, not per tuple scanned */
401 306 : startup_cost += path->pathtarget->cost.startup;
402 306 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
403 :
404 306 : path->disabled_nodes = 0;
405 306 : path->startup_cost = startup_cost;
406 306 : path->total_cost = startup_cost + run_cost;
407 306 : }
408 :
409 : /*
410 : * cost_gather
411 : * Determines and returns the cost of gather path.
412 : *
413 : * 'rel' is the relation to be operated upon
414 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
415 : * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
416 : * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
417 : * correspond to any particular RelOptInfo.
418 : */
419 : void
420 25398 : cost_gather(GatherPath *path, PlannerInfo *root,
421 : RelOptInfo *rel, ParamPathInfo *param_info,
422 : double *rows)
423 : {
424 25398 : Cost startup_cost = 0;
425 25398 : Cost run_cost = 0;
426 :
427 : /* Mark the path with the correct row estimate */
428 25398 : if (rows)
429 6264 : path->path.rows = *rows;
430 19134 : else if (param_info)
431 0 : path->path.rows = param_info->ppi_rows;
432 : else
433 19134 : path->path.rows = rel->rows;
434 :
435 25398 : startup_cost = path->subpath->startup_cost;
436 :
437 25398 : run_cost = path->subpath->total_cost - path->subpath->startup_cost;
438 :
439 : /* Parallel setup and communication cost. */
440 25398 : startup_cost += parallel_setup_cost;
441 25398 : run_cost += parallel_tuple_cost * path->path.rows;
442 :
443 25398 : path->path.disabled_nodes = path->subpath->disabled_nodes;
444 25398 : path->path.startup_cost = startup_cost;
445 25398 : path->path.total_cost = (startup_cost + run_cost);
446 25398 : }
447 :
448 : /*
449 : * cost_gather_merge
450 : * Determines and returns the cost of gather merge path.
451 : *
452 : * GatherMerge merges several pre-sorted input streams, using a heap that at
453 : * any given instant holds the next tuple from each stream. If there are N
454 : * streams, we need about N*log2(N) tuple comparisons to construct the heap at
455 : * startup, and then for each output tuple, about log2(N) comparisons to
456 : * replace the top heap entry with the next tuple from the same stream.
457 : */
458 : void
459 18894 : cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
460 : RelOptInfo *rel, ParamPathInfo *param_info,
461 : int input_disabled_nodes,
462 : Cost input_startup_cost, Cost input_total_cost,
463 : double *rows)
464 : {
465 18894 : Cost startup_cost = 0;
466 18894 : Cost run_cost = 0;
467 : Cost comparison_cost;
468 : double N;
469 : double logN;
470 :
471 : /* Mark the path with the correct row estimate */
472 18894 : if (rows)
473 11278 : path->path.rows = *rows;
474 7616 : else if (param_info)
475 0 : path->path.rows = param_info->ppi_rows;
476 : else
477 7616 : path->path.rows = rel->rows;
478 :
479 : /*
480 : * Add one to the number of workers to account for the leader. This might
481 : * be overgenerous since the leader will do less work than other workers
482 : * in typical cases, but we'll go with it for now.
483 : */
484 : Assert(path->num_workers > 0);
485 18894 : N = (double) path->num_workers + 1;
486 18894 : logN = LOG2(N);
487 :
488 : /* Assumed cost per tuple comparison */
489 18894 : comparison_cost = 2.0 * cpu_operator_cost;
490 :
491 : /* Heap creation cost */
492 18894 : startup_cost += comparison_cost * N * logN;
493 :
494 : /* Per-tuple heap maintenance cost */
495 18894 : run_cost += path->path.rows * comparison_cost * logN;
496 :
497 : /* small cost for heap management, like cost_merge_append */
498 18894 : run_cost += cpu_operator_cost * path->path.rows;
499 :
500 : /*
501 : * Parallel setup and communication cost. Since Gather Merge, unlike
502 : * Gather, requires us to block until a tuple is available from every
503 : * worker, we bump the IPC cost up a little bit as compared with Gather.
504 : * For lack of a better idea, charge an extra 5%.
505 : */
506 18894 : startup_cost += parallel_setup_cost;
507 18894 : run_cost += parallel_tuple_cost * path->path.rows * 1.05;
508 :
509 18894 : path->path.disabled_nodes = input_disabled_nodes
510 18894 : + (enable_gathermerge ? 0 : 1);
511 18894 : path->path.startup_cost = startup_cost + input_startup_cost;
512 18894 : path->path.total_cost = (startup_cost + run_cost + input_total_cost);
513 18894 : }
514 :
515 : /*
516 : * cost_index
517 : * Determines and returns the cost of scanning a relation using an index.
518 : *
519 : * 'path' describes the indexscan under consideration, and is complete
520 : * except for the fields to be set by this routine
521 : * 'loop_count' is the number of repetitions of the indexscan to factor into
522 : * estimates of caching behavior
523 : *
524 : * In addition to rows, startup_cost and total_cost, cost_index() sets the
525 : * path's indextotalcost and indexselectivity fields. These values will be
526 : * needed if the IndexPath is used in a BitmapIndexScan.
527 : *
528 : * NOTE: path->indexquals must contain only clauses usable as index
529 : * restrictions. Any additional quals evaluated as qpquals may reduce the
530 : * number of returned tuples, but they won't reduce the number of tuples
531 : * we have to fetch from the table, so they don't reduce the scan cost.
532 : */
533 : void
534 820160 : cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
535 : bool partial_path)
536 : {
537 820160 : IndexOptInfo *index = path->indexinfo;
538 820160 : RelOptInfo *baserel = index->rel;
539 820160 : bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
540 : amcostestimate_function amcostestimate;
541 : List *qpquals;
542 820160 : Cost startup_cost = 0;
543 820160 : Cost run_cost = 0;
544 820160 : Cost cpu_run_cost = 0;
545 : Cost indexStartupCost;
546 : Cost indexTotalCost;
547 : Selectivity indexSelectivity;
548 : double indexCorrelation,
549 : csquared;
550 : double spc_seq_page_cost,
551 : spc_random_page_cost;
552 : Cost min_IO_cost,
553 : max_IO_cost;
554 : QualCost qpqual_cost;
555 : Cost cpu_per_tuple;
556 : double tuples_fetched;
557 : double pages_fetched;
558 : double rand_heap_pages;
559 : double index_pages;
560 :
561 : /* Should only be applied to base relations */
562 : Assert(IsA(baserel, RelOptInfo) &&
563 : IsA(index, IndexOptInfo));
564 : Assert(baserel->relid > 0);
565 : Assert(baserel->rtekind == RTE_RELATION);
566 :
567 : /*
568 : * Mark the path with the correct row estimate, and identify which quals
569 : * will need to be enforced as qpquals. We need not check any quals that
570 : * are implied by the index's predicate, so we can use indrestrictinfo not
571 : * baserestrictinfo as the list of relevant restriction clauses for the
572 : * rel.
573 : */
574 820160 : if (path->path.param_info)
575 : {
576 156740 : path->path.rows = path->path.param_info->ppi_rows;
577 : /* qpquals come from the rel's restriction clauses and ppi_clauses */
578 156740 : qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
579 : path->indexclauses),
580 156740 : extract_nonindex_conditions(path->path.param_info->ppi_clauses,
581 : path->indexclauses));
582 : }
583 : else
584 : {
585 663420 : path->path.rows = baserel->rows;
586 : /* qpquals come from just the rel's restriction clauses */
587 663420 : qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
588 : path->indexclauses);
589 : }
590 :
591 : /* we don't need to check enable_indexonlyscan; indxpath.c does that */
592 820160 : path->path.disabled_nodes = enable_indexscan ? 0 : 1;
593 :
594 : /*
595 : * Call index-access-method-specific code to estimate the processing cost
596 : * for scanning the index, as well as the selectivity of the index (ie,
597 : * the fraction of main-table tuples we will have to retrieve) and its
598 : * correlation to the main-table tuple order. We need a cast here because
599 : * pathnodes.h uses a weak function type to avoid including amapi.h.
600 : */
601 820160 : amcostestimate = (amcostestimate_function) index->amcostestimate;
602 820160 : amcostestimate(root, path, loop_count,
603 : &indexStartupCost, &indexTotalCost,
604 : &indexSelectivity, &indexCorrelation,
605 : &index_pages);
606 :
607 : /*
608 : * Save amcostestimate's results for possible use in bitmap scan planning.
609 : * We don't bother to save indexStartupCost or indexCorrelation, because a
610 : * bitmap scan doesn't care about either.
611 : */
612 820160 : path->indextotalcost = indexTotalCost;
613 820160 : path->indexselectivity = indexSelectivity;
614 :
615 : /* all costs for touching index itself included here */
616 820160 : startup_cost += indexStartupCost;
617 820160 : run_cost += indexTotalCost - indexStartupCost;
618 :
619 : /* estimate number of main-table tuples fetched */
620 820160 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
621 :
622 : /* fetch estimated page costs for tablespace containing table */
623 820160 : get_tablespace_page_costs(baserel->reltablespace,
624 : &spc_random_page_cost,
625 : &spc_seq_page_cost);
626 :
627 : /*----------
628 : * Estimate number of main-table pages fetched, and compute I/O cost.
629 : *
630 : * When the index ordering is uncorrelated with the table ordering,
631 : * we use an approximation proposed by Mackert and Lohman (see
632 : * index_pages_fetched() for details) to compute the number of pages
633 : * fetched, and then charge spc_random_page_cost per page fetched.
634 : *
635 : * When the index ordering is exactly correlated with the table ordering
636 : * (just after a CLUSTER, for example), the number of pages fetched should
637 : * be exactly selectivity * table_size. What's more, all but the first
638 : * will be sequential fetches, not the random fetches that occur in the
639 : * uncorrelated case. So if the number of pages is more than 1, we
640 : * ought to charge
641 : * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
642 : * For partially-correlated indexes, we ought to charge somewhere between
643 : * these two estimates. We currently interpolate linearly between the
644 : * estimates based on the correlation squared (XXX is that appropriate?).
645 : *
646 : * If it's an index-only scan, then we will not need to fetch any heap
647 : * pages for which the visibility map shows all tuples are visible.
648 : * Hence, reduce the estimated number of heap fetches accordingly.
649 : * We use the measured fraction of the entire heap that is all-visible,
650 : * which might not be particularly relevant to the subset of the heap
651 : * that this query will fetch; but it's not clear how to do better.
652 : *----------
653 : */
654 820160 : if (loop_count > 1)
655 : {
656 : /*
657 : * For repeated indexscans, the appropriate estimate for the
658 : * uncorrelated case is to scale up the number of tuples fetched in
659 : * the Mackert and Lohman formula by the number of scans, so that we
660 : * estimate the number of pages fetched by all the scans; then
661 : * pro-rate the costs for one scan. In this case we assume all the
662 : * fetches are random accesses.
663 : */
664 90268 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
665 : baserel->pages,
666 90268 : (double) index->pages,
667 : root);
668 :
669 90268 : if (indexonly)
670 11332 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
671 :
672 90268 : rand_heap_pages = pages_fetched;
673 :
674 90268 : max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
675 :
676 : /*
677 : * In the perfectly correlated case, the number of pages touched by
678 : * each scan is selectivity * table_size, and we can use the Mackert
679 : * and Lohman formula at the page level to estimate how much work is
680 : * saved by caching across scans. We still assume all the fetches are
681 : * random, though, which is an overestimate that's hard to correct for
682 : * without double-counting the cache effects. (But in most cases
683 : * where such a plan is actually interesting, only one page would get
684 : * fetched per scan anyway, so it shouldn't matter much.)
685 : */
686 90268 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
687 :
688 90268 : pages_fetched = index_pages_fetched(pages_fetched * loop_count,
689 : baserel->pages,
690 90268 : (double) index->pages,
691 : root);
692 :
693 90268 : if (indexonly)
694 11332 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
695 :
696 90268 : min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
697 : }
698 : else
699 : {
700 : /*
701 : * Normal case: apply the Mackert and Lohman formula, and then
702 : * interpolate between that and the correlation-derived result.
703 : */
704 729892 : pages_fetched = index_pages_fetched(tuples_fetched,
705 : baserel->pages,
706 729892 : (double) index->pages,
707 : root);
708 :
709 729892 : if (indexonly)
710 71564 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
711 :
712 729892 : rand_heap_pages = pages_fetched;
713 :
714 : /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
715 729892 : max_IO_cost = pages_fetched * spc_random_page_cost;
716 :
717 : /* min_IO_cost is for the perfectly correlated case (csquared=1) */
718 729892 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
719 :
720 729892 : if (indexonly)
721 71564 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
722 :
723 729892 : if (pages_fetched > 0)
724 : {
725 650942 : min_IO_cost = spc_random_page_cost;
726 650942 : if (pages_fetched > 1)
727 199730 : min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
728 : }
729 : else
730 78950 : min_IO_cost = 0;
731 : }
732 :
733 820160 : if (partial_path)
734 : {
735 : /*
736 : * For index only scans compute workers based on number of index pages
737 : * fetched; the number of heap pages we fetch might be so small as to
738 : * effectively rule out parallelism, which we don't want to do.
739 : */
740 281244 : if (indexonly)
741 26400 : rand_heap_pages = -1;
742 :
743 : /*
744 : * Estimate the number of parallel workers required to scan the index.
745 : * Use the number of heap pages computed on the assumption that heap
746 : * fetches won't be sequential, since for parallel scans the pages are
747 : * accessed in random order.
748 : */
749 281244 : path->path.parallel_workers = compute_parallel_worker(baserel,
750 : rand_heap_pages,
751 : index_pages,
752 : max_parallel_workers_per_gather);
753 :
754 : /*
755 : * Fall out if workers can't be assigned for parallel scan, because in
756 : * such a case this path will be rejected. So there is no benefit in
757 : * doing extra computation.
758 : */
759 281244 : if (path->path.parallel_workers <= 0)
760 271022 : return;
761 :
762 10222 : path->path.parallel_aware = true;
763 : }
764 :
765 : /*
766 : * Now interpolate based on estimated index order correlation to get total
767 : * disk I/O cost for main table accesses.
768 : */
769 549138 : csquared = indexCorrelation * indexCorrelation;
770 :
771 549138 : run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
772 :
773 : /*
774 : * Estimate CPU costs per tuple.
775 : *
776 : * What we want here is cpu_tuple_cost plus the evaluation costs of any
777 : * qual clauses that we have to evaluate as qpquals.
778 : */
779 549138 : cost_qual_eval(&qpqual_cost, qpquals, root);
780 :
781 549138 : startup_cost += qpqual_cost.startup;
782 549138 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
783 :
784 549138 : cpu_run_cost += cpu_per_tuple * tuples_fetched;
785 :
786 : /* tlist eval costs are paid per output row, not per tuple scanned */
787 549138 : startup_cost += path->path.pathtarget->cost.startup;
788 549138 : cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
789 :
790 : /* Adjust costing for parallelism, if used. */
791 549138 : if (path->path.parallel_workers > 0)
792 : {
793 10222 : double parallel_divisor = get_parallel_divisor(&path->path);
794 :
795 10222 : path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
796 :
797 : /* The CPU cost is divided among all the workers. */
798 10222 : cpu_run_cost /= parallel_divisor;
799 : }
800 :
801 549138 : run_cost += cpu_run_cost;
802 :
803 549138 : path->path.startup_cost = startup_cost;
804 549138 : path->path.total_cost = startup_cost + run_cost;
805 : }
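/*
 * Illustrative example of the correlation interpolation above: if
 * max_IO_cost = 400, min_IO_cost = 100 and indexCorrelation = 0.5, then
 * csquared = 0.25 and the charged I/O cost is
 *   400 + 0.25 * (100 - 400) = 325
 * i.e. still much closer to the fully-random estimate than to the
 * fully-correlated one.
 */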
806 :
807 : /*
808 : * extract_nonindex_conditions
809 : *
810 : * Given a list of quals to be enforced in an indexscan, extract the ones that
811 : * will have to be applied as qpquals (ie, the index machinery won't handle
812 : * them). Here we detect only whether a qual clause is directly redundant
813 : * with some indexclause. If the index path is chosen for use, createplan.c
814 : * will try a bit harder to get rid of redundant qual conditions; specifically
815 : * it will see if quals can be proven to be implied by the indexquals. But
816 : * it does not seem worth the cycles to try to factor that in at this stage,
817 : * since we're only trying to estimate qual eval costs. Otherwise this must
818 : * match the logic in create_indexscan_plan().
819 : *
820 : * qual_clauses, and the result, are lists of RestrictInfos.
821 : * indexclauses is a list of IndexClauses.
822 : */
823 : static List *
824 976900 : extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
825 : {
826 976900 : List *result = NIL;
827 : ListCell *lc;
828 :
829 2022480 : foreach(lc, qual_clauses)
830 : {
831 1045580 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
832 :
833 1045580 : if (rinfo->pseudoconstant)
834 10304 : continue; /* we may drop pseudoconstants here */
835 1035276 : if (is_redundant_with_indexclauses(rinfo, indexclauses))
836 616846 : continue; /* dup or derived from same EquivalenceClass */
837 : /* ... skip the predicate proof attempt createplan.c will try ... */
838 418430 : result = lappend(result, rinfo);
839 : }
840 976900 : return result;
841 : }
842 :
843 : /*
844 : * index_pages_fetched
845 : * Estimate the number of pages actually fetched after accounting for
846 : * cache effects.
847 : *
848 : * We use an approximation proposed by Mackert and Lohman, "Index Scans
849 : * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
850 : * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
851 : * The Mackert and Lohman approximation is that the number of pages
852 : * fetched is
853 : * PF =
854 : * min(2TNs/(2T+Ns), T) when T <= b
855 : * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
856 : * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
857 : * where
858 : * T = # pages in table
859 : * N = # tuples in table
860 : * s = selectivity = fraction of table to be scanned
861 : * b = # buffer pages available (we include kernel space here)
862 : *
863 : * We assume that effective_cache_size is the total number of buffer pages
864 : * available for the whole query, and pro-rate that space across all the
865 : * tables in the query and the index currently under consideration. (This
866 : * ignores space needed for other indexes used by the query, but since we
867 : * don't know which indexes will get used, we can't estimate that very well;
868 : * and in any case counting all the tables may well be an overestimate, since
869 : * depending on the join plan not all the tables may be scanned concurrently.)
870 : *
871 : * The product Ns is the number of tuples fetched; we pass in that
872 : * product rather than calculating it here. "pages" is the number of pages
873 : * in the object under consideration (either an index or a table).
874 : * "index_pages" is the amount to add to the total table space, which was
875 : * computed for us by make_one_rel.
876 : *
877 : * Caller is expected to have ensured that tuples_fetched is greater than zero
878 : * and rounded to integer (see clamp_row_est). The result will likewise be
879 : * greater than zero and integral.
880 : */
881 : double
882 1153624 : index_pages_fetched(double tuples_fetched, BlockNumber pages,
883 : double index_pages, PlannerInfo *root)
884 : {
885 : double pages_fetched;
886 : double total_pages;
887 : double T,
888 : b;
889 :
890 : /* T is # pages in table, but don't allow it to be zero */
891 1153624 : T = (pages > 1) ? (double) pages : 1.0;
892 :
893 : /* Compute number of pages assumed to be competing for cache space */
894 1153624 : total_pages = root->total_table_pages + index_pages;
895 1153624 : total_pages = Max(total_pages, 1.0);
896 : Assert(T <= total_pages);
897 :
898 : /* b is pro-rated share of effective_cache_size */
899 1153624 : b = (double) effective_cache_size * T / total_pages;
900 :
901 : /* force it positive and integral */
902 1153624 : if (b <= 1.0)
903 0 : b = 1.0;
904 : else
905 1153624 : b = ceil(b);
906 :
907 : /* This part is the Mackert and Lohman formula */
908 1153624 : if (T <= b)
909 : {
910 1153624 : pages_fetched =
911 1153624 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
912 1153624 : if (pages_fetched >= T)
913 674554 : pages_fetched = T;
914 : else
915 479070 : pages_fetched = ceil(pages_fetched);
916 : }
917 : else
918 : {
919 : double lim;
920 :
921 0 : lim = (2.0 * T * b) / (2.0 * T - b);
922 0 : if (tuples_fetched <= lim)
923 : {
924 0 : pages_fetched =
925 0 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
926 : }
927 : else
928 : {
929 0 : pages_fetched =
930 0 : b + (tuples_fetched - lim) * (T - b) / T;
931 : }
932 0 : pages_fetched = ceil(pages_fetched);
933 : }
934 1153624 : return pages_fetched;
935 : }
936 :
937 : /*
938 : * get_indexpath_pages
939 : * Determine the total size of the indexes used in a bitmap index path.
940 : *
941 : * Note: if the same index is used more than once in a bitmap tree, we will
942 : * count it multiple times, which perhaps is the wrong thing ... but it's
943 : * not completely clear, and detecting duplicates is difficult, so ignore it
944 : * for now.
945 : */
946 : static double
947 193806 : get_indexpath_pages(Path *bitmapqual)
948 : {
949 193806 : double result = 0;
950 : ListCell *l;
951 :
952 193806 : if (IsA(bitmapqual, BitmapAndPath))
953 : {
954 23094 : BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
955 :
956 69282 : foreach(l, apath->bitmapquals)
957 : {
958 46188 : result += get_indexpath_pages((Path *) lfirst(l));
959 : }
960 : }
961 170712 : else if (IsA(bitmapqual, BitmapOrPath))
962 : {
963 70 : BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
964 :
965 222 : foreach(l, opath->bitmapquals)
966 : {
967 152 : result += get_indexpath_pages((Path *) lfirst(l));
968 : }
969 : }
970 170642 : else if (IsA(bitmapqual, IndexPath))
971 : {
972 170642 : IndexPath *ipath = (IndexPath *) bitmapqual;
973 :
974 170642 : result = (double) ipath->indexinfo->pages;
975 : }
976 : else
977 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
978 :
979 193806 : return result;
980 : }
981 :
982 : /*
983 : * cost_bitmap_heap_scan
984 : * Determines and returns the cost of scanning a relation using a bitmap
985 : * index-then-heap plan.
986 : *
987 : * 'baserel' is the relation to be scanned
988 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
989 : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
990 : * 'loop_count' is the number of repetitions of the indexscan to factor into
991 : * estimates of caching behavior
992 : *
993 : * Note: the component IndexPaths in bitmapqual should have been costed
994 : * using the same loop_count.
995 : */
996 : void
997 556780 : cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
998 : ParamPathInfo *param_info,
999 : Path *bitmapqual, double loop_count)
1000 : {
1001 556780 : Cost startup_cost = 0;
1002 556780 : Cost run_cost = 0;
1003 : Cost indexTotalCost;
1004 : QualCost qpqual_cost;
1005 : Cost cpu_per_tuple;
1006 : Cost cost_per_page;
1007 : Cost cpu_run_cost;
1008 : double tuples_fetched;
1009 : double pages_fetched;
1010 : double spc_seq_page_cost,
1011 : spc_random_page_cost;
1012 : double T;
1013 :
1014 : /* Should only be applied to base relations */
1015 : Assert(IsA(baserel, RelOptInfo));
1016 : Assert(baserel->relid > 0);
1017 : Assert(baserel->rtekind == RTE_RELATION);
1018 :
1019 : /* Mark the path with the correct row estimate */
1020 556780 : if (param_info)
1021 236196 : path->rows = param_info->ppi_rows;
1022 : else
1023 320584 : path->rows = baserel->rows;
1024 :
1025 556780 : pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
1026 : loop_count, &indexTotalCost,
1027 : &tuples_fetched);
1028 :
1029 556780 : startup_cost += indexTotalCost;
1030 556780 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
1031 :
1032 : /* Fetch estimated page costs for tablespace containing table. */
1033 556780 : get_tablespace_page_costs(baserel->reltablespace,
1034 : &spc_random_page_cost,
1035 : &spc_seq_page_cost);
1036 :
1037 : /*
1038 : * For small numbers of pages we should charge spc_random_page_cost
1039 : * apiece, while if nearly all the table's pages are being read, it's more
1040 : * appropriate to charge spc_seq_page_cost apiece. The effect is
1041 : * nonlinear, too. For lack of a better idea, interpolate like this to
1042 : * determine the cost per page.
1043 : */
1044 556780 : if (pages_fetched >= 2.0)
1045 118510 : cost_per_page = spc_random_page_cost -
1046 118510 : (spc_random_page_cost - spc_seq_page_cost)
1047 118510 : * sqrt(pages_fetched / T);
1048 : else
1049 438270 : cost_per_page = spc_random_page_cost;
1050 :
1051 556780 : run_cost += pages_fetched * cost_per_page;
1052 :
1053 : /*
1054 : * Estimate CPU costs per tuple.
1055 : *
1056 : * Often the indexquals don't need to be rechecked at each tuple ... but
1057 : * not always, especially not if there are enough tuples involved that the
1058 : * bitmaps become lossy. For the moment, just assume they will be
1059 : * rechecked always. This means we charge the full freight for all the
1060 : * scan clauses.
1061 : */
1062 556780 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1063 :
1064 556780 : startup_cost += qpqual_cost.startup;
1065 556780 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1066 556780 : cpu_run_cost = cpu_per_tuple * tuples_fetched;
1067 :
1068 : /* Adjust costing for parallelism, if used. */
1069 556780 : if (path->parallel_workers > 0)
1070 : {
1071 4286 : double parallel_divisor = get_parallel_divisor(path);
1072 :
1073 : /* The CPU cost is divided among all the workers. */
1074 4286 : cpu_run_cost /= parallel_divisor;
1075 :
1076 4286 : path->rows = clamp_row_est(path->rows / parallel_divisor);
1077 : }
1078 :
1079 :
1080 556780 : run_cost += cpu_run_cost;
1081 :
1082 : /* tlist eval costs are paid per output row, not per tuple scanned */
1083 556780 : startup_cost += path->pathtarget->cost.startup;
1084 556780 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1085 :
1086 556780 : path->disabled_nodes = enable_bitmapscan ? 0 : 1;
1087 556780 : path->startup_cost = startup_cost;
1088 556780 : path->total_cost = startup_cost + run_cost;
1089 556780 : }
1090 :
1091 : /*
1092 : * cost_bitmap_tree_node
1093 : * Extract cost and selectivity from a bitmap tree node (index/and/or)
1094 : */
1095 : void
1096 1025578 : cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
1097 : {
1098 1025578 : if (IsA(path, IndexPath))
1099 : {
1100 971374 : *cost = ((IndexPath *) path)->indextotalcost;
1101 971374 : *selec = ((IndexPath *) path)->indexselectivity;
1102 :
1103 : /*
1104 : * Charge a small amount per retrieved tuple to reflect the costs of
1105 : * manipulating the bitmap. This is mostly to make sure that a bitmap
1106 : * scan doesn't look to be the same cost as an indexscan to retrieve a
1107 : * single tuple.
1108 : */
1109 971374 : *cost += 0.1 * cpu_operator_cost * path->rows;
1110 : }
1111 54204 : else if (IsA(path, BitmapAndPath))
1112 : {
1113 50536 : *cost = path->total_cost;
1114 50536 : *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1115 : }
1116 3668 : else if (IsA(path, BitmapOrPath))
1117 : {
1118 3668 : *cost = path->total_cost;
1119 3668 : *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1120 : }
1121 : else
1122 : {
1123 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1124 : *cost = *selec = 0; /* keep compiler quiet */
1125 : }
1126 1025578 : }
1127 :
1128 : /*
1129 : * cost_bitmap_and_node
1130 : * Estimate the cost of a BitmapAnd node
1131 : *
1132 : * Note that this considers only the costs of index scanning and bitmap
1133 : * creation, not the eventual heap access. In that sense the object isn't
1134 : * truly a Path, but it has enough path-like properties (costs in particular)
1135 : * to warrant treating it as one. We don't bother to set the path rows field,
1136 : * however.
1137 : */
1138 : void
1139 50322 : cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
1140 : {
1141 : Cost totalCost;
1142 : Selectivity selec;
1143 : ListCell *l;
1144 :
1145 : /*
1146 : * We estimate AND selectivity on the assumption that the inputs are
1147 : * independent. This is probably often wrong, but we don't have the info
1148 : * to do better.
1149 : *
1150 : * The runtime cost of the BitmapAnd itself is estimated at 100x
1151 : * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1152 : * definitely too simplistic?
1153 : */
1154 50322 : totalCost = 0.0;
1155 50322 : selec = 1.0;
1156 150966 : foreach(l, path->bitmapquals)
1157 : {
1158 100644 : Path *subpath = (Path *) lfirst(l);
1159 : Cost subCost;
1160 : Selectivity subselec;
1161 :
1162 100644 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1163 :
1164 100644 : selec *= subselec;
1165 :
1166 100644 : totalCost += subCost;
1167 100644 : if (l != list_head(path->bitmapquals))
1168 50322 : totalCost += 100.0 * cpu_operator_cost;
1169 : }
1170 50322 : path->bitmapselectivity = selec;
1171 50322 : path->path.rows = 0; /* per above, not used */
1172 50322 : path->path.disabled_nodes = 0;
1173 50322 : path->path.startup_cost = totalCost;
1174 50322 : path->path.total_cost = totalCost;
1175 50322 : }
1176 :
1177 : /*
1178 : * cost_bitmap_or_node
1179 : * Estimate the cost of a BitmapOr node
1180 : *
1181 : * See comments for cost_bitmap_and_node.
1182 : */
1183 : void
1184 1040 : cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1185 : {
1186 : Cost totalCost;
1187 : Selectivity selec;
1188 : ListCell *l;
1189 :
1190 : /*
1191 : * We estimate OR selectivity on the assumption that the inputs are
1192 : * non-overlapping, since that's often the case in "x IN (list)" type
1193 : * situations. Of course, we clamp to 1.0 at the end.
1194 : *
1195 : * The runtime cost of the BitmapOr itself is estimated at 100x
1196 : * cpu_operator_cost for each tbm_union needed. Probably too small,
1197 : * definitely too simplistic? We are aware that the tbm_unions are
1198 : * optimized out when the inputs are BitmapIndexScans.
1199 : */
1200 1040 : totalCost = 0.0;
1201 1040 : selec = 0.0;
1202 2922 : foreach(l, path->bitmapquals)
1203 : {
1204 1882 : Path *subpath = (Path *) lfirst(l);
1205 : Cost subCost;
1206 : Selectivity subselec;
1207 :
1208 1882 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1209 :
1210 1882 : selec += subselec;
1211 :
1212 1882 : totalCost += subCost;
1213 1882 : if (l != list_head(path->bitmapquals) &&
1214 842 : !IsA(subpath, IndexPath))
1215 0 : totalCost += 100.0 * cpu_operator_cost;
1216 : }
1217 1040 : path->bitmapselectivity = Min(selec, 1.0);
1218 1040 : path->path.rows = 0; /* per above, not used */
1219 1040 : path->path.startup_cost = totalCost;
1220 1040 : path->path.total_cost = totalCost;
1221 1040 : }
1222 :
1223 : /*
1224 : * cost_tidscan
1225 : * Determines and returns the cost of scanning a relation using TIDs.
1226 : *
1227 : * 'baserel' is the relation to be scanned
1228 : * 'tidquals' is the list of TID-checkable quals
1229 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1230 : */
1231 : void
1232 872 : cost_tidscan(Path *path, PlannerInfo *root,
1233 : RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1234 : {
1235 872 : Cost startup_cost = 0;
1236 872 : Cost run_cost = 0;
1237 : QualCost qpqual_cost;
1238 : Cost cpu_per_tuple;
1239 : QualCost tid_qual_cost;
1240 : double ntuples;
1241 : ListCell *l;
1242 : double spc_random_page_cost;
1243 :
1244 : /* Should only be applied to base relations */
1245 : Assert(baserel->relid > 0);
1246 : Assert(baserel->rtekind == RTE_RELATION);
1247 : Assert(tidquals != NIL);
1248 :
1249 : /* Mark the path with the correct row estimate */
1250 872 : if (param_info)
1251 144 : path->rows = param_info->ppi_rows;
1252 : else
1253 728 : path->rows = baserel->rows;
1254 :
1255 : /* Count how many tuples we expect to retrieve */
1256 872 : ntuples = 0;
1257 1770 : foreach(l, tidquals)
1258 : {
1259 898 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
1260 898 : Expr *qual = rinfo->clause;
1261 :
1262 : /*
1263 : * We must use a TID scan for CurrentOfExpr; in any other case, we
1264 : * should be generating a TID scan only if enable_tidscan=true. Also,
1265 : * if CurrentOfExpr is the qual, there should be only one.
1266 : */
1267 : Assert(enable_tidscan || IsA(qual, CurrentOfExpr));
1268 : Assert(list_length(tidquals) == 1 || !IsA(qual, CurrentOfExpr));
1269 :
1270 898 : if (IsA(qual, ScalarArrayOpExpr))
1271 : {
1272 : /* Each element of the array yields 1 tuple */
1273 50 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
1274 50 : Node *arraynode = (Node *) lsecond(saop->args);
1275 :
1276 50 : ntuples += estimate_array_length(root, arraynode);
1277 : }
1278 848 : else if (IsA(qual, CurrentOfExpr))
1279 : {
1280 : /* CURRENT OF yields 1 tuple */
1281 404 : ntuples++;
1282 : }
1283 : else
1284 : {
1285 : /* It's just CTID = something, count 1 tuple */
1286 444 : ntuples++;
1287 : }
1288 : }
1289 :
1290 : /*
1291 : * The TID qual expressions will be computed once, any other baserestrict
1292 : * quals once per retrieved tuple.
1293 : */
1294 872 : cost_qual_eval(&tid_qual_cost, tidquals, root);
1295 :
1296 : /* fetch estimated page cost for tablespace containing table */
1297 872 : get_tablespace_page_costs(baserel->reltablespace,
1298 : &spc_random_page_cost,
1299 : NULL);
1300 :
1301 : /* disk costs --- assume each tuple on a different page */
1302 872 : run_cost += spc_random_page_cost * ntuples;
1303 :
1304 : /* Add scanning CPU costs */
1305 872 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1306 :
1307 : /* XXX currently we assume TID quals are a subset of qpquals */
1308 872 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1309 872 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1310 872 : tid_qual_cost.per_tuple;
1311 872 : run_cost += cpu_per_tuple * ntuples;
1312 :
1313 : /* tlist eval costs are paid per output row, not per tuple scanned */
1314 872 : startup_cost += path->pathtarget->cost.startup;
1315 872 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1316 :
1317 : /*
1318 : * There are assertions above verifying that we only reach this function
1319 : * either when enable_tidscan=true or when the TID scan is the only legal
1320 : * path, so it's safe to set disabled_nodes to zero here.
1321 : */
1322 872 : path->disabled_nodes = 0;
1323 872 : path->startup_cost = startup_cost;
1324 872 : path->total_cost = startup_cost + run_cost;
1325 872 : }
1326 :
1327 : /*
1328 : * cost_tidrangescan
1329 : * Determines and sets the costs of scanning a relation using a range of
1330 : * TIDs for 'path'
1331 : *
1332 : * 'baserel' is the relation to be scanned
1333 : * 'tidrangequals' is the list of TID-checkable range quals
1334 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1335 : */
1336 : void
1337 2052 : cost_tidrangescan(Path *path, PlannerInfo *root,
1338 : RelOptInfo *baserel, List *tidrangequals,
1339 : ParamPathInfo *param_info)
1340 : {
1341 : Selectivity selectivity;
1342 : double pages;
1343 : Cost startup_cost;
1344 : Cost cpu_run_cost;
1345 : Cost disk_run_cost;
1346 : QualCost qpqual_cost;
1347 : Cost cpu_per_tuple;
1348 : QualCost tid_qual_cost;
1349 : double ntuples;
1350 : double nseqpages;
1351 : double spc_random_page_cost;
1352 : double spc_seq_page_cost;
1353 :
1354 : /* Should only be applied to base relations */
1355 : Assert(baserel->relid > 0);
1356 : Assert(baserel->rtekind == RTE_RELATION);
1357 :
1358 : /* Mark the path with the correct row estimate */
1359 2052 : if (param_info)
1360 0 : path->rows = param_info->ppi_rows;
1361 : else
1362 2052 : path->rows = baserel->rows;
1363 :
1364 : /* Count how many tuples and pages we expect to scan */
1365 2052 : selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
1366 : JOIN_INNER, NULL);
1367 2052 : pages = ceil(selectivity * baserel->pages);
1368 :
1369 2052 : if (pages <= 0.0)
1370 42 : pages = 1.0;
1371 :
1372 : /*
1373 : * The first page in a range requires a random seek, but each subsequent
1374 : * page is just a normal sequential page read. NOTE: it's desirable for
1375 : * TID Range Scans to cost more than the equivalent Sequential Scans,
1376 : * because Seq Scans have some performance advantages such as scan
1377 : * synchronization, and we'd prefer one of them to be picked unless a TID
1378 : * Range Scan really is better.
1379 : */
1380 2052 : ntuples = selectivity * baserel->tuples;
1381 2052 : nseqpages = pages - 1.0;
1382 :
1383 : /*
1384 : * The TID qual expressions will be computed once, any other baserestrict
1385 : * quals once per retrieved tuple.
1386 : */
1387 2052 : cost_qual_eval(&tid_qual_cost, tidrangequals, root);
1388 :
1389 : /* fetch estimated page cost for tablespace containing table */
1390 2052 : get_tablespace_page_costs(baserel->reltablespace,
1391 : &spc_random_page_cost,
1392 : &spc_seq_page_cost);
1393 :
1394 : /* disk costs; 1 random page and the remainder as seq pages */
1395 2052 : disk_run_cost = spc_random_page_cost + spc_seq_page_cost * nseqpages;
1396 :
1397 : /* Add scanning CPU costs */
1398 2052 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1399 :
1400 : /*
1401 : * XXX currently we assume TID quals are a subset of qpquals at this
1402 : * point; they will be removed (if possible) when we create the plan, so
1403 : * we subtract their cost from the total qpqual cost. (If the TID quals
1404 : * can't be removed, this is a mistake and we're going to underestimate
1405 : * the CPU cost a bit.)
1406 : */
1407 2052 : startup_cost = qpqual_cost.startup + tid_qual_cost.per_tuple;
1408 2052 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1409 2052 : tid_qual_cost.per_tuple;
1410 2052 : cpu_run_cost = cpu_per_tuple * ntuples;
1411 :
1412 : /* tlist eval costs are paid per output row, not per tuple scanned */
1413 2052 : startup_cost += path->pathtarget->cost.startup;
1414 2052 : cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
1415 :
1416 : /* Adjust costing for parallelism, if used. */
1417 2052 : if (path->parallel_workers > 0)
1418 : {
1419 48 : double parallel_divisor = get_parallel_divisor(path);
1420 :
1421 : /* The CPU cost is divided among all the workers. */
1422 48 : cpu_run_cost /= parallel_divisor;
1423 :
1424 : /*
1425 : * In the case of a parallel plan, the row count needs to represent
1426 : * the number of tuples processed per worker.
1427 : */
1428 48 : path->rows = clamp_row_est(path->rows / parallel_divisor);
1429 : }
1430 :
1431 : /* we should not generate this path type when enable_tidscan=false */
1432 : Assert(enable_tidscan);
1433 2052 : path->disabled_nodes = 0;
1434 2052 : path->startup_cost = startup_cost;
1435 2052 : path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
1436 2052 : }
1437 :
1438 : /*
1439 : * cost_subqueryscan
1440 : * Determines and returns the cost of scanning a subquery RTE.
1441 : *
1442 : * 'baserel' is the relation to be scanned
1443 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1444 : * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1445 : */
1446 : void
1447 62306 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1448 : RelOptInfo *baserel, ParamPathInfo *param_info,
1449 : bool trivial_pathtarget)
1450 : {
1451 : Cost startup_cost;
1452 : Cost run_cost;
1453 : List *qpquals;
1454 : QualCost qpqual_cost;
1455 : Cost cpu_per_tuple;
1456 :
1457 : /* Should only be applied to base relations that are subqueries */
1458 : Assert(baserel->relid > 0);
1459 : Assert(baserel->rtekind == RTE_SUBQUERY);
1460 :
1461 : /*
1462 : * We compute the rowcount estimate as the subplan's estimate times the
1463 : * selectivity of relevant restriction clauses. In simple cases this will
1464 : * come out the same as baserel->rows; but when dealing with parallelized
1465 : * paths we must do it like this to get the right answer.
1466 : */
1467 62306 : if (param_info)
1468 618 : qpquals = list_concat_copy(param_info->ppi_clauses,
1469 618 : baserel->baserestrictinfo);
1470 : else
1471 61688 : qpquals = baserel->baserestrictinfo;
1472 :
1473 62306 : path->path.rows = clamp_row_est(path->subpath->rows *
1474 62306 : clauselist_selectivity(root,
1475 : qpquals,
1476 : 0,
1477 : JOIN_INNER,
1478 : NULL));
1479 :
1480 : /*
1481 : * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1482 : * any restriction clauses and tlist that will be attached to the
1483 : * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1484 : * projection overhead.
1485 : */
1486 62306 : path->path.disabled_nodes = path->subpath->disabled_nodes;
1487 62306 : path->path.startup_cost = path->subpath->startup_cost;
1488 62306 : path->path.total_cost = path->subpath->total_cost;
1489 :
1490 : /*
1491 : * However, if there are no relevant restriction clauses and the
1492 : * pathtarget is trivial, then we expect that setrefs.c will optimize away
1493 : * the SubqueryScan plan node altogether, so we should just make its cost
1494 : * and rowcount equal to the input path's.
1495 : *
1496 : * Note: there are some edge cases where createplan.c will apply a
1497 : * different targetlist to the SubqueryScan node, thus falsifying our
1498 : * current estimate of whether the target is trivial, and making the cost
1499 : * estimate (though not the rowcount) wrong. It does not seem worth the
1500 : * extra complication to try to account for that exactly, especially since
1501 : * that behavior falsifies other cost estimates as well.
1502 : */
1503 62306 : if (qpquals == NIL && trivial_pathtarget)
1504 27542 : return;
1505 :
1506 34764 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1507 :
1508 34764 : startup_cost = qpqual_cost.startup;
1509 34764 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1510 34764 : run_cost = cpu_per_tuple * path->subpath->rows;
1511 :
1512 : /* tlist eval costs are paid per output row, not per tuple scanned */
1513 34764 : startup_cost += path->path.pathtarget->cost.startup;
1514 34764 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1515 :
1516 34764 : path->path.startup_cost += startup_cost;
1517 34764 : path->path.total_cost += startup_cost + run_cost;
1518 : }
1519 :
1520 : /*
1521 : * cost_functionscan
1522 : * Determines and returns the cost of scanning a function RTE.
1523 : *
1524 : * 'baserel' is the relation to be scanned
1525 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1526 : */
1527 : void
1528 52296 : cost_functionscan(Path *path, PlannerInfo *root,
1529 : RelOptInfo *baserel, ParamPathInfo *param_info)
1530 : {
1531 52296 : Cost startup_cost = 0;
1532 52296 : Cost run_cost = 0;
1533 : QualCost qpqual_cost;
1534 : Cost cpu_per_tuple;
1535 : RangeTblEntry *rte;
1536 : QualCost exprcost;
1537 :
1538 : /* Should only be applied to base relations that are functions */
1539 : Assert(baserel->relid > 0);
1540 52296 : rte = planner_rt_fetch(baserel->relid, root);
1541 : Assert(rte->rtekind == RTE_FUNCTION);
1542 :
1543 : /* Mark the path with the correct row estimate */
1544 52296 : if (param_info)
1545 8596 : path->rows = param_info->ppi_rows;
1546 : else
1547 43700 : path->rows = baserel->rows;
1548 :
1549 : /*
1550 : * Estimate costs of executing the function expression(s).
1551 : *
1552 : * Currently, nodeFunctionscan.c always executes the functions to
1553 : * completion before returning any rows, and caches the results in a
1554 : * tuplestore. So the function eval cost is all startup cost, and per-row
1555 : * costs are minimal.
1556 : *
1557 : * XXX in principle we ought to charge tuplestore spill costs if the
1558 : * number of rows is large. However, given how phony our rowcount
1559 : * estimates for functions tend to be, there's not a lot of point in that
1560 : * refinement right now.
1561 : */
1562 52296 : cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1563 :
1564 52296 : startup_cost += exprcost.startup + exprcost.per_tuple;
1565 :
1566 : /* Add scanning CPU costs */
1567 52296 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1568 :
1569 52296 : startup_cost += qpqual_cost.startup;
1570 52296 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1571 52296 : run_cost += cpu_per_tuple * baserel->tuples;
1572 :
1573 : /* tlist eval costs are paid per output row, not per tuple scanned */
1574 52296 : startup_cost += path->pathtarget->cost.startup;
1575 52296 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1576 :
1577 52296 : path->disabled_nodes = 0;
1578 52296 : path->startup_cost = startup_cost;
1579 52296 : path->total_cost = startup_cost + run_cost;
1580 52296 : }
1581 :
1582 : /*
1583 : * cost_tablefuncscan
1584 : * Determines and returns the cost of scanning a table function.
1585 : *
1586 : * 'baserel' is the relation to be scanned
1587 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1588 : */
1589 : void
1590 626 : cost_tablefuncscan(Path *path, PlannerInfo *root,
1591 : RelOptInfo *baserel, ParamPathInfo *param_info)
1592 : {
1593 626 : Cost startup_cost = 0;
1594 626 : Cost run_cost = 0;
1595 : QualCost qpqual_cost;
1596 : Cost cpu_per_tuple;
1597 : RangeTblEntry *rte;
1598 : QualCost exprcost;
1599 :
1600 : /* Should only be applied to base relations that are functions */
1601 : Assert(baserel->relid > 0);
1602 626 : rte = planner_rt_fetch(baserel->relid, root);
1603 : Assert(rte->rtekind == RTE_TABLEFUNC);
1604 :
1605 : /* Mark the path with the correct row estimate */
1606 626 : if (param_info)
1607 234 : path->rows = param_info->ppi_rows;
1608 : else
1609 392 : path->rows = baserel->rows;
1610 :
1611 : /*
1612 : * Estimate costs of executing the table func expression(s).
1613 : *
1614 : * XXX in principle we ought to charge tuplestore spill costs if the
1615 : * number of rows is large. However, given how phony our rowcount
1616 : * estimates for tablefuncs tend to be, there's not a lot of point in that
1617 : * refinement right now.
1618 : */
1619 626 : cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1620 :
1621 626 : startup_cost += exprcost.startup + exprcost.per_tuple;
1622 :
1623 : /* Add scanning CPU costs */
1624 626 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1625 :
1626 626 : startup_cost += qpqual_cost.startup;
1627 626 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1628 626 : run_cost += cpu_per_tuple * baserel->tuples;
1629 :
1630 : /* tlist eval costs are paid per output row, not per tuple scanned */
1631 626 : startup_cost += path->pathtarget->cost.startup;
1632 626 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1633 :
1634 626 : path->disabled_nodes = 0;
1635 626 : path->startup_cost = startup_cost;
1636 626 : path->total_cost = startup_cost + run_cost;
1637 626 : }
1638 :
1639 : /*
1640 : * cost_valuesscan
1641 : * Determines and returns the cost of scanning a VALUES RTE.
1642 : *
1643 : * 'baserel' is the relation to be scanned
1644 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1645 : */
1646 : void
1647 8480 : cost_valuesscan(Path *path, PlannerInfo *root,
1648 : RelOptInfo *baserel, ParamPathInfo *param_info)
1649 : {
1650 8480 : Cost startup_cost = 0;
1651 8480 : Cost run_cost = 0;
1652 : QualCost qpqual_cost;
1653 : Cost cpu_per_tuple;
1654 :
1655 : /* Should only be applied to base relations that are values lists */
1656 : Assert(baserel->relid > 0);
1657 : Assert(baserel->rtekind == RTE_VALUES);
1658 :
1659 : /* Mark the path with the correct row estimate */
1660 8480 : if (param_info)
1661 66 : path->rows = param_info->ppi_rows;
1662 : else
1663 8414 : path->rows = baserel->rows;
1664 :
1665 : /*
1666 : * For now, estimate list evaluation cost at one operator eval per list
1667 : * (probably pretty bogus, but is it worth being smarter?)
1668 : */
1669 8480 : cpu_per_tuple = cpu_operator_cost;
1670 :
1671 : /* Add scanning CPU costs */
1672 8480 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1673 :
1674 8480 : startup_cost += qpqual_cost.startup;
1675 8480 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1676 8480 : run_cost += cpu_per_tuple * baserel->tuples;
1677 :
1678 : /* tlist eval costs are paid per output row, not per tuple scanned */
1679 8480 : startup_cost += path->pathtarget->cost.startup;
1680 8480 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1681 :
1682 8480 : path->disabled_nodes = 0;
1683 8480 : path->startup_cost = startup_cost;
1684 8480 : path->total_cost = startup_cost + run_cost;
1685 8480 : }
1686 :
1687 : /*
1688 : * cost_ctescan
1689 : * Determines and returns the cost of scanning a CTE RTE.
1690 : *
1691 : * Note: this is used for both self-reference and regular CTEs; the
1692 : * possible cost differences are below the threshold of what we could
1693 : * estimate accurately anyway. Note that the costs of evaluating the
1694 : * referenced CTE query are added into the final plan as initplan costs,
1695 : * and should NOT be counted here.
1696 : */
1697 : void
1698 5294 : cost_ctescan(Path *path, PlannerInfo *root,
1699 : RelOptInfo *baserel, ParamPathInfo *param_info)
1700 : {
1701 5294 : Cost startup_cost = 0;
1702 5294 : Cost run_cost = 0;
1703 : QualCost qpqual_cost;
1704 : Cost cpu_per_tuple;
1705 :
1706 : /* Should only be applied to base relations that are CTEs */
1707 : Assert(baserel->relid > 0);
1708 : Assert(baserel->rtekind == RTE_CTE);
1709 :
1710 : /* Mark the path with the correct row estimate */
1711 5294 : if (param_info)
1712 0 : path->rows = param_info->ppi_rows;
1713 : else
1714 5294 : path->rows = baserel->rows;
1715 :
1716 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1717 5294 : cpu_per_tuple = cpu_tuple_cost;
1718 :
1719 : /* Add scanning CPU costs */
1720 5294 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1721 :
1722 5294 : startup_cost += qpqual_cost.startup;
1723 5294 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1724 5294 : run_cost += cpu_per_tuple * baserel->tuples;
1725 :
1726 : /* tlist eval costs are paid per output row, not per tuple scanned */
1727 5294 : startup_cost += path->pathtarget->cost.startup;
1728 5294 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1729 :
1730 5294 : path->disabled_nodes = 0;
1731 5294 : path->startup_cost = startup_cost;
1732 5294 : path->total_cost = startup_cost + run_cost;
1733 5294 : }
1734 :
1735 : /*
1736 : * cost_namedtuplestorescan
1737 : * Determines and returns the cost of scanning a named tuplestore.
1738 : */
1739 : void
1740 478 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
1741 : RelOptInfo *baserel, ParamPathInfo *param_info)
1742 : {
1743 478 : Cost startup_cost = 0;
1744 478 : Cost run_cost = 0;
1745 : QualCost qpqual_cost;
1746 : Cost cpu_per_tuple;
1747 :
1748 : /* Should only be applied to base relations that are Tuplestores */
1749 : Assert(baserel->relid > 0);
1750 : Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1751 :
1752 : /* Mark the path with the correct row estimate */
1753 478 : if (param_info)
1754 0 : path->rows = param_info->ppi_rows;
1755 : else
1756 478 : path->rows = baserel->rows;
1757 :
1758 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1759 478 : cpu_per_tuple = cpu_tuple_cost;
1760 :
1761 : /* Add scanning CPU costs */
1762 478 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1763 :
1764 478 : startup_cost += qpqual_cost.startup;
1765 478 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1766 478 : run_cost += cpu_per_tuple * baserel->tuples;
1767 :
1768 478 : path->disabled_nodes = 0;
1769 478 : path->startup_cost = startup_cost;
1770 478 : path->total_cost = startup_cost + run_cost;
1771 478 : }
1772 :
1773 : /*
1774 : * cost_resultscan
1775 : * Determines and returns the cost of scanning an RTE_RESULT relation.
1776 : */
1777 : void
1778 4346 : cost_resultscan(Path *path, PlannerInfo *root,
1779 : RelOptInfo *baserel, ParamPathInfo *param_info)
1780 : {
1781 4346 : Cost startup_cost = 0;
1782 4346 : Cost run_cost = 0;
1783 : QualCost qpqual_cost;
1784 : Cost cpu_per_tuple;
1785 :
1786 : /* Should only be applied to RTE_RESULT base relations */
1787 : Assert(baserel->relid > 0);
1788 : Assert(baserel->rtekind == RTE_RESULT);
1789 :
1790 : /* Mark the path with the correct row estimate */
1791 4346 : if (param_info)
1792 156 : path->rows = param_info->ppi_rows;
1793 : else
1794 4190 : path->rows = baserel->rows;
1795 :
1796 : /* We charge qual cost plus cpu_tuple_cost */
1797 4346 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1798 :
1799 4346 : startup_cost += qpqual_cost.startup;
1800 4346 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1801 4346 : run_cost += cpu_per_tuple * baserel->tuples;
1802 :
1803 4346 : path->disabled_nodes = 0;
1804 4346 : path->startup_cost = startup_cost;
1805 4346 : path->total_cost = startup_cost + run_cost;
1806 4346 : }
1807 :
1808 : /*
1809 : * cost_recursive_union
1810 : * Determines and returns the cost of performing a recursive union,
1811 : * and also the estimated output size.
1812 : *
1813 : * We are given Paths for the nonrecursive and recursive terms.
1814 : */
1815 : void
1816 932 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1817 : {
1818 : Cost startup_cost;
1819 : Cost total_cost;
1820 : double total_rows;
1821 :
1822 : /* We probably have decent estimates for the non-recursive term */
1823 932 : startup_cost = nrterm->startup_cost;
1824 932 : total_cost = nrterm->total_cost;
1825 932 : total_rows = nrterm->rows;
1826 :
1827 : /*
1828 : * We arbitrarily assume that about 10 recursive iterations will be
1829 : * needed, and that we've managed to get a good fix on the cost and output
1830 : * size of each one of them. These are mighty shaky assumptions but it's
1831 : * hard to see how to do better.
1832 : */
1833 932 : total_cost += 10 * rterm->total_cost;
1834 932 : total_rows += 10 * rterm->rows;
1835 :
1836 : /*
1837 : * Also charge cpu_tuple_cost per row to account for the costs of
1838 : * manipulating the tuplestores. (We don't worry about possible
1839 : * spill-to-disk costs.)
1840 : */
1841 932 : total_cost += cpu_tuple_cost * total_rows;
1842 :
1843 932 : runion->disabled_nodes = nrterm->disabled_nodes + rterm->disabled_nodes;
1844 932 : runion->startup_cost = startup_cost;
1845 932 : runion->total_cost = total_cost;
1846 932 : runion->rows = total_rows;
1847 932 : runion->pathtarget->width = Max(nrterm->pathtarget->width,
1848 : rterm->pathtarget->width);
1849 932 : }
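/*
 * Editor's illustration (not part of costsize.c): the recursive-union totals
 * above as plain arithmetic.  With cpu_tuple_cost = 0.01 (the usual default,
 * assumed here), a non-recursive term of cost 10 returning 100 rows and a
 * recursive term of cost 20 returning 50 rows give total_rows = 100 + 10*50
 * = 600 and a total cost of 10 + 10*20 + 0.01*600 = 216.
 */
static double
recursive_union_total_cost(double nrterm_cost, double nrterm_rows,
						   double rterm_cost, double rterm_rows,
						   double cpu_tuple_cost)
{
	double		total_rows = nrterm_rows + 10.0 * rterm_rows;	/* ~10 iterations */

	/* non-recursive term, ten assumed iterations, plus tuplestore handling */
	return nrterm_cost + 10.0 * rterm_cost + cpu_tuple_cost * total_rows;
}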
1850 :
1851 : /*
1852 : * cost_tuplesort
1853 : * Determines and returns the cost of sorting a relation using tuplesort,
1854 : * not including the cost of reading the input data.
1855 : *
1856 : * If the total volume of data to sort is less than sort_mem, we will do
1857 : * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1858 : * comparisons for t tuples.
1859 : *
1860 : * If the total volume exceeds sort_mem, we switch to a tape-style merge
1861 : * algorithm. There will still be about t*log2(t) tuple comparisons in
1862 : * total, but we will also need to write and read each tuple once per
1863 : * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1864 : * number of initial runs formed and M is the merge order used by tuplesort.c.
1865 : * Since the average initial run should be about sort_mem, we have
1866 : * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1867 : * cpu = comparison_cost * t * log2(t)
1868 : *
1869 : * If the sort is bounded (i.e., only the first k result tuples are needed)
1870 : * and k tuples can fit into sort_mem, we use a heap method that keeps only
1871 : * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1872 : *
1873 : * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1874 : * accesses (XXX can't we refine that guess?)
1875 : *
1876 : * By default, we charge two operator evals per tuple comparison, which should
1877 : * be in the right ballpark in most cases. The caller can tweak this by
1878 : * specifying nonzero comparison_cost; typically that's used for any extra
1879 : * work that has to be done to prepare the inputs to the comparison operators.
1880 : *
1881 : * 'tuples' is the number of tuples in the relation
1882 : * 'width' is the average tuple width in bytes
1883 : * 'comparison_cost' is the extra cost per comparison, if any
1884 : * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1885 : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1886 : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
 : */
1887 : static void
1888 2099748 : cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1889 : double tuples, int width,
1890 : Cost comparison_cost, int sort_mem,
1891 : double limit_tuples)
1892 : {
1893 2099748 : double input_bytes = relation_byte_size(tuples, width);
1894 : double output_bytes;
1895 : double output_tuples;
1896 2099748 : int64 sort_mem_bytes = sort_mem * (int64) 1024;
1897 :
1898 : /*
1899 : * We want to be sure the cost of a sort is never estimated as zero, even
1900 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1901 : */
1902 2099748 : if (tuples < 2.0)
1903 557640 : tuples = 2.0;
1904 :
1905 : /* Include the default cost-per-comparison */
1906 2099748 : comparison_cost += 2.0 * cpu_operator_cost;
1907 :
1908 : /* Do we have a useful LIMIT? */
1909 2099748 : if (limit_tuples > 0 && limit_tuples < tuples)
1910 : {
1911 1830 : output_tuples = limit_tuples;
1912 1830 : output_bytes = relation_byte_size(output_tuples, width);
1913 : }
1914 : else
1915 : {
1916 2097918 : output_tuples = tuples;
1917 2097918 : output_bytes = input_bytes;
1918 : }
1919 :
1920 2099748 : if (output_bytes > sort_mem_bytes)
1921 : {
1922 : /*
1923 : * We'll have to use a disk-based sort of all the tuples
1924 : */
1925 18342 : double npages = ceil(input_bytes / BLCKSZ);
1926 18342 : double nruns = input_bytes / sort_mem_bytes;
1927 18342 : double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1928 : double log_runs;
1929 : double npageaccesses;
1930 :
1931 : /*
1932 : * CPU costs
1933 : *
1934 : * Assume about N log2 N comparisons
1935 : */
1936 18342 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1937 :
1938 : /* Disk costs */
1939 :
1940 : /* Compute logM(r) as log(r) / log(M) */
1941 18342 : if (nruns > mergeorder)
1942 4774 : log_runs = ceil(log(nruns) / log(mergeorder));
1943 : else
1944 13568 : log_runs = 1.0;
1945 18342 : npageaccesses = 2.0 * npages * log_runs;
1946 : /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1947 18342 : *startup_cost += npageaccesses *
1948 18342 : (seq_page_cost * 0.75 + random_page_cost * 0.25);
1949 : }
1950 2081406 : else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1951 : {
1952 : /*
1953 : * We'll use a bounded heap-sort keeping just K tuples in memory, for
1954 : * a total number of tuple comparisons of N log2 K; but the constant
1955 : * factor is a bit higher than for quicksort. Tweak it so that the
1956 : * cost curve is continuous at the crossover point.
1957 : */
1958 1352 : *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
1959 : }
1960 : else
1961 : {
1962 : /* We'll use plain quicksort on all the input tuples */
1963 2080054 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1964 : }
1965 :
1966 : /*
1967 : * Also charge a small amount (arbitrarily set equal to operator cost) per
1968 : * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1969 : * doesn't do qual-checking or projection, so it has less overhead than
1970 : * most plan nodes. Note it's correct to use tuples not output_tuples
1971 : * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1972 : * counting the LIMIT otherwise.
1973 : */
1974 2099748 : *run_cost = cpu_operator_cost * tuples;
1975 2099748 : }
1976 :
1977 : /*
1978 : * cost_incremental_sort
1979 : * Determines and returns the cost of sorting a relation incrementally, when
1980 : * the input path is presorted by a prefix of the pathkeys.
1981 : *
1982 : * 'presorted_keys' is the number of leading pathkeys by which the input path
1983 : * is sorted.
1984 : *
1985 : * We estimate the number of groups into which the relation is divided by the
1986 : * leading pathkeys, and then calculate the cost of sorting a single group
1987 : * with tuplesort using cost_tuplesort().
1988 : */
1989 : void
1990 12428 : cost_incremental_sort(Path *path,
1991 : PlannerInfo *root, List *pathkeys, int presorted_keys,
1992 : int input_disabled_nodes,
1993 : Cost input_startup_cost, Cost input_total_cost,
1994 : double input_tuples, int width, Cost comparison_cost, int sort_mem,
1995 : double limit_tuples)
1996 : {
1997 : Cost startup_cost,
1998 : run_cost,
1999 12428 : input_run_cost = input_total_cost - input_startup_cost;
2000 : double group_tuples,
2001 : input_groups;
2002 : Cost group_startup_cost,
2003 : group_run_cost,
2004 : group_input_run_cost;
2005 12428 : List *presortedExprs = NIL;
2006 : ListCell *l;
2007 12428 : bool unknown_varno = false;
2008 :
2009 : Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
2010 :
2011 : /*
2012 : * We want to be sure the cost of a sort is never estimated as zero, even
2013 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
2014 : */
2015 12428 : if (input_tuples < 2.0)
2016 6794 : input_tuples = 2.0;
2017 :
2018 : /* Default estimate of number of groups, capped to one group per row. */
2019 12428 : input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
2020 :
2021 : /*
2022 : * Extract presorted keys as list of expressions.
2023 : *
2024 : * We need to be careful about Vars containing "varno 0" which might have
2025 : * been introduced by generate_append_tlist, which would confuse
2026 : * estimate_num_groups (in fact it'd fail for such expressions). See
2027 : * recurse_set_operations which has to deal with the same issue.
2028 : *
2029 : * Unlike recurse_set_operations we can't access the original target list
2030 : * here, and even if we could it's not very clear how useful that would be
2031 : * for a set operation combining multiple tables. So we simply detect if
2032 : * there are any expressions with "varno 0" and use the default
2033 : * DEFAULT_NUM_DISTINCT in that case.
2034 : *
2035 : * We might also use either 1.0 (a single group) or input_tuples (each row
2036 : * being a separate group), pretty much the worst and best case for
2037 : * incremental sort. But those are extreme cases and using something in
2038 : * between seems reasonable. Furthermore, generate_append_tlist is used
2039 : * for set operations, which are likely to produce mostly unique output
2040 : * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
2041 : * while maintaining lower startup cost.
2042 : */
2043 12524 : foreach(l, pathkeys)
2044 : {
2045 12524 : PathKey *key = (PathKey *) lfirst(l);
2046 12524 : EquivalenceMember *member = (EquivalenceMember *)
2047 12524 : linitial(key->pk_eclass->ec_members);
2048 :
2049 : /*
2050 : * Check if the expression contains Var with "varno 0" so that we
2051 : * don't call estimate_num_groups in that case.
2052 : */
2053 12524 : if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2054 : {
2055 10 : unknown_varno = true;
2056 10 : break;
2057 : }
2058 :
2059 : /* expression not containing any Vars with "varno 0" */
2060 12514 : presortedExprs = lappend(presortedExprs, member->em_expr);
2061 :
2062 12514 : if (foreach_current_index(l) + 1 >= presorted_keys)
2063 12418 : break;
2064 : }
2065 :
2066 : /* Estimate the number of groups with equal presorted keys. */
2067 12428 : if (!unknown_varno)
2068 12418 : input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
2069 : NULL, NULL);
2070 :
2071 12428 : group_tuples = input_tuples / input_groups;
2072 12428 : group_input_run_cost = input_run_cost / input_groups;
2073 :
2074 : /*
2075 : * Estimate the average cost of sorting of one group where presorted keys
2076 : * are equal.
2077 : */
2078 12428 : cost_tuplesort(&group_startup_cost, &group_run_cost,
2079 : group_tuples, width, comparison_cost, sort_mem,
2080 : limit_tuples);
2081 :
2082 : /*
2083 : * Startup cost of incremental sort is the startup cost of its first group
2084 : * plus the cost of its input.
2085 : */
2086 12428 : startup_cost = group_startup_cost + input_startup_cost +
2087 : group_input_run_cost;
2088 :
2089 : /*
2090 : * After we started producing tuples from the first group, the cost of
2091 : * producing all the tuples is given by the cost to finish processing this
2092 : * group, plus the total cost to process the remaining groups, plus the
2093 : * remaining cost of input.
2094 : */
2095 12428 : run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
2096 12428 : (input_groups - 1) + group_input_run_cost * (input_groups - 1);
2097 :
2098 : /*
2099 : * Incremental sort adds some overhead by itself. Firstly, it has to
2100 : * detect the sort groups. This is roughly equal to one extra copy and
2101 : * comparison per tuple.
2102 : */
2103 12428 : run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2104 :
2105 : /*
2106 : * Additionally, we charge double cpu_tuple_cost for each input group to
2107 : * account for the tuplesort_reset that's performed after each group.
2108 : */
2109 12428 : run_cost += 2.0 * cpu_tuple_cost * input_groups;
2110 :
2111 12428 : path->rows = input_tuples;
2112 :
2113 : /* should not generate these paths when enable_incremental_sort=false */
2114 : Assert(enable_incremental_sort);
2115 12428 : path->disabled_nodes = input_disabled_nodes;
2116 :
2117 12428 : path->startup_cost = startup_cost;
2118 12428 : path->total_cost = startup_cost + run_cost;
2119 12428 : }
2120 :
2121 : /*
2122 : * cost_sort
2123 : * Determines and returns the cost of sorting a relation, including
2124 : * the cost of reading the input data.
2125 : *
2126 : * NOTE: some callers currently pass NIL for pathkeys because they
2127 : * can't conveniently supply the sort keys. Since this routine doesn't
2128 : * currently do anything with pathkeys anyway, that doesn't matter...
2129 : * but if it ever does, it should react gracefully to lack of key data.
2130 : * (Actually, the thing we'd most likely be interested in is just the number
2131 : * of sort keys, which all callers *could* supply.)
2132 : */
2133 : void
2134 2087320 : cost_sort(Path *path, PlannerInfo *root,
2135 : List *pathkeys, int input_disabled_nodes,
2136 : Cost input_cost, double tuples, int width,
2137 : Cost comparison_cost, int sort_mem,
2138 : double limit_tuples)
2139 :
2140 : {
2141 : Cost startup_cost;
2142 : Cost run_cost;
2143 :
2144 2087320 : cost_tuplesort(&startup_cost, &run_cost,
2145 : tuples, width,
2146 : comparison_cost, sort_mem,
2147 : limit_tuples);
2148 :
2149 2087320 : startup_cost += input_cost;
2150 :
2151 2087320 : path->rows = tuples;
2152 2087320 : path->disabled_nodes = input_disabled_nodes + (enable_sort ? 0 : 1);
2153 2087320 : path->startup_cost = startup_cost;
2154 2087320 : path->total_cost = startup_cost + run_cost;
2155 2087320 : }
2156 :
2157 : /*
2158 : * append_nonpartial_cost
2159 : * Estimate the cost of the non-partial paths in a Parallel Append.
2160 : * The non-partial paths are assumed to be the first "numpaths" paths
2161 : * from the subpaths list, and to be in order of decreasing cost.
2162 : */
2163 : static Cost
2164 26124 : append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
2165 : {
2166 : Cost *costarr;
2167 : int arrlen;
2168 : ListCell *l;
2169 : ListCell *cell;
2170 : int path_index;
2171 : int min_index;
2172 : int max_index;
2173 :
2174 26124 : if (numpaths == 0)
2175 21076 : return 0;
2176 :
2177 : /*
2178 : * Array length is number of workers or number of relevant paths,
2179 : * whichever is less.
2180 : */
2181 5048 : arrlen = Min(parallel_workers, numpaths);
2182 5048 : costarr = palloc_array(Cost, arrlen);
2183 :
2184 : /* The first few paths will each be claimed by a different worker. */
2185 5048 : path_index = 0;
2186 14670 : foreach(cell, subpaths)
2187 : {
2188 10922 : Path *subpath = (Path *) lfirst(cell);
2189 :
2190 10922 : if (path_index == arrlen)
2191 1300 : break;
2192 9622 : costarr[path_index++] = subpath->total_cost;
2193 : }
2194 :
2195 : /*
2196 : * Since subpaths are sorted by decreasing cost, the last one will have
2197 : * the minimum cost.
2198 : */
2199 5048 : min_index = arrlen - 1;
2200 :
2201 : /*
2202 : * For each of the remaining subpaths, add its cost to the array element
2203 : * with minimum cost.
2204 : */
2205 9778 : for_each_cell(l, subpaths, cell)
2206 : {
2207 5276 : Path *subpath = (Path *) lfirst(l);
2208 :
2209 : /* Consider only the non-partial paths */
2210 5276 : if (path_index++ == numpaths)
2211 546 : break;
2212 :
2213 4730 : costarr[min_index] += subpath->total_cost;
2214 :
2215 : /* Update the new min cost array index */
2216 4730 : min_index = 0;
2217 14226 : for (int i = 0; i < arrlen; i++)
2218 : {
2219 9496 : if (costarr[i] < costarr[min_index])
2220 1534 : min_index = i;
2221 : }
2222 : }
2223 :
2224 : /* Return the highest cost from the array */
2225 5048 : max_index = 0;
2226 14670 : for (int i = 0; i < arrlen; i++)
2227 : {
2228 9622 : if (costarr[i] > costarr[max_index])
2229 410 : max_index = i;
2230 : }
2231 :
2232 5048 : return costarr[max_index];
2233 : }
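/*
 * Editor's illustration (not part of costsize.c): the same greedy assignment
 * as append_nonpartial_cost(), restated over a plain array of the
 * non-partial subpaths' total costs (still sorted by decreasing cost).  The
 * fixed-size array and the function name are assumptions of this sketch; it
 * presumes parallel_workers <= 64.
 */
static double
nonpartial_cost_sketch(const double *costs, int numpaths, int parallel_workers)
{
	double		costarr[64];
	int			arrlen;
	int			i;
	double		result;

	if (numpaths == 0)
		return 0.0;

	arrlen = (parallel_workers < numpaths) ? parallel_workers : numpaths;

	/* the first arrlen paths are each claimed by a different worker */
	for (i = 0; i < arrlen; i++)
		costarr[i] = costs[i];

	/* remaining paths go to whichever worker currently has the least work */
	for (; i < numpaths; i++)
	{
		int			min_index = 0;

		for (int j = 1; j < arrlen; j++)
		{
			if (costarr[j] < costarr[min_index])
				min_index = j;
		}
		costarr[min_index] += costs[i];
	}

	/* the Append cannot finish before its most heavily loaded worker */
	result = costarr[0];
	for (i = 1; i < arrlen; i++)
	{
		if (costarr[i] > result)
			result = costarr[i];
	}
	return result;
}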
2234 :
2235 : /*
2236 : * cost_append
2237 : * Determines and returns the cost of an Append node.
2238 : */
2239 : void
2240 70830 : cost_append(AppendPath *apath, PlannerInfo *root)
2241 : {
2242 : ListCell *l;
2243 :
2244 70830 : apath->path.disabled_nodes = 0;
2245 70830 : apath->path.startup_cost = 0;
2246 70830 : apath->path.total_cost = 0;
2247 70830 : apath->path.rows = 0;
2248 :
2249 70830 : if (apath->subpaths == NIL)
2250 2018 : return;
2251 :
2252 68812 : if (!apath->path.parallel_aware)
2253 : {
2254 42688 : List *pathkeys = apath->path.pathkeys;
2255 :
2256 42688 : if (pathkeys == NIL)
2257 : {
2258 40544 : Path *firstsubpath = (Path *) linitial(apath->subpaths);
2259 :
2260 : /*
2261 : * For an unordered, non-parallel-aware Append we take the startup
2262 : * cost as the startup cost of the first subpath.
2263 : */
2264 40544 : apath->path.startup_cost = firstsubpath->startup_cost;
2265 :
2266 : /*
2267 : * Compute rows, number of disabled nodes, and total cost as sums
2268 : * of underlying subplan values.
2269 : */
2270 158676 : foreach(l, apath->subpaths)
2271 : {
2272 118132 : Path *subpath = (Path *) lfirst(l);
2273 :
2274 118132 : apath->path.rows += subpath->rows;
2275 118132 : apath->path.disabled_nodes += subpath->disabled_nodes;
2276 118132 : apath->path.total_cost += subpath->total_cost;
2277 : }
2278 : }
2279 : else
2280 : {
2281 : /*
2282 : * For an ordered, non-parallel-aware Append we take the startup
2283 : * cost as the sum of the subpath startup costs. This ensures
2284 : * that we don't underestimate the startup cost when a query's
2285 : * LIMIT is such that several of the children have to be run to
2286 : * satisfy it. This might be overkill --- another plausible hack
2287 : * would be to take the Append's startup cost as the maximum of
2288 : * the child startup costs. But we don't want to risk believing
2289 : * that an ORDER BY LIMIT query can be satisfied at small cost
2290 : * when the first child has small startup cost but later ones
2291 : * don't. (If we had the ability to deal with nonlinear cost
2292 : * interpolation for partial retrievals, we would not need to be
2293 : * so conservative about this.)
2294 : *
2295 : * This case is also different from the above in that we have to
2296 : * account for possibly injecting sorts into subpaths that aren't
2297 : * natively ordered.
2298 : */
2299 8340 : foreach(l, apath->subpaths)
2300 : {
2301 6196 : Path *subpath = (Path *) lfirst(l);
2302 : int presorted_keys;
2303 : Path sort_path; /* dummy for result of
2304 : * cost_sort/cost_incremental_sort */
2305 :
2306 6196 : if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
2307 : &presorted_keys))
2308 : {
2309 : /*
2310 : * We'll need to insert a Sort node, so include costs for
2311 : * that. We choose to use incremental sort if it is
2312 : * enabled and there are presorted keys; otherwise we use
2313 : * full sort.
2314 : *
2315 : * We can use the parent's LIMIT if any, since we
2316 : * certainly won't pull more than that many tuples from
2317 : * any child.
2318 : */
2319 44 : if (enable_incremental_sort && presorted_keys > 0)
2320 : {
2321 12 : cost_incremental_sort(&sort_path,
2322 : root,
2323 : pathkeys,
2324 : presorted_keys,
2325 : subpath->disabled_nodes,
2326 : subpath->startup_cost,
2327 : subpath->total_cost,
2328 : subpath->rows,
2329 12 : subpath->pathtarget->width,
2330 : 0.0,
2331 : work_mem,
2332 : apath->limit_tuples);
2333 : }
2334 : else
2335 : {
2336 32 : cost_sort(&sort_path,
2337 : root,
2338 : pathkeys,
2339 : subpath->disabled_nodes,
2340 : subpath->total_cost,
2341 : subpath->rows,
2342 32 : subpath->pathtarget->width,
2343 : 0.0,
2344 : work_mem,
2345 : apath->limit_tuples);
2346 : }
2347 :
2348 44 : subpath = &sort_path;
2349 : }
2350 :
2351 6196 : apath->path.rows += subpath->rows;
2352 6196 : apath->path.disabled_nodes += subpath->disabled_nodes;
2353 6196 : apath->path.startup_cost += subpath->startup_cost;
2354 6196 : apath->path.total_cost += subpath->total_cost;
2355 : }
2356 : }
2357 : }
2358 : else /* parallel-aware */
2359 : {
2360 26124 : int i = 0;
2361 26124 : double parallel_divisor = get_parallel_divisor(&apath->path);
2362 :
2363 : /* Parallel-aware Append never produces ordered output. */
2364 : Assert(apath->path.pathkeys == NIL);
2365 :
2366 : /* Calculate startup cost. */
2367 103566 : foreach(l, apath->subpaths)
2368 : {
2369 77442 : Path *subpath = (Path *) lfirst(l);
2370 :
2371 : /*
2372 : * Append will start returning tuples when the child node having
2373 : * lowest startup cost is done setting up. We consider only the
2374 : * first few subplans that immediately get a worker assigned.
2375 : */
2376 77442 : if (i == 0)
2377 26124 : apath->path.startup_cost = subpath->startup_cost;
2378 51318 : else if (i < apath->path.parallel_workers)
2379 25548 : apath->path.startup_cost = Min(apath->path.startup_cost,
2380 : subpath->startup_cost);
2381 :
2382 : /*
2383 : * Apply parallel divisor to subpaths. Scale the number of rows
2384 : * for each partial subpath based on the ratio of the parallel
2385 : * divisor originally used for the subpath to the one we adopted.
2386 : * Also add the cost of partial paths to the total cost, but
2387 : * ignore non-partial paths for now.
2388 : */
2389 77442 : if (i < apath->first_partial_path)
2390 14352 : apath->path.rows += subpath->rows / parallel_divisor;
2391 : else
2392 : {
2393 : double subpath_parallel_divisor;
2394 :
2395 63090 : subpath_parallel_divisor = get_parallel_divisor(subpath);
2396 63090 : apath->path.rows += subpath->rows * (subpath_parallel_divisor /
2397 : parallel_divisor);
2398 63090 : apath->path.total_cost += subpath->total_cost;
2399 : }
2400 :
2401 77442 : apath->path.disabled_nodes += subpath->disabled_nodes;
2402 77442 : apath->path.rows = clamp_row_est(apath->path.rows);
2403 :
2404 77442 : i++;
2405 : }
2406 :
2407 : /* Add cost for non-partial subpaths. */
2408 26124 : apath->path.total_cost +=
2409 26124 : append_nonpartial_cost(apath->subpaths,
2410 : apath->first_partial_path,
2411 : apath->path.parallel_workers);
2412 : }
2413 :
2414 : /*
2415 : * Although Append does not do any selection or projection, it's not free;
2416 : * add a small per-tuple overhead.
2417 : */
2418 68812 : apath->path.total_cost +=
2419 68812 : cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
2420 : }
2421 :
2422 : /*
2423 : * cost_merge_append
2424 : * Determines and returns the cost of a MergeAppend node.
2425 : *
2426 : * MergeAppend merges several pre-sorted input streams, using a heap that
2427 : * at any given instant holds the next tuple from each stream. If there
2428 : * are N streams, we need about N*log2(N) tuple comparisons to construct
2429 : * the heap at startup, and then for each output tuple, about log2(N)
2430 : * comparisons to replace the top entry.
2431 : *
2432 : * (The effective value of N will drop once some of the input streams are
2433 : * exhausted, but it seems unlikely to be worth trying to account for that.)
2434 : *
2435 : * The heap is never spilled to disk, since we assume N is not very large.
2436 : * So this is much simpler than cost_sort.
2437 : *
2438 : * As in cost_sort, we charge two operator evals per tuple comparison.
2439 : *
2440 : * 'pathkeys' is a list of sort keys
2441 : * 'n_streams' is the number of input streams
2442 : * 'input_disabled_nodes' is the sum of the input streams' disabled node counts
2443 : * 'input_startup_cost' is the sum of the input streams' startup costs
2444 : * 'input_total_cost' is the sum of the input streams' total costs
2445 : * 'tuples' is the number of tuples in all the streams
2446 : */
2447 : void
2448 9900 : cost_merge_append(Path *path, PlannerInfo *root,
2449 : List *pathkeys, int n_streams,
2450 : int input_disabled_nodes,
2451 : Cost input_startup_cost, Cost input_total_cost,
2452 : double tuples)
2453 : {
2454 9900 : Cost startup_cost = 0;
2455 9900 : Cost run_cost = 0;
2456 : Cost comparison_cost;
2457 : double N;
2458 : double logN;
2459 :
2460 : /*
2461 : * Avoid log(0)...
2462 : */
2463 9900 : N = (n_streams < 2) ? 2.0 : (double) n_streams;
2464 9900 : logN = LOG2(N);
2465 :
2466 : /* Assumed cost per tuple comparison */
2467 9900 : comparison_cost = 2.0 * cpu_operator_cost;
2468 :
2469 : /* Heap creation cost */
2470 9900 : startup_cost += comparison_cost * N * logN;
2471 :
2472 : /* Per-tuple heap maintenance cost */
2473 9900 : run_cost += tuples * comparison_cost * logN;
2474 :
2475 : /*
2476 : * Although MergeAppend does not do any selection or projection, it's not
2477 : * free; add a small per-tuple overhead.
2478 : */
2479 9900 : run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2480 :
2481 9900 : path->disabled_nodes = input_disabled_nodes;
2482 9900 : path->startup_cost = startup_cost + input_startup_cost;
2483 9900 : path->total_cost = startup_cost + run_cost + input_total_cost;
2484 9900 : }
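/*
 * Editor's illustration (not part of costsize.c): the MergeAppend run-cost
 * arithmetic above.  The 0.5 multiplier stands in for
 * APPEND_CPU_COST_MULTIPLIER and is an assumption of this sketch.
 */
#include <math.h>

static double
merge_append_run_cost(int n_streams, double tuples,
					  double cpu_operator_cost, double cpu_tuple_cost)
{
	double		N = (n_streams < 2) ? 2.0 : (double) n_streams;
	double		logN = log(N) / log(2.0);
	double		comparison_cost = 2.0 * cpu_operator_cost;
	double		run_cost = 0.0;

	/* replacing the heap top costs about log2(N) comparisons per tuple */
	run_cost += tuples * comparison_cost * logN;

	/* small per-tuple overhead, as for Append */
	run_cost += cpu_tuple_cost * 0.5 * tuples;

	return run_cost;
}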
2485 :
2486 : /*
2487 : * cost_material
2488 : * Determines and returns the cost of materializing a relation, including
2489 : * the cost of reading the input data.
2490 : *
2491 : * If the total volume of data to materialize exceeds work_mem, we will need
2492 : * to write it to disk, so the cost is much higher in that case.
2493 : *
2494 : * Note that here we are estimating the costs for the first scan of the
2495 : * relation, so the materialization is all overhead --- any savings will
2496 : * occur only on rescan, which is estimated in cost_rescan.
2497 : */
2498 : void
2499 679892 : cost_material(Path *path,
2500 : int input_disabled_nodes,
2501 : Cost input_startup_cost, Cost input_total_cost,
2502 : double tuples, int width)
2503 : {
2504 679892 : Cost startup_cost = input_startup_cost;
2505 679892 : Cost run_cost = input_total_cost - input_startup_cost;
2506 679892 : double nbytes = relation_byte_size(tuples, width);
2507 679892 : double work_mem_bytes = work_mem * (Size) 1024;
2508 :
2509 679892 : path->rows = tuples;
2510 :
2511 : /*
2512 : * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2513 : * reflect bookkeeping overhead. (This rate must be more than what
2514 : * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2515 : * if it is exactly the same then there will be a cost tie between
2516 : * nestloop with A outer, materialized B inner and nestloop with B outer,
2517 : * materialized A inner. The extra cost ensures we'll prefer
2518 : * materializing the smaller rel.) Note that this is normally a good deal
2519 : * less than cpu_tuple_cost; which is OK because a Material plan node
2520 : * doesn't do qual-checking or projection, so it's got less overhead than
2521 : * most plan nodes.
2522 : */
2523 679892 : run_cost += 2 * cpu_operator_cost * tuples;
2524 :
2525 : /*
2526 : * If we will spill to disk, charge at the rate of seq_page_cost per page.
2527 : * This cost is assumed to be evenly spread through the plan run phase,
2528 : * which isn't exactly accurate but our cost model doesn't allow for
2529 : * nonuniform costs within the run phase.
2530 : */
2531 679892 : if (nbytes > work_mem_bytes)
2532 : {
2533 4978 : double npages = ceil(nbytes / BLCKSZ);
2534 :
2535 4978 : run_cost += seq_page_cost * npages;
2536 : }
2537 :
2538 679892 : path->disabled_nodes = input_disabled_nodes + (enable_material ? 0 : 1);
2539 679892 : path->startup_cost = startup_cost;
2540 679892 : path->total_cost = startup_cost + run_cost;
2541 679892 : }
2542 :
2543 : /*
2544 : * cost_memoize_rescan
2545 : * Determines the estimated cost of rescanning a Memoize node.
2546 : *
2547 : * In order to estimate this, we must gain knowledge of how often we expect to
2548 : * be called and how many distinct sets of parameters we are likely to be
2549 : * called with. If we expect a good cache hit ratio, then we can set our
2550 : * costs to account for that hit ratio, plus a little bit of cost for the
2551 : * caching itself. Caching will not work out well if we expect to be called
2552 : * with too many distinct parameter values. The worst-case here is that we
2553 : * never see any parameter value twice, in which case we'd never get a cache
2554 : * hit and caching would be a complete waste of effort.
2555 : */
2556 : static void
2557 295294 : cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
2558 : Cost *rescan_startup_cost, Cost *rescan_total_cost)
2559 : {
2560 : EstimationInfo estinfo;
2561 : ListCell *lc;
2562 295294 : Cost input_startup_cost = mpath->subpath->startup_cost;
2563 295294 : Cost input_total_cost = mpath->subpath->total_cost;
2564 295294 : double tuples = mpath->subpath->rows;
2565 295294 : Cardinality est_calls = mpath->est_calls;
2566 295294 : int width = mpath->subpath->pathtarget->width;
2567 :
2568 : double hash_mem_bytes;
2569 : double est_entry_bytes;
2570 : Cardinality est_cache_entries;
2571 : Cardinality ndistinct;
2572 : double evict_ratio;
2573 : double hit_ratio;
2574 : Cost startup_cost;
2575 : Cost total_cost;
2576 :
2577 : /* available cache space */
2578 295294 : hash_mem_bytes = get_hash_memory_limit();
2579 :
2580 : /*
2581 : * Set the number of bytes each cache entry should consume in the cache.
2582 : * To provide us with better estimations on how many cache entries we can
2583 : * store at once, we make a call to the executor here to ask it what
2584 : * memory overheads there are for a single cache entry.
2585 : */
2586 295294 : est_entry_bytes = relation_byte_size(tuples, width) +
2587 295294 : ExecEstimateCacheEntryOverheadBytes(tuples);
2588 :
2589 : /* include the estimated width for the cache keys */
2590 629074 : foreach(lc, mpath->param_exprs)
2591 333780 : est_entry_bytes += get_expr_width(root, (Node *) lfirst(lc));
2592 :
2593 : /* estimate on the upper limit of cache entries we can hold at once */
2594 295294 : est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
2595 :
2596 : /* estimate on the distinct number of parameter values */
2597 295294 : ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL,
2598 : &estinfo);
2599 :
2600 : /*
2601 : * When the estimation fell back on using a default value, it's a bit too
2602 : * risky to assume that it's ok to use a Memoize node. The use of a
2603 : * default could cause us to use a Memoize node when it's really
2604 : * inappropriate to do so. If we see that this has been done, then we'll
2605 : * assume that every call will have unique parameters, which will almost
2606 : * certainly mean a MemoizePath will never survive add_path().
2607 : */
2608 295294 : if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
2609 17940 : ndistinct = est_calls;
2610 :
2611 : /* Remember the ndistinct estimate for EXPLAIN */
2612 295294 : mpath->est_unique_keys = ndistinct;
2613 :
2614 : /*
2615 : * Since we've already estimated the maximum number of entries we can
2616 : * store at once and know the estimated number of distinct values we'll be
2617 : * called with, we'll take this opportunity to set the path's est_entries.
2618 : * This will ultimately determine the hash table size that the executor
2619 : * will use. If we leave this at zero, the executor will just choose the
2620 : * size itself. Really this is not the right place to do this, but it's
2621 : * convenient since everything is already calculated.
2622 : */
2623 295294 : mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
2624 : PG_UINT32_MAX);
2625 :
2626 : /*
2627 : * When the number of distinct parameter values is above the amount we can
2628 : * store in the cache, then we'll have to evict some entries from the
2629 : * cache. This is not free. Here we estimate how often we'll incur the
2630 : * cost of that eviction.
2631 : */
2632 295294 : evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
2633 :
2634 : /*
2635 : * In order to estimate how costly a single scan will be, we need to
2636 : * attempt to estimate what the cache hit ratio will be. To do that we
2637 : * must look at how many scans are estimated in total for this node and
2638 : * how many of those scans we expect to get a cache hit.
2639 : */
2640 590588 : hit_ratio = ((est_calls - ndistinct) / est_calls) *
2641 295294 : (est_cache_entries / Max(ndistinct, est_cache_entries));
2642 :
2643 : /* Remember the hit ratio estimate for EXPLAIN */
2644 295294 : mpath->est_hit_ratio = hit_ratio;
2645 :
2646 : Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2647 :
2648 : /*
2649 : * Set the total_cost accounting for the expected cache hit ratio. We
2650 : * also add on a cpu_operator_cost to account for a cache lookup. This
2651 : * will happen regardless of whether it's a cache hit or not.
2652 : */
2653 295294 : total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2654 :
2655 : /* Now adjust the total cost to account for cache evictions */
2656 :
2657 : /* Charge a cpu_tuple_cost for evicting the actual cache entry */
2658 295294 : total_cost += cpu_tuple_cost * evict_ratio;
2659 :
2660 : /*
2661 : * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2662 : * The per-tuple eviction is really just a pfree, so charging a whole
2663 : * cpu_operator_cost seems a little excessive.
2664 : */
2665 295294 : total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
2666 :
2667 : /*
2668 : * Now adjust for storing things in the cache, since that's not free
2669 : * either. Everything must go in the cache. We don't proportion this
2670 : * over any ratio, just apply it once for the scan. We charge a
2671 : * cpu_tuple_cost for the creation of the cache entry and also a
2672 : * cpu_operator_cost for each tuple we expect to cache.
2673 : */
2674 295294 : total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2675 :
2676 : /*
2677 : * Getting the first row must also be proportioned according to the
2678 : * expected cache hit ratio.
2679 : */
2680 295294 : startup_cost = input_startup_cost * (1.0 - hit_ratio);
2681 :
2682 : /*
2683 : * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2684 : * which we'll do regardless of whether it was a cache hit or not.
2685 : */
2686 295294 : startup_cost += cpu_tuple_cost;
2687 :
2688 295294 : *rescan_startup_cost = startup_cost;
2689 295294 : *rescan_total_cost = total_cost;
2690 295294 : }
2691 :
2692 : /*
2693 : * cost_agg
2694 : * Determines and returns the cost of performing an Agg plan node,
2695 : * including the cost of its input.
2696 : *
2697 : * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2698 : * we are using a hashed Agg node just to do grouping).
2699 : *
2700 : * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2701 : * are for appropriately-sorted input.
2702 : */
2703 : void
2704 92908 : cost_agg(Path *path, PlannerInfo *root,
2705 : AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
2706 : int numGroupCols, double numGroups,
2707 : List *quals,
2708 : int disabled_nodes,
2709 : Cost input_startup_cost, Cost input_total_cost,
2710 : double input_tuples, double input_width)
2711 : {
2712 : double output_tuples;
2713 : Cost startup_cost;
2714 : Cost total_cost;
2715 92908 : const AggClauseCosts dummy_aggcosts = {0};
2716 :
2717 : /* Use all-zero per-aggregate costs if NULL is passed */
2718 92908 : if (aggcosts == NULL)
2719 : {
2720 : Assert(aggstrategy == AGG_HASHED);
2721 19230 : aggcosts = &dummy_aggcosts;
2722 : }
2723 :
2724 : /*
2725 : * The transCost.per_tuple component of aggcosts should be charged once
2726 : * per input tuple, corresponding to the costs of evaluating the aggregate
2727 : * transfns and their input expressions. The finalCost.per_tuple component
2728 : * is charged once per output tuple, corresponding to the costs of
2729 : * evaluating the finalfns. Startup costs are of course charged but once.
2730 : *
2731 : * If we are grouping, we charge an additional cpu_operator_cost per
2732 : * grouping column per input tuple for grouping comparisons.
2733 : *
2734 : * We will produce a single output tuple if not grouping, and a tuple per
2735 : * group otherwise. We charge cpu_tuple_cost for each output tuple.
2736 : *
2737 : * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2738 : * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2739 : * input path is already sorted appropriately, AGG_SORTED should be
2740 : * preferred (since it has no risk of memory overflow). This will happen
2741 : * as long as the computed total costs are indeed exactly equal --- but if
2742 : * there's roundoff error we might do the wrong thing. So be sure that
2743 : * the computations below form the same intermediate values in the same
2744 : * order.
2745 : */
2746 92908 : if (aggstrategy == AGG_PLAIN)
2747 : {
2748 43198 : startup_cost = input_total_cost;
2749 43198 : startup_cost += aggcosts->transCost.startup;
2750 43198 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2751 43198 : startup_cost += aggcosts->finalCost.startup;
2752 43198 : startup_cost += aggcosts->finalCost.per_tuple;
2753 : /* we aren't grouping */
2754 43198 : total_cost = startup_cost + cpu_tuple_cost;
2755 43198 : output_tuples = 1;
2756 : }
2757 49710 : else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2758 : {
2759 : /* Here we are able to deliver output on-the-fly */
2760 17958 : startup_cost = input_startup_cost;
2761 17958 : total_cost = input_total_cost;
2762 17958 : if (aggstrategy == AGG_MIXED && !enable_hashagg)
2763 480 : ++disabled_nodes;
2764 : /* calcs phrased this way to match HASHED case, see note above */
2765 17958 : total_cost += aggcosts->transCost.startup;
2766 17958 : total_cost += aggcosts->transCost.per_tuple * input_tuples;
2767 17958 : total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2768 17958 : total_cost += aggcosts->finalCost.startup;
2769 17958 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2770 17958 : total_cost += cpu_tuple_cost * numGroups;
2771 17958 : output_tuples = numGroups;
2772 : }
2773 : else
2774 : {
2775 : /* must be AGG_HASHED */
2776 31752 : startup_cost = input_total_cost;
2777 31752 : if (!enable_hashagg)
2778 1866 : ++disabled_nodes;
2779 31752 : startup_cost += aggcosts->transCost.startup;
2780 31752 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2781 : /* cost of computing hash value */
2782 31752 : startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2783 31752 : startup_cost += aggcosts->finalCost.startup;
2784 :
2785 31752 : total_cost = startup_cost;
2786 31752 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2787 : /* cost of retrieving from hash table */
2788 31752 : total_cost += cpu_tuple_cost * numGroups;
2789 31752 : output_tuples = numGroups;
2790 : }
2791 :
2792 : /*
2793 : * Add the disk costs of hash aggregation that spills to disk.
2794 : *
2795 : * Groups that go into the hash table stay in memory until finalized, so
2796 : * spilling and reprocessing tuples doesn't incur additional invocations
2797 : * of transCost or finalCost. Furthermore, the computed hash value is
2798 : * stored with the spilled tuples, so we don't incur extra invocations of
2799 : * the hash function.
2800 : *
2801 : * Hash Agg begins returning tuples after the first batch is complete.
2802 : * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2803 : * accrue reads only to total_cost.
2804 : */
2805 92908 : if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
2806 : {
2807 : double pages;
2808 32698 : double pages_written = 0.0;
2809 32698 : double pages_read = 0.0;
2810 : double spill_cost;
2811 : double hashentrysize;
2812 : double nbatches;
2813 : Size mem_limit;
2814 : uint64 ngroups_limit;
2815 : int num_partitions;
2816 : int depth;
2817 :
2818 : /*
2819 : * Estimate number of batches based on the computed limits. If less
2820 : * than or equal to one, all groups are expected to fit in memory;
2821 : * otherwise we expect to spill.
2822 : */
2823 32698 : hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2824 : input_width,
2825 32698 : aggcosts->transitionSpace);
2826 32698 : hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2827 : &ngroups_limit, &num_partitions);
2828 :
2829 32698 : nbatches = Max((numGroups * hashentrysize) / mem_limit,
2830 : numGroups / ngroups_limit);
2831 :
2832 32698 : nbatches = Max(ceil(nbatches), 1.0);
2833 32698 : num_partitions = Max(num_partitions, 2);
2834 :
2835 : /*
2836 : * The number of partitions can change at different levels of
2837 : * recursion; but for the purposes of this calculation assume it stays
2838 : * constant.
2839 : */
2840 32698 : depth = ceil(log(nbatches) / log(num_partitions));
2841 :
2842 : /*
2843 : * Estimate number of pages read and written. For each level of
2844 : * recursion, a tuple must be written and then later read.
2845 : */
2846 32698 : pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
2847 32698 : pages_written = pages_read = pages * depth;
2848 :
2849 : /*
2850 : * HashAgg has somewhat worse IO behavior than Sort on typical
2851 : * hardware/OS combinations. Account for this with a generic penalty.
2852 : */
2853 32698 : pages_read *= 2.0;
2854 32698 : pages_written *= 2.0;
2855 :
2856 32698 : startup_cost += pages_written * random_page_cost;
2857 32698 : total_cost += pages_written * random_page_cost;
2858 32698 : total_cost += pages_read * seq_page_cost;
2859 :
2860 : /* account for CPU cost of spilling a tuple and reading it back */
2861 32698 : spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2862 32698 : startup_cost += spill_cost;
2863 32698 : total_cost += spill_cost;
2864 : }
2865 :
2866 : /*
2867 : * If there are quals (HAVING quals), account for their cost and
2868 : * selectivity.
2869 : */
2870 92908 : if (quals)
2871 : {
2872 : QualCost qual_cost;
2873 :
2874 4620 : cost_qual_eval(&qual_cost, quals, root);
2875 4620 : startup_cost += qual_cost.startup;
2876 4620 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2877 :
2878 4620 : output_tuples = clamp_row_est(output_tuples *
2879 4620 : clauselist_selectivity(root,
2880 : quals,
2881 : 0,
2882 : JOIN_INNER,
2883 : NULL));
2884 : }
2885 :
2886 92908 : path->rows = output_tuples;
2887 92908 : path->disabled_nodes = disabled_nodes;
2888 92908 : path->startup_cost = startup_cost;
2889 92908 : path->total_cost = total_cost;
2890 92908 : }
2891 :
2892 : /*
2893 : * get_windowclause_startup_tuples
2894 : * Estimate how many tuples we'll need to fetch from a WindowAgg's
2895 : * subnode before we can output the first WindowAgg tuple.
2896 : *
2897 : * How many tuples need to be read depends on the WindowClause. For example,
2898 : * a WindowClause with no PARTITION BY and no ORDER BY requires that all
2899 : * subnode tuples are read and aggregated before the WindowAgg can output
2900 : * anything. If there's a PARTITION BY, then we only need to look at tuples
2901 : * in the first partition. Here we attempt to estimate just how many
2902 : * 'input_tuples' the WindowAgg will need to read for the given WindowClause
2903 : * before the first tuple can be output.
2904 : */
2905 : static double
2906 2976 : get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc,
2907 : double input_tuples)
2908 : {
2909 2976 : int frameOptions = wc->frameOptions;
2910 : double partition_tuples;
2911 : double return_tuples;
2912 : double peer_tuples;
2913 :
2914 : /*
2915 : * First, figure out how many partitions there are likely to be and set
2916 : * partition_tuples according to that estimate.
2917 : */
2918 2976 : if (wc->partitionClause != NIL)
2919 : {
2920 : double num_partitions;
2921 734 : List *partexprs = get_sortgrouplist_exprs(wc->partitionClause,
2922 734 : root->parse->targetList);
2923 :
2924 734 : num_partitions = estimate_num_groups(root, partexprs, input_tuples,
2925 : NULL, NULL);
2926 734 : list_free(partexprs);
2927 :
2928 734 : partition_tuples = input_tuples / num_partitions;
2929 : }
2930 : else
2931 : {
2932 : /* all tuples belong to the same partition */
2933 2242 : partition_tuples = input_tuples;
2934 : }
2935 :
2936 : /* estimate the number of tuples in each peer group */
2937 2976 : if (wc->orderClause != NIL)
2938 : {
2939 : double num_groups;
2940 : List *orderexprs;
2941 :
2942 2370 : orderexprs = get_sortgrouplist_exprs(wc->orderClause,
2943 2370 : root->parse->targetList);
2944 :
2945 :         /* estimate how many peer groups there are in the partition */
2946 2370 : num_groups = estimate_num_groups(root, orderexprs,
2947 : partition_tuples, NULL,
2948 : NULL);
2949 2370 : list_free(orderexprs);
2950 2370 : peer_tuples = partition_tuples / num_groups;
2951 : }
2952 : else
2953 : {
2954 : /* no ORDER BY so only 1 tuple belongs in each peer group */
2955 606 : peer_tuples = 1.0;
2956 : }
2957 :
2958 2976 : if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
2959 : {
2960 : /* include all partition rows */
2961 364 : return_tuples = partition_tuples;
2962 : }
2963 2612 : else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW)
2964 : {
2965 1574 : if (frameOptions & FRAMEOPTION_ROWS)
2966 : {
2967 : /* just count the current row */
2968 722 : return_tuples = 1.0;
2969 : }
2970 852 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
2971 : {
2972 : /*
2973 : * When in RANGE/GROUPS mode, it's more complex. If there's no
2974 :              * ORDER BY, then all rows in the partition are peers; otherwise
2975 : * we'll need to read the first group of peers.
2976 : */
2977 852 : if (wc->orderClause == NIL)
2978 326 : return_tuples = partition_tuples;
2979 : else
2980 526 : return_tuples = peer_tuples;
2981 : }
2982 : else
2983 : {
2984 : /*
2985 : * Something new we don't support yet? This needs attention.
2986 : * We'll just return 1.0 in the meantime.
2987 : */
2988 : Assert(false);
2989 0 : return_tuples = 1.0;
2990 : }
2991 : }
2992 1038 : else if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
2993 : {
2994 : /*
2995 : * BETWEEN ... AND N PRECEDING will only need to read the WindowAgg's
2996 : * subnode after N ROWS/RANGES/GROUPS. N can be 0, but not negative,
2997 : * so we'll just assume only the current row needs to be read to fetch
2998 : * the first WindowAgg row.
2999 : */
3000 108 : return_tuples = 1.0;
3001 : }
3002 930 : else if (frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
3003 : {
3004 930 : Const *endOffset = (Const *) wc->endOffset;
3005 : double end_offset_value;
3006 :
3007 :         /* try to figure out the value specified in endOffset */
3008 930 : if (IsA(endOffset, Const))
3009 : {
3010 930 : if (endOffset->constisnull)
3011 : {
3012 : /*
3013 : * NULLs are not allowed, but currently, there's no code to
3014 : * error out if there's a NULL Const. We'll only discover
3015 : * this during execution. For now, just pretend everything is
3016 : * fine and assume that just the first row/range/group will be
3017 : * needed.
3018 : */
3019 0 : end_offset_value = 1.0;
3020 : }
3021 : else
3022 : {
3023 930 : switch (endOffset->consttype)
3024 : {
3025 24 : case INT2OID:
3026 24 : end_offset_value =
3027 24 : (double) DatumGetInt16(endOffset->constvalue);
3028 24 : break;
3029 132 : case INT4OID:
3030 132 : end_offset_value =
3031 132 : (double) DatumGetInt32(endOffset->constvalue);
3032 132 : break;
3033 432 : case INT8OID:
3034 432 : end_offset_value =
3035 432 : (double) DatumGetInt64(endOffset->constvalue);
3036 432 : break;
3037 342 : default:
3038 342 : end_offset_value =
3039 342 : partition_tuples / peer_tuples *
3040 : DEFAULT_INEQ_SEL;
3041 342 : break;
3042 : }
3043 : }
3044 : }
3045 : else
3046 : {
3047 : /*
3048 :              * When the end bound is not a Const, we'll just have to guess; fall
3049 :              * back on DEFAULT_INEQ_SEL.
3050 : */
3051 0 : end_offset_value =
3052 0 : partition_tuples / peer_tuples * DEFAULT_INEQ_SEL;
3053 : }
3054 :
3055 930 : if (frameOptions & FRAMEOPTION_ROWS)
3056 : {
3057 :             /* include the N FOLLOWING rows and the current row */
3058 270 : return_tuples = end_offset_value + 1.0;
3059 : }
3060 660 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3061 : {
3062 :             /* include N FOLLOWING ranges/groups and the initial range/group */
3063 660 : return_tuples = peer_tuples * (end_offset_value + 1.0);
3064 : }
3065 : else
3066 : {
3067 : /*
3068 : * Something new we don't support yet? This needs attention.
3069 : * We'll just return 1.0 in the meantime.
3070 : */
3071 : Assert(false);
3072 0 : return_tuples = 1.0;
3073 : }
3074 : }
3075 : else
3076 : {
3077 : /*
3078 : * Something new we don't support yet? This needs attention. We'll
3079 : * just return 1.0 in the meantime.
3080 : */
3081 : Assert(false);
3082 0 : return_tuples = 1.0;
3083 : }
3084 :
3085 2976 : if (wc->partitionClause != NIL || wc->orderClause != NIL)
3086 : {
3087 : /*
3088 : * Cap the return value to the estimated partition tuples and account
3089 : * for the extra tuple WindowAgg will need to read to confirm the next
3090 : * tuple does not belong to the same partition or peer group.
3091 : */
3092 2582 : return_tuples = Min(return_tuples + 1.0, partition_tuples);
3093 : }
3094 : else
3095 : {
3096 : /*
3097 : * Cap the return value so it's never higher than the expected tuples
3098 : * in the partition.
3099 : */
3100 394 : return_tuples = Min(return_tuples, partition_tuples);
3101 : }
3102 :
3103 : /*
3104 : * We needn't worry about any EXCLUDE options as those only exclude rows
3105 : * from being aggregated, not from being read from the WindowAgg's
3106 : * subnode.
3107 : */
3108 :
3109 2976 : return clamp_row_est(return_tuples);
3110 : }
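
/*
 * As a worked example (made-up numbers): say the subnode returns 10000
 * rows split across an estimated 50 partitions (partition_tuples = 200),
 * and each partition holds 20 peer groups (peer_tuples = 10).  For a
 * frame ending at RANGE 3 FOLLOWING, return_tuples = 10 * (3 + 1) = 40;
 * because there is an ORDER BY we add one look-ahead tuple and cap at the
 * partition size, so the estimate is Min(41, 200) = 41 tuples read before
 * the first WindowAgg row can be emitted.
 */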
3111 :
3112 : /*
3113 : * cost_windowagg
3114 : * Determines and returns the cost of performing a WindowAgg plan node,
3115 : * including the cost of its input.
3116 : *
3117 : * Input is assumed already properly sorted.
3118 : */
3119 : void
3120 2976 : cost_windowagg(Path *path, PlannerInfo *root,
3121 : List *windowFuncs, WindowClause *winclause,
3122 : int input_disabled_nodes,
3123 : Cost input_startup_cost, Cost input_total_cost,
3124 : double input_tuples)
3125 : {
3126 : Cost startup_cost;
3127 : Cost total_cost;
3128 : double startup_tuples;
3129 : int numPartCols;
3130 : int numOrderCols;
3131 : ListCell *lc;
3132 :
3133 2976 : numPartCols = list_length(winclause->partitionClause);
3134 2976 : numOrderCols = list_length(winclause->orderClause);
3135 :
3136 2976 : startup_cost = input_startup_cost;
3137 2976 : total_cost = input_total_cost;
3138 :
3139 : /*
3140 : * Window functions are assumed to cost their stated execution cost, plus
3141 : * the cost of evaluating their input expressions, per tuple. Since they
3142 : * may in fact evaluate their inputs at multiple rows during each cycle,
3143 : * this could be a drastic underestimate; but without a way to know how
3144 : * many rows the window function will fetch, it's hard to do better. In
3145 : * any case, it's a good estimate for all the built-in window functions,
3146 : * so we'll just do this for now.
3147 : */
3148 6822 : foreach(lc, windowFuncs)
3149 : {
3150 3846 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
3151 : Cost wfunccost;
3152 : QualCost argcosts;
3153 :
3154 3846 : argcosts.startup = argcosts.per_tuple = 0;
3155 3846 : add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
3156 : &argcosts);
3157 3846 : startup_cost += argcosts.startup;
3158 3846 : wfunccost = argcosts.per_tuple;
3159 :
3160 : /* also add the input expressions' cost to per-input-row costs */
3161 3846 : cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
3162 3846 : startup_cost += argcosts.startup;
3163 3846 : wfunccost += argcosts.per_tuple;
3164 :
3165 : /*
3166 : * Add the filter's cost to per-input-row costs. XXX We should reduce
3167 : * input expression costs according to filter selectivity.
3168 : */
3169 3846 : cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
3170 3846 : startup_cost += argcosts.startup;
3171 3846 : wfunccost += argcosts.per_tuple;
3172 :
3173 3846 : total_cost += wfunccost * input_tuples;
3174 : }
3175 :
3176 : /*
3177 : * We also charge cpu_operator_cost per grouping column per tuple for
3178 : * grouping comparisons, plus cpu_tuple_cost per tuple for general
3179 : * overhead.
3180 : *
3181 : * XXX this neglects costs of spooling the data to disk when it overflows
3182 : * work_mem. Sooner or later that should get accounted for.
3183 : */
3184 2976 : total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
3185 2976 : total_cost += cpu_tuple_cost * input_tuples;
3186 :
3187 2976 : path->rows = input_tuples;
3188 2976 : path->disabled_nodes = input_disabled_nodes;
3189 2976 : path->startup_cost = startup_cost;
3190 2976 : path->total_cost = total_cost;
3191 :
3192 : /*
3193 : * Also, take into account how many tuples we need to read from the
3194 : * subnode in order to produce the first tuple from the WindowAgg. To do
3195 :      * this we apportion the run cost (total cost not including startup cost)
3196 :      * over the estimated startup tuples.  We already included the startup
3197 :      * cost of the subnode, so we only need to do this when the estimated
3198 :      * number of startup tuples is above 1.0.
3199 : */
3200 2976 : startup_tuples = get_windowclause_startup_tuples(root, winclause,
3201 : input_tuples);
3202 :
3203 2976 : if (startup_tuples > 1.0)
3204 2568 : path->startup_cost += (total_cost - startup_cost) / input_tuples *
3205 2568 : (startup_tuples - 1.0);
3206 2976 : }
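
/*
 * As a worked example (made-up numbers): if startup_cost is 0 and
 * total_cost is 1000 at this point, input_tuples is 10000, and the
 * estimate above says 501 tuples must be read before the first output
 * row, the path's startup cost grows by (1000 - 0) / 10000 * (501 - 1)
 * = 50.
 */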
3207 :
3208 : /*
3209 : * cost_group
3210 : * Determines and returns the cost of performing a Group plan node,
3211 : * including the cost of its input.
3212 : *
3213 : * Note: caller must ensure that input costs are for appropriately-sorted
3214 : * input.
3215 : */
3216 : void
3217 1226 : cost_group(Path *path, PlannerInfo *root,
3218 : int numGroupCols, double numGroups,
3219 : List *quals,
3220 : int input_disabled_nodes,
3221 : Cost input_startup_cost, Cost input_total_cost,
3222 : double input_tuples)
3223 : {
3224 : double output_tuples;
3225 : Cost startup_cost;
3226 : Cost total_cost;
3227 :
3228 1226 : output_tuples = numGroups;
3229 1226 : startup_cost = input_startup_cost;
3230 1226 : total_cost = input_total_cost;
3231 :
3232 : /*
3233 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3234 :      * all columns get compared for most of the tuples.
3235 : */
3236 1226 : total_cost += cpu_operator_cost * input_tuples * numGroupCols;
3237 :
3238 : /*
3239 : * If there are quals (HAVING quals), account for their cost and
3240 : * selectivity.
3241 : */
3242 1226 : if (quals)
3243 : {
3244 : QualCost qual_cost;
3245 :
3246 0 : cost_qual_eval(&qual_cost, quals, root);
3247 0 : startup_cost += qual_cost.startup;
3248 0 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
3249 :
3250 0 : output_tuples = clamp_row_est(output_tuples *
3251 0 : clauselist_selectivity(root,
3252 : quals,
3253 : 0,
3254 : JOIN_INNER,
3255 : NULL));
3256 : }
3257 :
3258 1226 : path->rows = output_tuples;
3259 1226 : path->disabled_nodes = input_disabled_nodes;
3260 1226 : path->startup_cost = startup_cost;
3261 1226 : path->total_cost = total_cost;
3262 1226 : }
3263 :
3264 : /*
3265 : * initial_cost_nestloop
3266 : * Preliminary estimate of the cost of a nestloop join path.
3267 : *
3268 : * This must quickly produce lower-bound estimates of the path's startup and
3269 : * total costs. If we are unable to eliminate the proposed path from
3270 : * consideration using the lower bounds, final_cost_nestloop will be called
3271 : * to obtain the final estimates.
3272 : *
3273 : * The exact division of labor between this function and final_cost_nestloop
3274 : * is private to them, and represents a tradeoff between speed of the initial
3275 : * estimate and getting a tight lower bound. We choose to not examine the
3276 : * join quals here, since that's by far the most expensive part of the
3277 : * calculations. The end result is that CPU-cost considerations must be
3278 : * left for the second phase; and for SEMI/ANTI joins, we must also postpone
3279 : * incorporation of the inner path's run cost.
3280 : *
3281 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3282 : * other data to be used by final_cost_nestloop
3283 : * 'jointype' is the type of join to be performed
3284 : * 'outer_path' is the outer input to the join
3285 : * 'inner_path' is the inner input to the join
3286 : * 'extra' contains miscellaneous information about the join
3287 : */
3288 : void
3289 3292804 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
3290 : JoinType jointype,
3291 : Path *outer_path, Path *inner_path,
3292 : JoinPathExtraData *extra)
3293 : {
3294 : int disabled_nodes;
3295 3292804 : Cost startup_cost = 0;
3296 3292804 : Cost run_cost = 0;
3297 3292804 : double outer_path_rows = outer_path->rows;
3298 : Cost inner_rescan_start_cost;
3299 : Cost inner_rescan_total_cost;
3300 : Cost inner_run_cost;
3301 : Cost inner_rescan_run_cost;
3302 :
3303 : /* Count up disabled nodes. */
3304 3292804 : disabled_nodes = enable_nestloop ? 0 : 1;
3305 3292804 : disabled_nodes += inner_path->disabled_nodes;
3306 3292804 : disabled_nodes += outer_path->disabled_nodes;
3307 :
3308 : /* estimate costs to rescan the inner relation */
3309 3292804 : cost_rescan(root, inner_path,
3310 : &inner_rescan_start_cost,
3311 : &inner_rescan_total_cost);
3312 :
3313 : /* cost of source data */
3314 :
3315 : /*
3316 : * NOTE: clearly, we must pay both outer and inner paths' startup_cost
3317 : * before we can start returning tuples, so the join's startup cost is
3318 : * their sum. We'll also pay the inner path's rescan startup cost
3319 : * multiple times.
3320 : */
3321 3292804 : startup_cost += outer_path->startup_cost + inner_path->startup_cost;
3322 3292804 : run_cost += outer_path->total_cost - outer_path->startup_cost;
3323 3292804 : if (outer_path_rows > 1)
3324 2403882 : run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
3325 :
3326 3292804 : inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
3327 3292804 : inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
3328 :
3329 3292804 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
3330 3230752 : extra->inner_unique)
3331 : {
3332 : /*
3333 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3334 : * executor will stop after the first match.
3335 : *
3336 : * Getting decent estimates requires inspection of the join quals,
3337 : * which we choose to postpone to final_cost_nestloop.
3338 : */
3339 :
3340 : /* Save private data for final_cost_nestloop */
3341 1342416 : workspace->inner_run_cost = inner_run_cost;
3342 1342416 : workspace->inner_rescan_run_cost = inner_rescan_run_cost;
3343 : }
3344 : else
3345 : {
3346 : /* Normal case; we'll scan whole input rel for each outer row */
3347 1950388 : run_cost += inner_run_cost;
3348 1950388 : if (outer_path_rows > 1)
3349 1509616 : run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
3350 : }
3351 :
3352 : /* CPU costs left for later */
3353 :
3354 : /* Public result fields */
3355 3292804 : workspace->disabled_nodes = disabled_nodes;
3356 3292804 : workspace->startup_cost = startup_cost;
3357 3292804 : workspace->total_cost = startup_cost + run_cost;
3358 : /* Save private data for final_cost_nestloop */
3359 3292804 : workspace->run_cost = run_cost;
3360 3292804 : }
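
/*
 * Illustrative sketch, not part of the planner: the "normal case"
 * arithmetic of initial_cost_nestloop above (no SEMI/ANTI join, inner not
 * known unique).  The function name and parameters are made up; each is a
 * plain number standing in for the corresponding Path field or
 * cost_rescan() output used above.
 */
static void
example_initial_nestloop_cost(double outer_startup, double outer_total,
							  double outer_rows,
							  double inner_startup, double inner_total,
							  double inner_rescan_start,
							  double inner_rescan_total,
							  double *startup_cost, double *total_cost)
{
	double		inner_run_cost = inner_total - inner_startup;
	double		inner_rescan_run_cost = inner_rescan_total - inner_rescan_start;
	double		run_cost;

	/* both inputs' startup costs must be paid before the first join row */
	*startup_cost = outer_startup + inner_startup;

	/* outer run cost, plus restarting the inner scan for each later outer row */
	run_cost = outer_total - outer_startup;
	if (outer_rows > 1)
		run_cost += (outer_rows - 1) * inner_rescan_start;

	/* scan the inner rel once in full, then rescan it per additional outer row */
	run_cost += inner_run_cost;
	if (outer_rows > 1)
		run_cost += (outer_rows - 1) * inner_rescan_run_cost;

	*total_cost = *startup_cost + run_cost;
}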
3361 :
3362 : /*
3363 : * final_cost_nestloop
3364 : * Final estimate of the cost and result size of a nestloop join path.
3365 : *
3366 : * 'path' is already filled in except for the rows and cost fields
3367 : * 'workspace' is the result from initial_cost_nestloop
3368 : * 'extra' contains miscellaneous information about the join
3369 : */
3370 : void
3371 1477320 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
3372 : JoinCostWorkspace *workspace,
3373 : JoinPathExtraData *extra)
3374 : {
3375 1477320 : Path *outer_path = path->jpath.outerjoinpath;
3376 1477320 : Path *inner_path = path->jpath.innerjoinpath;
3377 1477320 : double outer_path_rows = outer_path->rows;
3378 1477320 : double inner_path_rows = inner_path->rows;
3379 1477320 : Cost startup_cost = workspace->startup_cost;
3380 1477320 : Cost run_cost = workspace->run_cost;
3381 : Cost cpu_per_tuple;
3382 : QualCost restrict_qual_cost;
3383 : double ntuples;
3384 :
3385 : /* Set the number of disabled nodes. */
3386 1477320 : path->jpath.path.disabled_nodes = workspace->disabled_nodes;
3387 :
3388 : /* Protect some assumptions below that rowcounts aren't zero */
3389 1477320 : if (outer_path_rows <= 0)
3390 0 : outer_path_rows = 1;
3391 1477320 : if (inner_path_rows <= 0)
3392 726 : inner_path_rows = 1;
3393 : /* Mark the path with the correct row estimate */
3394 1477320 : if (path->jpath.path.param_info)
3395 32106 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3396 : else
3397 1445214 : path->jpath.path.rows = path->jpath.path.parent->rows;
3398 :
3399 : /* For partial paths, scale row estimate. */
3400 1477320 : if (path->jpath.path.parallel_workers > 0)
3401 : {
3402 44142 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3403 :
3404 44142 : path->jpath.path.rows =
3405 44142 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3406 : }
3407 :
3408 : /* cost of inner-relation source data (we already dealt with outer rel) */
3409 :
3410 1477320 : if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
3411 1433990 : extra->inner_unique)
3412 925232 : {
3413 : /*
3414 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3415 : * executor will stop after the first match.
3416 : */
3417 925232 : Cost inner_run_cost = workspace->inner_run_cost;
3418 925232 : Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3419 : double outer_matched_rows;
3420 : double outer_unmatched_rows;
3421 : Selectivity inner_scan_frac;
3422 :
3423 : /*
3424 : * For an outer-rel row that has at least one match, we can expect the
3425 : * inner scan to stop after a fraction 1/(match_count+1) of the inner
3426 : * rows, if the matches are evenly distributed. Since they probably
3427 : * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3428 : * that fraction. (If we used a larger fuzz factor, we'd have to
3429 : * clamp inner_scan_frac to at most 1.0; but since match_count is at
3430 : * least 1, no such clamp is needed now.)
3431 : */
3432 925232 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3433 925232 : outer_unmatched_rows = outer_path_rows - outer_matched_rows;
3434 925232 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3435 :
3436 : /*
3437 : * Compute number of tuples processed (not number emitted!). First,
3438 : * account for successfully-matched outer rows.
3439 : */
3440 925232 : ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
3441 :
3442 : /*
3443 : * Now we need to estimate the actual costs of scanning the inner
3444 : * relation, which may be quite a bit less than N times inner_run_cost
3445 : * due to early scan stops. We consider two cases. If the inner path
3446 : * is an indexscan using all the joinquals as indexquals, then an
3447 : * unmatched outer row results in an indexscan returning no rows,
3448 : * which is probably quite cheap. Otherwise, the executor will have
3449 : * to scan the whole inner rel for an unmatched row; not so cheap.
3450 : */
3451 925232 : if (has_indexed_join_quals(path))
3452 : {
3453 : /*
3454 : * Successfully-matched outer rows will only require scanning
3455 : * inner_scan_frac of the inner relation. In this case, we don't
3456 : * need to charge the full inner_run_cost even when that's more
3457 : * than inner_rescan_run_cost, because we can assume that none of
3458 : * the inner scans ever scan the whole inner relation. So it's
3459 : * okay to assume that all the inner scan executions can be
3460 : * fractions of the full cost, even if materialization is reducing
3461 : * the rescan cost. At this writing, it's impossible to get here
3462 : * for a materialized inner scan, so inner_run_cost and
3463 : * inner_rescan_run_cost will be the same anyway; but just in
3464 : * case, use inner_run_cost for the first matched tuple and
3465 : * inner_rescan_run_cost for additional ones.
3466 : */
3467 152398 : run_cost += inner_run_cost * inner_scan_frac;
3468 152398 : if (outer_matched_rows > 1)
3469 23006 : run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3470 :
3471 : /*
3472 : * Add the cost of inner-scan executions for unmatched outer rows.
3473 : * We estimate this as the same cost as returning the first tuple
3474 : * of a nonempty scan. We consider that these are all rescans,
3475 : * since we used inner_run_cost once already.
3476 : */
3477 152398 : run_cost += outer_unmatched_rows *
3478 152398 : inner_rescan_run_cost / inner_path_rows;
3479 :
3480 : /*
3481 : * We won't be evaluating any quals at all for unmatched rows, so
3482 : * don't add them to ntuples.
3483 : */
3484 : }
3485 : else
3486 : {
3487 : /*
3488 : * Here, a complicating factor is that rescans may be cheaper than
3489 : * first scans. If we never scan all the way to the end of the
3490 : * inner rel, it might be (depending on the plan type) that we'd
3491 : * never pay the whole inner first-scan run cost. However it is
3492 : * difficult to estimate whether that will happen (and it could
3493 : * not happen if there are any unmatched outer rows!), so be
3494 : * conservative and always charge the whole first-scan cost once.
3495 : * We consider this charge to correspond to the first unmatched
3496 : * outer row, unless there isn't one in our estimate, in which
3497 : * case blame it on the first matched row.
3498 : */
3499 :
3500 : /* First, count all unmatched join tuples as being processed */
3501 772834 : ntuples += outer_unmatched_rows * inner_path_rows;
3502 :
3503 : /* Now add the forced full scan, and decrement appropriate count */
3504 772834 : run_cost += inner_run_cost;
3505 772834 : if (outer_unmatched_rows >= 1)
3506 733804 : outer_unmatched_rows -= 1;
3507 : else
3508 39030 : outer_matched_rows -= 1;
3509 :
3510 : /* Add inner run cost for additional outer tuples having matches */
3511 772834 : if (outer_matched_rows > 0)
3512 277966 : run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3513 :
3514 : /* Add inner run cost for additional unmatched outer tuples */
3515 772834 : if (outer_unmatched_rows > 0)
3516 488080 : run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3517 : }
3518 : }
3519 : else
3520 : {
3521 : /* Normal-case source costs were included in preliminary estimate */
3522 :
3523 : /* Compute number of tuples processed (not number emitted!) */
3524 552088 : ntuples = outer_path_rows * inner_path_rows;
3525 : }
3526 :
3527 : /* CPU costs */
3528 1477320 : cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
3529 1477320 : startup_cost += restrict_qual_cost.startup;
3530 1477320 : cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
3531 1477320 : run_cost += cpu_per_tuple * ntuples;
3532 :
3533 : /* tlist eval costs are paid per output row, not per tuple scanned */
3534 1477320 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3535 1477320 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3536 :
3537 1477320 : path->jpath.path.startup_cost = startup_cost;
3538 1477320 : path->jpath.path.total_cost = startup_cost + run_cost;
3539 1477320 : }
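
/*
 * As a worked example of the "stop after first match" arithmetic above
 * (made-up numbers): with match_count = 3, inner_scan_frac = 2.0 / (3 + 1)
 * = 0.5, i.e. each matched outer row is expected to scan about half the
 * inner rows before finding its match (the naive 1/4 doubled because
 * matches are rarely spread evenly).  With 100 matched outer rows and
 * 1000 inner rows, the matched portion contributes 100 * 1000 * 0.5 =
 * 50000 to ntuples.
 */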
3540 :
3541 : /*
3542 : * initial_cost_mergejoin
3543 : * Preliminary estimate of the cost of a mergejoin path.
3544 : *
3545 : * This must quickly produce lower-bound estimates of the path's startup and
3546 : * total costs. If we are unable to eliminate the proposed path from
3547 : * consideration using the lower bounds, final_cost_mergejoin will be called
3548 : * to obtain the final estimates.
3549 : *
3550 : * The exact division of labor between this function and final_cost_mergejoin
3551 : * is private to them, and represents a tradeoff between speed of the initial
3552 : * estimate and getting a tight lower bound. We choose to not examine the
3553 : * join quals here, except for obtaining the scan selectivity estimate which
3554 : * is really essential (but fortunately, use of caching keeps the cost of
3555 : * getting that down to something reasonable).
3556 : * We also assume that cost_sort/cost_incremental_sort is cheap enough to use
3557 : * here.
3558 : *
3559 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3560 : * other data to be used by final_cost_mergejoin
3561 : * 'jointype' is the type of join to be performed
3562 : * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3563 : * 'outer_path' is the outer input to the join
3564 : * 'inner_path' is the inner input to the join
3565 : * 'outersortkeys' is the list of sort keys for the outer path
3566 : * 'innersortkeys' is the list of sort keys for the inner path
3567 : * 'outer_presorted_keys' is the number of presorted keys of the outer path
3568 : * 'extra' contains miscellaneous information about the join
3569 : *
3570 : * Note: outersortkeys and innersortkeys should be NIL if no explicit
3571 : * sort is needed because the respective source path is already ordered.
3572 : */
3573 : void
3574 1489162 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3575 : JoinType jointype,
3576 : List *mergeclauses,
3577 : Path *outer_path, Path *inner_path,
3578 : List *outersortkeys, List *innersortkeys,
3579 : int outer_presorted_keys,
3580 : JoinPathExtraData *extra)
3581 : {
3582 : int disabled_nodes;
3583 1489162 : Cost startup_cost = 0;
3584 1489162 : Cost run_cost = 0;
3585 1489162 : double outer_path_rows = outer_path->rows;
3586 1489162 : double inner_path_rows = inner_path->rows;
3587 : Cost inner_run_cost;
3588 : double outer_rows,
3589 : inner_rows,
3590 : outer_skip_rows,
3591 : inner_skip_rows;
3592 : Selectivity outerstartsel,
3593 : outerendsel,
3594 : innerstartsel,
3595 : innerendsel;
3596 : Path sort_path; /* dummy for result of
3597 : * cost_sort/cost_incremental_sort */
3598 :
3599 : /* Protect some assumptions below that rowcounts aren't zero */
3600 1489162 : if (outer_path_rows <= 0)
3601 96 : outer_path_rows = 1;
3602 1489162 : if (inner_path_rows <= 0)
3603 126 : inner_path_rows = 1;
3604 :
3605 : /*
3606 : * A merge join will stop as soon as it exhausts either input stream
3607 : * (unless it's an outer join, in which case the outer side has to be
3608 : * scanned all the way anyway). Estimate fraction of the left and right
3609 : * inputs that will actually need to be scanned. Likewise, we can
3610 : * estimate the number of rows that will be skipped before the first join
3611 : * pair is found, which should be factored into startup cost. We use only
3612 : * the first (most significant) merge clause for this purpose. Since
3613 : * mergejoinscansel() is a fairly expensive computation, we cache the
3614 : * results in the merge clause RestrictInfo.
3615 : */
3616 1489162 : if (mergeclauses && jointype != JOIN_FULL)
3617 1482898 : {
3618 1482898 : RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3619 : List *opathkeys;
3620 : List *ipathkeys;
3621 : PathKey *opathkey;
3622 : PathKey *ipathkey;
3623 : MergeScanSelCache *cache;
3624 :
3625 : /* Get the input pathkeys to determine the sort-order details */
3626 1482898 : opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3627 1482898 : ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3628 : Assert(opathkeys);
3629 : Assert(ipathkeys);
3630 1482898 : opathkey = (PathKey *) linitial(opathkeys);
3631 1482898 : ipathkey = (PathKey *) linitial(ipathkeys);
3632 : /* debugging check */
3633 1482898 : if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
3634 1482898 : opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
3635 1482898 : opathkey->pk_cmptype != ipathkey->pk_cmptype ||
3636 1482898 : opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
3637 0 : elog(ERROR, "left and right pathkeys do not match in mergejoin");
3638 :
3639 : /* Get the selectivity with caching */
3640 1482898 : cache = cached_scansel(root, firstclause, opathkey);
3641 :
3642 1482898 : if (bms_is_subset(firstclause->left_relids,
3643 1482898 : outer_path->parent->relids))
3644 : {
3645 : /* left side of clause is outer */
3646 773324 : outerstartsel = cache->leftstartsel;
3647 773324 : outerendsel = cache->leftendsel;
3648 773324 : innerstartsel = cache->rightstartsel;
3649 773324 : innerendsel = cache->rightendsel;
3650 : }
3651 : else
3652 : {
3653 : /* left side of clause is inner */
3654 709574 : outerstartsel = cache->rightstartsel;
3655 709574 : outerendsel = cache->rightendsel;
3656 709574 : innerstartsel = cache->leftstartsel;
3657 709574 : innerendsel = cache->leftendsel;
3658 : }
3659 1482898 : if (jointype == JOIN_LEFT ||
3660 : jointype == JOIN_ANTI)
3661 : {
3662 199744 : outerstartsel = 0.0;
3663 199744 : outerendsel = 1.0;
3664 : }
3665 1283154 : else if (jointype == JOIN_RIGHT ||
3666 : jointype == JOIN_RIGHT_ANTI)
3667 : {
3668 199274 : innerstartsel = 0.0;
3669 199274 : innerendsel = 1.0;
3670 : }
3671 : }
3672 : else
3673 : {
3674 : /* cope with clauseless or full mergejoin */
3675 6264 : outerstartsel = innerstartsel = 0.0;
3676 6264 : outerendsel = innerendsel = 1.0;
3677 : }
3678 :
3679 : /*
3680 : * Convert selectivities to row counts. We force outer_rows and
3681 : * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3682 : */
3683 1489162 : outer_skip_rows = rint(outer_path_rows * outerstartsel);
3684 1489162 : inner_skip_rows = rint(inner_path_rows * innerstartsel);
3685 1489162 : outer_rows = clamp_row_est(outer_path_rows * outerendsel);
3686 1489162 : inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3687 :
3688 : Assert(outer_skip_rows <= outer_rows);
3689 : Assert(inner_skip_rows <= inner_rows);
3690 :
3691 : /*
3692 : * Readjust scan selectivities to account for above rounding. This is
3693 : * normally an insignificant effect, but when there are only a few rows in
3694 : * the inputs, failing to do this makes for a large percentage error.
3695 : */
3696 1489162 : outerstartsel = outer_skip_rows / outer_path_rows;
3697 1489162 : innerstartsel = inner_skip_rows / inner_path_rows;
3698 1489162 : outerendsel = outer_rows / outer_path_rows;
3699 1489162 : innerendsel = inner_rows / inner_path_rows;
3700 :
3701 : Assert(outerstartsel <= outerendsel);
3702 : Assert(innerstartsel <= innerendsel);
3703 :
3704 1489162 : disabled_nodes = enable_mergejoin ? 0 : 1;
3705 :
3706 : /* cost of source data */
3707 :
3708 1489162 : if (outersortkeys) /* do we need to sort outer? */
3709 : {
3710 : /*
3711 : * We can assert that the outer path is not already ordered
3712 : * appropriately for the mergejoin; otherwise, outersortkeys would
3713 : * have been set to NIL.
3714 : */
3715 : Assert(!pathkeys_contained_in(outersortkeys, outer_path->pathkeys));
3716 :
3717 : /*
3718 : * We choose to use incremental sort if it is enabled and there are
3719 : * presorted keys; otherwise we use full sort.
3720 : */
3721 760506 : if (enable_incremental_sort && outer_presorted_keys > 0)
3722 : {
3723 1582 : cost_incremental_sort(&sort_path,
3724 : root,
3725 : outersortkeys,
3726 : outer_presorted_keys,
3727 : outer_path->disabled_nodes,
3728 : outer_path->startup_cost,
3729 : outer_path->total_cost,
3730 : outer_path_rows,
3731 1582 : outer_path->pathtarget->width,
3732 : 0.0,
3733 : work_mem,
3734 : -1.0);
3735 : }
3736 : else
3737 : {
3738 758924 : cost_sort(&sort_path,
3739 : root,
3740 : outersortkeys,
3741 : outer_path->disabled_nodes,
3742 : outer_path->total_cost,
3743 : outer_path_rows,
3744 758924 : outer_path->pathtarget->width,
3745 : 0.0,
3746 : work_mem,
3747 : -1.0);
3748 : }
3749 :
3750 760506 : disabled_nodes += sort_path.disabled_nodes;
3751 760506 : startup_cost += sort_path.startup_cost;
3752 760506 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3753 760506 : * outerstartsel;
3754 760506 : run_cost += (sort_path.total_cost - sort_path.startup_cost)
3755 760506 : * (outerendsel - outerstartsel);
3756 : }
3757 : else
3758 : {
3759 728656 : disabled_nodes += outer_path->disabled_nodes;
3760 728656 : startup_cost += outer_path->startup_cost;
3761 728656 : startup_cost += (outer_path->total_cost - outer_path->startup_cost)
3762 728656 : * outerstartsel;
3763 728656 : run_cost += (outer_path->total_cost - outer_path->startup_cost)
3764 728656 : * (outerendsel - outerstartsel);
3765 : }
3766 :
3767 1489162 : if (innersortkeys) /* do we need to sort inner? */
3768 : {
3769 : /*
3770 : * We can assert that the inner path is not already ordered
3771 : * appropriately for the mergejoin; otherwise, innersortkeys would
3772 : * have been set to NIL.
3773 : */
3774 : Assert(!pathkeys_contained_in(innersortkeys, inner_path->pathkeys));
3775 :
3776 : /*
3777 :          * We do not consider incremental sort for the inner path, because
3778 : * incremental sort does not support mark/restore.
3779 : */
3780 :
3781 1198592 : cost_sort(&sort_path,
3782 : root,
3783 : innersortkeys,
3784 : inner_path->disabled_nodes,
3785 : inner_path->total_cost,
3786 : inner_path_rows,
3787 1198592 : inner_path->pathtarget->width,
3788 : 0.0,
3789 : work_mem,
3790 : -1.0);
3791 1198592 : disabled_nodes += sort_path.disabled_nodes;
3792 1198592 : startup_cost += sort_path.startup_cost;
3793 1198592 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3794 1198592 : * innerstartsel;
3795 1198592 : inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3796 1198592 : * (innerendsel - innerstartsel);
3797 : }
3798 : else
3799 : {
3800 290570 : disabled_nodes += inner_path->disabled_nodes;
3801 290570 : startup_cost += inner_path->startup_cost;
3802 290570 : startup_cost += (inner_path->total_cost - inner_path->startup_cost)
3803 290570 : * innerstartsel;
3804 290570 : inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
3805 290570 : * (innerendsel - innerstartsel);
3806 : }
3807 :
3808 : /*
3809 : * We can't yet determine whether rescanning occurs, or whether
3810 : * materialization of the inner input should be done. The minimum
3811 : * possible inner input cost, regardless of rescan and materialization
3812 : * considerations, is inner_run_cost. We include that in
3813 : * workspace->total_cost, but not yet in run_cost.
3814 : */
3815 :
3816 : /* CPU costs left for later */
3817 :
3818 : /* Public result fields */
3819 1489162 : workspace->disabled_nodes = disabled_nodes;
3820 1489162 : workspace->startup_cost = startup_cost;
3821 1489162 : workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3822 : /* Save private data for final_cost_mergejoin */
3823 1489162 : workspace->run_cost = run_cost;
3824 1489162 : workspace->inner_run_cost = inner_run_cost;
3825 1489162 : workspace->outer_rows = outer_rows;
3826 1489162 : workspace->inner_rows = inner_rows;
3827 1489162 : workspace->outer_skip_rows = outer_skip_rows;
3828 1489162 : workspace->inner_skip_rows = inner_skip_rows;
3829 1489162 : }
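
/*
 * As a worked example of the selectivity handling above (made-up numbers):
 * if the outer input has 1000 rows with outerstartsel = 0.2 and
 * outerendsel = 0.7, then outer_skip_rows = rint(200) = 200 and
 * outer_rows = 700.  The cost of producing the first 20% of the outer
 * input is charged to startup_cost, the next 50% to run_cost, and the
 * final 30% is never charged because the merge is expected to stop early.
 */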
3830 :
3831 : /*
3832 : * final_cost_mergejoin
3833 : * Final estimate of the cost and result size of a mergejoin path.
3834 : *
3835 : * Unlike other costsize functions, this routine makes two actual decisions:
3836 : * whether the executor will need to do mark/restore, and whether we should
3837 : * materialize the inner path. It would be logically cleaner to build
3838 : * separate paths testing these alternatives, but that would require repeating
3839 : * most of the cost calculations, which are not all that cheap. Since the
3840 : * choice will not affect output pathkeys or startup cost, only total cost,
3841 : * there is no possibility of wanting to keep more than one path. So it seems
3842 : * best to make the decisions here and record them in the path's
3843 : * skip_mark_restore and materialize_inner fields.
3844 : *
3845 : * Mark/restore overhead is usually required, but can be skipped if we know
3846 : * that the executor need find only one match per outer tuple, and that the
3847 : * mergeclauses are sufficient to identify a match.
3848 : *
3849 : * We materialize the inner path if we need mark/restore and either the inner
3850 : * path can't support mark/restore, or it's cheaper to use an interposed
3851 : * Material node to handle mark/restore.
3852 : *
3853 : * 'path' is already filled in except for the rows and cost fields and
3854 : * skip_mark_restore and materialize_inner
3855 : * 'workspace' is the result from initial_cost_mergejoin
3856 : * 'extra' contains miscellaneous information about the join
3857 : */
3858 : void
3859 462624 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3860 : JoinCostWorkspace *workspace,
3861 : JoinPathExtraData *extra)
3862 : {
3863 462624 : Path *outer_path = path->jpath.outerjoinpath;
3864 462624 : Path *inner_path = path->jpath.innerjoinpath;
3865 462624 : double inner_path_rows = inner_path->rows;
3866 462624 : List *mergeclauses = path->path_mergeclauses;
3867 462624 : List *innersortkeys = path->innersortkeys;
3868 462624 : Cost startup_cost = workspace->startup_cost;
3869 462624 : Cost run_cost = workspace->run_cost;
3870 462624 : Cost inner_run_cost = workspace->inner_run_cost;
3871 462624 : double outer_rows = workspace->outer_rows;
3872 462624 : double inner_rows = workspace->inner_rows;
3873 462624 : double outer_skip_rows = workspace->outer_skip_rows;
3874 462624 : double inner_skip_rows = workspace->inner_skip_rows;
3875 : Cost cpu_per_tuple,
3876 : bare_inner_cost,
3877 : mat_inner_cost;
3878 : QualCost merge_qual_cost;
3879 : QualCost qp_qual_cost;
3880 : double mergejointuples,
3881 : rescannedtuples;
3882 : double rescanratio;
3883 :
3884 : /* Set the number of disabled nodes. */
3885 462624 : path->jpath.path.disabled_nodes = workspace->disabled_nodes;
3886 :
3887 : /* Protect some assumptions below that rowcounts aren't zero */
3888 462624 : if (inner_path_rows <= 0)
3889 90 : inner_path_rows = 1;
3890 :
3891 : /* Mark the path with the correct row estimate */
3892 462624 : if (path->jpath.path.param_info)
3893 1624 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3894 : else
3895 461000 : path->jpath.path.rows = path->jpath.path.parent->rows;
3896 :
3897 : /* For partial paths, scale row estimate. */
3898 462624 : if (path->jpath.path.parallel_workers > 0)
3899 : {
3900 65828 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3901 :
3902 65828 : path->jpath.path.rows =
3903 65828 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3904 : }
3905 :
3906 : /*
3907 : * Compute cost of the mergequals and qpquals (other restriction clauses)
3908 : * separately.
3909 : */
3910 462624 : cost_qual_eval(&merge_qual_cost, mergeclauses, root);
3911 462624 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3912 462624 : qp_qual_cost.startup -= merge_qual_cost.startup;
3913 462624 : qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
3914 :
3915 : /*
3916 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3917 : * executor will stop scanning for matches after the first match. When
3918 : * all the joinclauses are merge clauses, this means we don't ever need to
3919 : * back up the merge, and so we can skip mark/restore overhead.
3920 : */
3921 462624 : if ((path->jpath.jointype == JOIN_SEMI ||
3922 455378 : path->jpath.jointype == JOIN_ANTI ||
3923 608702 : extra->inner_unique) &&
3924 160376 : (list_length(path->jpath.joinrestrictinfo) ==
3925 160376 : list_length(path->path_mergeclauses)))
3926 138944 : path->skip_mark_restore = true;
3927 : else
3928 323680 : path->skip_mark_restore = false;
3929 :
3930 : /*
3931 : * Get approx # tuples passing the mergequals. We use approx_tuple_count
3932 : * here because we need an estimate done with JOIN_INNER semantics.
3933 : */
3934 462624 : mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
3935 :
3936 : /*
3937 : * When there are equal merge keys in the outer relation, the mergejoin
3938 : * must rescan any matching tuples in the inner relation. This means
3939 : * re-fetching inner tuples; we have to estimate how often that happens.
3940 : *
3941 : * For regular inner and outer joins, the number of re-fetches can be
3942 : * estimated approximately as size of merge join output minus size of
3943 : * inner relation. Assume that the distinct key values are 1, 2, ..., and
3944 : * denote the number of values of each key in the outer relation as m1,
3945 : * m2, ...; in the inner relation, n1, n2, ... Then we have
3946 : *
3947 : * size of join = m1 * n1 + m2 * n2 + ...
3948 : *
3949 : * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
3950 : * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
3951 : * relation
3952 : *
3953 : * This equation works correctly for outer tuples having no inner match
3954 : * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
3955 : * are effectively subtracting those from the number of rescanned tuples,
3956 : * when we should not. Can we do better without expensive selectivity
3957 : * computations?
3958 : *
3959 : * The whole issue is moot if we know we don't need to mark/restore at
3960 : * all, or if we are working from a unique-ified outer input.
3961 : */
3962 462624 : if (path->skip_mark_restore ||
3963 323680 : RELATION_WAS_MADE_UNIQUE(outer_path->parent, extra->sjinfo,
3964 : path->jpath.jointype))
3965 144074 : rescannedtuples = 0;
3966 : else
3967 : {
3968 318550 : rescannedtuples = mergejointuples - inner_path_rows;
3969 : /* Must clamp because of possible underestimate */
3970 318550 : if (rescannedtuples < 0)
3971 76788 : rescannedtuples = 0;
3972 : }
3973 :
3974 : /*
3975 : * We'll inflate various costs this much to account for rescanning. Note
3976 : * that this is to be multiplied by something involving inner_rows, or
3977 : * another number related to the portion of the inner rel we'll scan.
3978 : */
3979 462624 : rescanratio = 1.0 + (rescannedtuples / inner_rows);
3980 :
3981 : /*
3982 : * Decide whether we want to materialize the inner input to shield it from
3983 :      * mark/restore and from performing re-fetches.  Our cost model for regular
3984 : * re-fetches is that a re-fetch costs the same as an original fetch,
3985 : * which is probably an overestimate; but on the other hand we ignore the
3986 : * bookkeeping costs of mark/restore. Not clear if it's worth developing
3987 : * a more refined model. So we just need to inflate the inner run cost by
3988 : * rescanratio.
3989 : */
3990 462624 : bare_inner_cost = inner_run_cost * rescanratio;
3991 :
3992 : /*
3993 : * When we interpose a Material node the re-fetch cost is assumed to be
3994 : * just cpu_operator_cost per tuple, independently of the underlying
3995 : * plan's cost; and we charge an extra cpu_operator_cost per original
3996 : * fetch as well. Note that we're assuming the materialize node will
3997 : * never spill to disk, since it only has to remember tuples back to the
3998 : * last mark. (If there are a huge number of duplicates, our other cost
3999 : * factors will make the path so expensive that it probably won't get
4000 : * chosen anyway.) So we don't use cost_rescan here.
4001 : *
4002 : * Note: keep this estimate in sync with create_mergejoin_plan's labeling
4003 : * of the generated Material node.
4004 : */
4005 462624 : mat_inner_cost = inner_run_cost +
4006 462624 : cpu_operator_cost * inner_rows * rescanratio;
4007 :
4008 : /*
4009 : * If we don't need mark/restore at all, we don't need materialization.
4010 : */
4011 462624 : if (path->skip_mark_restore)
4012 138944 : path->materialize_inner = false;
4013 :
4014 : /*
4015 : * Prefer materializing if it looks cheaper, unless the user has asked to
4016 : * suppress materialization.
4017 : */
4018 323680 : else if (enable_material && mat_inner_cost < bare_inner_cost)
4019 3402 : path->materialize_inner = true;
4020 :
4021 : /*
4022 : * Even if materializing doesn't look cheaper, we *must* do it if the
4023 : * inner path is to be used directly (without sorting) and it doesn't
4024 : * support mark/restore.
4025 : *
4026 : * Since the inner side must be ordered, and only Sorts and IndexScans can
4027 : * create order to begin with, and they both support mark/restore, you
4028 : * might think there's no problem --- but you'd be wrong. Nestloop and
4029 : * merge joins can *preserve* the order of their inputs, so they can be
4030 : * selected as the input of a mergejoin, and they don't support
4031 : * mark/restore at present.
4032 : *
4033 : * We don't test the value of enable_material here, because
4034 : * materialization is required for correctness in this case, and turning
4035 : * it off does not entitle us to deliver an invalid plan.
4036 : */
4037 320278 : else if (innersortkeys == NIL &&
4038 8726 : !ExecSupportsMarkRestore(inner_path))
4039 1904 : path->materialize_inner = true;
4040 :
4041 : /*
4042 : * Also, force materializing if the inner path is to be sorted and the
4043 : * sort is expected to spill to disk. This is because the final merge
4044 : * pass can be done on-the-fly if it doesn't have to support mark/restore.
4045 : * We don't try to adjust the cost estimates for this consideration,
4046 : * though.
4047 : *
4048 : * Since materialization is a performance optimization in this case,
4049 : * rather than necessary for correctness, we skip it if enable_material is
4050 : * off.
4051 : */
4052 318374 : else if (enable_material && innersortkeys != NIL &&
4053 311504 : relation_byte_size(inner_path_rows,
4054 311504 : inner_path->pathtarget->width) >
4055 311504 : work_mem * (Size) 1024)
4056 284 : path->materialize_inner = true;
4057 : else
4058 318090 : path->materialize_inner = false;
4059 :
4060 : /* Charge the right incremental cost for the chosen case */
4061 462624 : if (path->materialize_inner)
4062 5590 : run_cost += mat_inner_cost;
4063 : else
4064 457034 : run_cost += bare_inner_cost;
4065 :
4066 : /* CPU costs */
4067 :
4068 : /*
4069 : * The number of tuple comparisons needed is approximately number of outer
4070 : * rows plus number of inner rows plus number of rescanned tuples (can we
4071 : * refine this?). At each one, we need to evaluate the mergejoin quals.
4072 : */
4073 462624 : startup_cost += merge_qual_cost.startup;
4074 462624 : startup_cost += merge_qual_cost.per_tuple *
4075 462624 : (outer_skip_rows + inner_skip_rows * rescanratio);
4076 462624 : run_cost += merge_qual_cost.per_tuple *
4077 462624 : ((outer_rows - outer_skip_rows) +
4078 462624 : (inner_rows - inner_skip_rows) * rescanratio);
4079 :
4080 : /*
4081 : * For each tuple that gets through the mergejoin proper, we charge
4082 : * cpu_tuple_cost plus the cost of evaluating additional restriction
4083 : * clauses that are to be applied at the join. (This is pessimistic since
4084 : * not all of the quals may get evaluated at each tuple.)
4085 : *
4086 : * Note: we could adjust for SEMI/ANTI joins skipping some qual
4087 : * evaluations here, but it's probably not worth the trouble.
4088 : */
4089 462624 : startup_cost += qp_qual_cost.startup;
4090 462624 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4091 462624 : run_cost += cpu_per_tuple * mergejointuples;
4092 :
4093 : /* tlist eval costs are paid per output row, not per tuple scanned */
4094 462624 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4095 462624 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4096 :
4097 462624 : path->jpath.path.startup_cost = startup_cost;
4098 462624 : path->jpath.path.total_cost = startup_cost + run_cost;
4099 462624 : }
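
/*
 * As a worked example of the re-fetch estimate above (made-up numbers):
 * suppose two join key values with m1 = 2, m2 = 3 rows in the outer
 * relation and n1 = 4, n2 = 1 rows in the inner relation.  The join
 * produces 2*4 + 3*1 = 11 rows and the inner relation has 5 rows, so
 * rescannedtuples = 11 - 5 = 6 (4 re-fetches for key 1 and 2 for key 2),
 * and rescanratio = 1 + 6/5 = 2.2.
 */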
4100 :
4101 : /*
4102 :  * Run mergejoinscansel() with caching
4103 : */
4104 : static MergeScanSelCache *
4105 1482898 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
4106 : {
4107 : MergeScanSelCache *cache;
4108 : ListCell *lc;
4109 : Selectivity leftstartsel,
4110 : leftendsel,
4111 : rightstartsel,
4112 : rightendsel;
4113 : MemoryContext oldcontext;
4114 :
4115 : /* Do we have this result already? */
4116 1482904 : foreach(lc, rinfo->scansel_cache)
4117 : {
4118 1340044 : cache = (MergeScanSelCache *) lfirst(lc);
4119 1340044 : if (cache->opfamily == pathkey->pk_opfamily &&
4120 1340044 : cache->collation == pathkey->pk_eclass->ec_collation &&
4121 1340044 : cache->cmptype == pathkey->pk_cmptype &&
4122 1340038 : cache->nulls_first == pathkey->pk_nulls_first)
4123 1340038 : return cache;
4124 : }
4125 :
4126 : /* Nope, do the computation */
4127 142860 : mergejoinscansel(root,
4128 142860 : (Node *) rinfo->clause,
4129 : pathkey->pk_opfamily,
4130 : pathkey->pk_cmptype,
4131 142860 : pathkey->pk_nulls_first,
4132 : &leftstartsel,
4133 : &leftendsel,
4134 : &rightstartsel,
4135 : &rightendsel);
4136 :
4137 : /* Cache the result in suitably long-lived workspace */
4138 142860 : oldcontext = MemoryContextSwitchTo(root->planner_cxt);
4139 :
4140 142860 : cache = palloc_object(MergeScanSelCache);
4141 142860 : cache->opfamily = pathkey->pk_opfamily;
4142 142860 : cache->collation = pathkey->pk_eclass->ec_collation;
4143 142860 : cache->cmptype = pathkey->pk_cmptype;
4144 142860 : cache->nulls_first = pathkey->pk_nulls_first;
4145 142860 : cache->leftstartsel = leftstartsel;
4146 142860 : cache->leftendsel = leftendsel;
4147 142860 : cache->rightstartsel = rightstartsel;
4148 142860 : cache->rightendsel = rightendsel;
4149 :
4150 142860 : rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
4151 :
4152 142860 : MemoryContextSwitchTo(oldcontext);
4153 :
4154 142860 : return cache;
4155 : }
4156 :
4157 : /*
4158 : * initial_cost_hashjoin
4159 : * Preliminary estimate of the cost of a hashjoin path.
4160 : *
4161 : * This must quickly produce lower-bound estimates of the path's startup and
4162 : * total costs. If we are unable to eliminate the proposed path from
4163 : * consideration using the lower bounds, final_cost_hashjoin will be called
4164 : * to obtain the final estimates.
4165 : *
4166 : * The exact division of labor between this function and final_cost_hashjoin
4167 : * is private to them, and represents a tradeoff between speed of the initial
4168 : * estimate and getting a tight lower bound. We choose to not examine the
4169 : * join quals here (other than by counting the number of hash clauses),
4170 : * so we can't do much with CPU costs. We do assume that
4171 : * ExecChooseHashTableSize is cheap enough to use here.
4172 : *
4173 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
4174 : * other data to be used by final_cost_hashjoin
4175 : * 'jointype' is the type of join to be performed
4176 : * 'hashclauses' is the list of joinclauses to be used as hash clauses
4177 : * 'outer_path' is the outer input to the join
4178 : * 'inner_path' is the inner input to the join
4179 : * 'extra' contains miscellaneous information about the join
4180 : * 'parallel_hash' indicates that inner_path is partial and that a shared
4181 : * hash table will be built in parallel
4182 : */
4183 : void
4184 882772 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
4185 : JoinType jointype,
4186 : List *hashclauses,
4187 : Path *outer_path, Path *inner_path,
4188 : JoinPathExtraData *extra,
4189 : bool parallel_hash)
4190 : {
4191 : int disabled_nodes;
4192 882772 : Cost startup_cost = 0;
4193 882772 : Cost run_cost = 0;
4194 882772 : double outer_path_rows = outer_path->rows;
4195 882772 : double inner_path_rows = inner_path->rows;
4196 882772 : double inner_path_rows_total = inner_path_rows;
4197 882772 : int num_hashclauses = list_length(hashclauses);
4198 : int numbuckets;
4199 : int numbatches;
4200 : int num_skew_mcvs;
4201 : size_t space_allowed; /* unused */
4202 :
4203 : /* Count up disabled nodes. */
4204 882772 : disabled_nodes = enable_hashjoin ? 0 : 1;
4205 882772 : disabled_nodes += inner_path->disabled_nodes;
4206 882772 : disabled_nodes += outer_path->disabled_nodes;
4207 :
4208 : /* cost of source data */
4209 882772 : startup_cost += outer_path->startup_cost;
4210 882772 : run_cost += outer_path->total_cost - outer_path->startup_cost;
4211 882772 : startup_cost += inner_path->total_cost;
4212 :
4213 : /*
4214 : * Cost of computing hash function: must do it once per input tuple. We
4215 : * charge one cpu_operator_cost for each column's hash function. Also,
4216 : * tack on one cpu_tuple_cost per inner row, to model the costs of
4217 : * inserting the row into the hashtable.
4218 : *
4219 : * XXX when a hashclause is more complex than a single operator, we really
4220 : * should charge the extra eval costs of the left or right side, as
4221 : * appropriate, here. This seems more work than it's worth at the moment.
4222 : */
4223 882772 : startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
4224 882772 : * inner_path_rows;
4225 882772 : run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
4226 :
4227 : /*
4228 : * If this is a parallel hash build, then the value we have for
4229 : * inner_rows_total currently refers only to the rows returned by each
4230 : * participant. For shared hash table size estimation, we need the total
4231 : * number, so we need to undo the division.
4232 : */
4233 882772 : if (parallel_hash)
4234 75186 : inner_path_rows_total *= get_parallel_divisor(inner_path);
4235 :
4236 : /*
4237 : * Get hash table size that executor would use for inner relation.
4238 : *
4239 : * XXX for the moment, always assume that skew optimization will be
4240 : * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
4241 : * trying to determine that for sure.
4242 : *
4243 : * XXX at some point it might be interesting to try to account for skew
4244 : * optimization in the cost estimate, but for now, we don't.
4245 : */
4246 882772 : ExecChooseHashTableSize(inner_path_rows_total,
4247 882772 : inner_path->pathtarget->width,
4248 : true, /* useskew */
4249 : parallel_hash, /* try_combined_hash_mem */
4250 : outer_path->parallel_workers,
4251 : &space_allowed,
4252 : &numbuckets,
4253 : &numbatches,
4254 : &num_skew_mcvs);
4255 :
4256 : /*
4257 :      * If the inner relation is too big then we will need to "batch" the join,
4258 : * which implies writing and reading most of the tuples to disk an extra
4259 : * time. Charge seq_page_cost per page, since the I/O should be nice and
4260 : * sequential. Writing the inner rel counts as startup cost, all the rest
4261 : * as run cost.
4262 : */
4263 882772 : if (numbatches > 1)
4264 : {
4265 4658 : double outerpages = page_size(outer_path_rows,
4266 4658 : outer_path->pathtarget->width);
4267 4658 : double innerpages = page_size(inner_path_rows,
4268 4658 : inner_path->pathtarget->width);
4269 :
4270 4658 : startup_cost += seq_page_cost * innerpages;
4271 4658 : run_cost += seq_page_cost * (innerpages + 2 * outerpages);
4272 : }
4273 :
4274 : /* CPU costs left for later */
4275 :
4276 : /* Public result fields */
4277 882772 : workspace->disabled_nodes = disabled_nodes;
4278 882772 : workspace->startup_cost = startup_cost;
4279 882772 : workspace->total_cost = startup_cost + run_cost;
4280 : /* Save private data for final_cost_hashjoin */
4281 882772 : workspace->run_cost = run_cost;
4282 882772 : workspace->numbuckets = numbuckets;
4283 882772 : workspace->numbatches = numbatches;
4284 882772 : workspace->inner_rows_total = inner_path_rows_total;
4285 882772 : }
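/*
 * Worked illustration of the charges above (purely hypothetical input values,
 * using the default GUC settings cpu_operator_cost = 0.0025, cpu_tuple_cost =
 * 0.01 and seq_page_cost = 1.0): with one hashclause, 1000 inner rows and
 * 10000 outer rows, building the hash table adds
 *     (0.0025 * 1 + 0.01) * 1000 = 12.5
 * to startup_cost, and hashing the probe side adds
 *     0.0025 * 1 * 10000 = 25.0
 * to run_cost, on top of the input paths' own costs.  If the join needed,
 * say, two batches with innerpages = 100 and outerpages = 500, the extra I/O
 * would add 1.0 * 100 = 100 to startup_cost and 1.0 * (100 + 2 * 500) = 1100
 * to run_cost.
 */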
4286 :
4287 : /*
4288 : * final_cost_hashjoin
4289 : * Final estimate of the cost and result size of a hashjoin path.
4290 : *
4291 : * Note: the numbatches estimate is also saved into 'path' for use later
4292 : *
4293 : * 'path' is already filled in except for the rows and cost fields and
4294 : * num_batches
4295 : * 'workspace' is the result from initial_cost_hashjoin
4296 : * 'extra' contains miscellaneous information about the join
4297 : */
4298 : void
4299 457376 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4300 : JoinCostWorkspace *workspace,
4301 : JoinPathExtraData *extra)
4302 : {
4303 457376 : Path *outer_path = path->jpath.outerjoinpath;
4304 457376 : Path *inner_path = path->jpath.innerjoinpath;
4305 457376 : double outer_path_rows = outer_path->rows;
4306 457376 : double inner_path_rows = inner_path->rows;
4307 457376 : double inner_path_rows_total = workspace->inner_rows_total;
4308 457376 : List *hashclauses = path->path_hashclauses;
4309 457376 : Cost startup_cost = workspace->startup_cost;
4310 457376 : Cost run_cost = workspace->run_cost;
4311 457376 : int numbuckets = workspace->numbuckets;
4312 457376 : int numbatches = workspace->numbatches;
4313 : Cost cpu_per_tuple;
4314 : QualCost hash_qual_cost;
4315 : QualCost qp_qual_cost;
4316 : double hashjointuples;
4317 : double virtualbuckets;
4318 : Selectivity innerbucketsize;
4319 : Selectivity innermcvfreq;
4320 : ListCell *hcl;
4321 :
4322 : /* Set the number of disabled nodes. */
4323 457376 : path->jpath.path.disabled_nodes = workspace->disabled_nodes;
4324 :
4325 : /* Mark the path with the correct row estimate */
4326 457376 : if (path->jpath.path.param_info)
4327 3674 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4328 : else
4329 453702 : path->jpath.path.rows = path->jpath.path.parent->rows;
4330 :
4331 : /* For partial paths, scale row estimate. */
4332 457376 : if (path->jpath.path.parallel_workers > 0)
4333 : {
4334 107134 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
4335 :
4336 107134 : path->jpath.path.rows =
4337 107134 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
4338 : }
4339 :
4340 : /* mark the path with estimated # of batches */
4341 457376 : path->num_batches = numbatches;
4342 :
4343 : /* store the total number of tuples (sum of partial row estimates) */
4344 457376 : path->inner_rows_total = inner_path_rows_total;
4345 :
4346 : /* and compute the number of "virtual" buckets in the whole join */
4347 457376 : virtualbuckets = (double) numbuckets * (double) numbatches;
4348 :
4349 : /*
4350 : * Determine bucketsize fraction and MCV frequency for the inner relation.
4351 : * We use the smallest bucketsize or MCV frequency estimated for any
4352 : * individual hashclause; this is undoubtedly conservative.
4353 : *
4354 : * BUT: if inner relation has been unique-ified, we can assume it's good
4355 : * for hashing. This is important both because it's the right answer, and
4356 : * because we avoid contaminating the cache with a value that's wrong for
4357 : * non-unique-ified paths.
4358 : */
4359 457376 : if (RELATION_WAS_MADE_UNIQUE(inner_path->parent, extra->sjinfo,
4360 : path->jpath.jointype))
4361 : {
4362 4418 : innerbucketsize = 1.0 / virtualbuckets;
4363 4418 : innermcvfreq = 0.0;
4364 : }
4365 : else
4366 : {
4367 : List *otherclauses;
4368 :
4369 452958 : innerbucketsize = 1.0;
4370 452958 : innermcvfreq = 1.0;
4371 :
4372 : /* At first, try to estimate bucket size using extended statistics. */
4373 452958 : otherclauses = estimate_multivariate_bucketsize(root,
4374 : inner_path->parent,
4375 : hashclauses,
4376 : &innerbucketsize);
4377 :
4378 : /* Pass through the remaining clauses */
4379 942620 : foreach(hcl, otherclauses)
4380 : {
4381 489662 : RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
4382 : Selectivity thisbucketsize;
4383 : Selectivity thismcvfreq;
4384 :
4385 : /*
4386 : * First we have to figure out which side of the hashjoin clause
4387 : * is the inner side.
4388 : *
4389 : * Since we tend to visit the same clauses over and over when
4390 : * planning a large query, we cache the bucket stats estimates in
4391 : * the RestrictInfo node to avoid repeated lookups of statistics.
4392 : */
4393 489662 : if (bms_is_subset(restrictinfo->right_relids,
4394 489662 : inner_path->parent->relids))
4395 : {
4396 : /* righthand side is inner */
4397 254014 : thisbucketsize = restrictinfo->right_bucketsize;
4398 254014 : if (thisbucketsize < 0)
4399 : {
4400 : /* not cached yet */
4401 110356 : estimate_hash_bucket_stats(root,
4402 110356 : get_rightop(restrictinfo->clause),
4403 : virtualbuckets,
4404 : &restrictinfo->right_mcvfreq,
4405 : &restrictinfo->right_bucketsize);
4406 110356 : thisbucketsize = restrictinfo->right_bucketsize;
4407 : }
4408 254014 : thismcvfreq = restrictinfo->right_mcvfreq;
4409 : }
4410 : else
4411 : {
4412 : Assert(bms_is_subset(restrictinfo->left_relids,
4413 : inner_path->parent->relids));
4414 : /* lefthand side is inner */
4415 235648 : thisbucketsize = restrictinfo->left_bucketsize;
4416 235648 : if (thisbucketsize < 0)
4417 : {
4418 : /* not cached yet */
4419 95700 : estimate_hash_bucket_stats(root,
4420 95700 : get_leftop(restrictinfo->clause),
4421 : virtualbuckets,
4422 : &restrictinfo->left_mcvfreq,
4423 : &restrictinfo->left_bucketsize);
4424 95700 : thisbucketsize = restrictinfo->left_bucketsize;
4425 : }
4426 235648 : thismcvfreq = restrictinfo->left_mcvfreq;
4427 : }
4428 :
4429 489662 : if (innerbucketsize > thisbucketsize)
4430 369668 : innerbucketsize = thisbucketsize;
4431 489662 : if (innermcvfreq > thismcvfreq)
4432 455606 : innermcvfreq = thismcvfreq;
4433 : }
4434 : }
4435 :
4436 : /*
4437 : * If the bucket holding the inner MCV would exceed hash_mem, we don't
4438 : * want to hash unless there is really no other alternative, so apply
4439 : * disable_cost. (The executor normally copes with excessive memory usage
4440 : * by splitting batches, but obviously it cannot separate equal values
4441 : * that way, so it will be unable to drive the batch size below hash_mem
4442 : * when this is true.)
4443 : */
4444 457376 : if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
4445 914752 : inner_path->pathtarget->width) > get_hash_memory_limit())
4446 8 : startup_cost += disable_cost;
4447 :
4448 : /*
4449 : * Compute cost of the hashquals and qpquals (other restriction clauses)
4450 : * separately.
4451 : */
4452 457376 : cost_qual_eval(&hash_qual_cost, hashclauses, root);
4453 457376 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4454 457376 : qp_qual_cost.startup -= hash_qual_cost.startup;
4455 457376 : qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4456 :
4457 : /* CPU costs */
4458 :
4459 457376 : if (path->jpath.jointype == JOIN_SEMI ||
4460 451018 : path->jpath.jointype == JOIN_ANTI ||
4461 446426 : extra->inner_unique)
4462 125162 : {
4463 : double outer_matched_rows;
4464 : Selectivity inner_scan_frac;
4465 :
4466 : /*
4467 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
4468 : * executor will stop after the first match.
4469 : *
4470 : * For an outer-rel row that has at least one match, we can expect the
4471 : * bucket scan to stop after a fraction 1/(match_count+1) of the
4472 : * bucket's rows, if the matches are evenly distributed. Since they
4473 : * probably aren't quite evenly distributed, we apply a fuzz factor of
4474 : * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4475 : * to clamp inner_scan_frac to at most 1.0; but since match_count is
4476 : * at least 1, no such clamp is needed now.)
4477 : */
4478 125162 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4479 125162 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
4480 :
4481 125162 : startup_cost += hash_qual_cost.startup;
4482 250324 : run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4483 125162 : clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
4484 :
4485 : /*
4486 : * For unmatched outer-rel rows, the picture is quite a lot different.
4487 : * In the first place, there is no reason to assume that these rows
4488 : * preferentially hit heavily-populated buckets; instead assume they
4489 : * are uncorrelated with the inner distribution and so they see an
4490 : * average bucket size of inner_path_rows / virtualbuckets. In the
4491 : * second place, it seems likely that they will have few if any exact
4492 : * hash-code matches and so very few of the tuples in the bucket will
4493 : * actually require eval of the hash quals. We don't have any good
4494 : * way to estimate how many will, but for the moment assume that the
4495 : * effective cost per bucket entry is one-tenth what it is for
4496 : * matchable tuples.
4497 : */
4498 250324 : run_cost += hash_qual_cost.per_tuple *
4499 250324 : (outer_path_rows - outer_matched_rows) *
4500 125162 : clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
4501 :
4502 : /* Get # of tuples that will pass the basic join */
4503 125162 : if (path->jpath.jointype == JOIN_ANTI)
4504 4592 : hashjointuples = outer_path_rows - outer_matched_rows;
4505 : else
4506 120570 : hashjointuples = outer_matched_rows;
4507 : }
4508 : else
4509 : {
4510 : /*
4511 : * The number of tuple comparisons needed is the number of outer
4512 : * tuples times the typical number of tuples in a hash bucket, which
4513 : * is the inner relation size times its bucketsize fraction. At each
4514 : * one, we need to evaluate the hashjoin quals. But actually,
4515 : * charging the full qual eval cost at each tuple is pessimistic,
4516 : * since we don't evaluate the quals unless the hash values match
4517 : * exactly. For lack of a better idea, halve the cost estimate to
4518 : * allow for that.
4519 : */
4520 332214 : startup_cost += hash_qual_cost.startup;
4521 664428 : run_cost += hash_qual_cost.per_tuple * outer_path_rows *
4522 332214 : clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
4523 :
4524 : /*
4525 : * Get approx # tuples passing the hashquals. We use
4526 : * approx_tuple_count here because we need an estimate done with
4527 : * JOIN_INNER semantics.
4528 : */
4529 332214 : hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4530 : }
4531 :
4532 : /*
4533 : * For each tuple that gets through the hashjoin proper, we charge
4534 : * cpu_tuple_cost plus the cost of evaluating additional restriction
4535 : * clauses that are to be applied at the join. (This is pessimistic since
4536 : * not all of the quals may get evaluated at each tuple.)
4537 : */
4538 457376 : startup_cost += qp_qual_cost.startup;
4539 457376 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4540 457376 : run_cost += cpu_per_tuple * hashjointuples;
4541 :
4542 : /* tlist eval costs are paid per output row, not per tuple scanned */
4543 457376 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4544 457376 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4545 :
4546 457376 : path->jpath.path.startup_cost = startup_cost;
4547 457376 : path->jpath.path.total_cost = startup_cost + run_cost;
4548 457376 : }
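/*
 * Worked illustration of the SEMI/ANTI/inner_unique arithmetic above, with
 * made-up numbers (one simple hashclause, so hash_qual_cost.per_tuple is the
 * default cpu_operator_cost of 0.0025): suppose outer_path_rows = 10000,
 * outer_match_frac = 0.2, match_count = 3, inner_path_rows = 1000,
 * innerbucketsize = 0.01 and virtualbuckets = 1024.  Then
 *     outer_matched_rows = rint(10000 * 0.2) = 2000
 *     inner_scan_frac    = 2.0 / (3 + 1)     = 0.5
 * and the matched-row charge is
 *     0.0025 * 2000 * clamp_row_est(1000 * 0.01 * 0.5) * 0.5
 *   = 0.0025 * 2000 * 5 * 0.5 = 12.5
 * while the unmatched-row charge is
 *     0.0025 * (10000 - 2000) * clamp_row_est(1000 / 1024) * 0.05
 *   = 0.0025 * 8000 * 1 * 0.05 = 1.0
 * (clamp_row_est never returns less than one row).
 */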
4549 :
4550 :
4551 : /*
4552 : * cost_subplan
4553 : * Figure the costs for a SubPlan (or initplan).
4554 : *
4555 : * Note: we could dig the subplan's Plan out of the root list, but in practice
4556 : * all callers have it handy already, so we make them pass it.
4557 : */
4558 : void
4559 44184 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
4560 : {
4561 : QualCost sp_cost;
4562 :
4563 : /*
4564 : * Figure any cost for evaluating the testexpr.
4565 : *
4566 : * Usually, SubPlan nodes are built very early, before we have constructed
4567 : * any RelOptInfos for the parent query level, which means the parent root
4568 : * does not yet contain enough information to safely consult statistics.
4569 : * Therefore, we pass root as NULL here. cost_qual_eval() is already
4570 : * well-equipped to handle a NULL root.
4571 : *
4572 : * One exception is SubPlan nodes built for the initplans of MIN/MAX
4573 : * aggregates from indexes (cf. SS_make_initplan_from_plan). In this
4574 : * case, having a NULL root is safe because testexpr will be NULL.
4575 : * Besides, an initplan will by definition not consult anything from the
4576 : * parent plan.
4577 : */
4578 44184 : cost_qual_eval(&sp_cost,
4579 44184 : make_ands_implicit((Expr *) subplan->testexpr),
4580 : NULL);
4581 :
4582 44184 : if (subplan->useHashTable)
4583 : {
4584 : /*
4585 : * If we are using a hash table for the subquery outputs, then the
4586 : * cost of evaluating the query is a one-time cost. We charge one
4587 : * cpu_operator_cost per tuple for the work of loading the hashtable,
4588 : * too.
4589 : */
4590 2126 : sp_cost.startup += plan->total_cost +
4591 2126 : cpu_operator_cost * plan->plan_rows;
4592 :
4593 : /*
4594 : * The per-tuple costs include the cost of evaluating the lefthand
4595 : * expressions, plus the cost of probing the hashtable. We already
4596 : * accounted for the lefthand expressions as part of the testexpr, and
4597 : * will also have counted one cpu_operator_cost for each comparison
4598 : * operator. That is probably too low for the probing cost, but it's
4599 : * hard to make a better estimate, so live with it for now.
4600 : */
4601 : }
4602 : else
4603 : {
4604 : /*
4605 : * Otherwise we will be rescanning the subplan output on each
4606 : * evaluation. We need to estimate how much of the output we will
4607 : * actually need to scan. NOTE: this logic should agree with the
4608 : * tuple_fraction estimates used by make_subplan() in
4609 : * plan/subselect.c.
4610 : */
4611 42058 : Cost plan_run_cost = plan->total_cost - plan->startup_cost;
4612 :
4613 42058 : if (subplan->subLinkType == EXISTS_SUBLINK)
4614 : {
4615 : /* we only need to fetch 1 tuple; clamp to avoid zero divide */
4616 2532 : sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4617 : }
4618 39526 : else if (subplan->subLinkType == ALL_SUBLINK ||
4619 39508 : subplan->subLinkType == ANY_SUBLINK)
4620 : {
4621 : /* assume we need 50% of the tuples */
4622 146 : sp_cost.per_tuple += 0.50 * plan_run_cost;
4623 : /* also charge a cpu_operator_cost per row examined */
4624 146 : sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4625 : }
4626 : else
4627 : {
4628 : /* assume we need all tuples */
4629 39380 : sp_cost.per_tuple += plan_run_cost;
4630 : }
4631 :
4632 : /*
4633 : * Also account for subplan's startup cost. If the subplan is
4634 : * uncorrelated or undirect correlated, AND its topmost node is one
4635 : * that materializes its output, assume that we'll only need to pay
4636 : * its startup cost once; otherwise assume we pay the startup cost
4637 : * every time.
4638 : */
4639 55384 : if (subplan->parParam == NIL &&
4640 13326 : ExecMaterializesOutput(nodeTag(plan)))
4641 734 : sp_cost.startup += plan->startup_cost;
4642 : else
4643 41324 : sp_cost.per_tuple += plan->startup_cost;
4644 : }
4645 :
4646 44184 : subplan->startup_cost = sp_cost.startup;
4647 44184 : subplan->per_call_cost = sp_cost.per_tuple;
4648 44184 : }
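/*
 * Worked illustration of the rescan logic above, with hypothetical numbers:
 * for an EXISTS sublink whose subplan has startup_cost = 5, total_cost = 105
 * and plan_rows = 50, plan_run_cost is 100 and the fraction charged per call
 * is 100 / clamp_row_est(50) = 2.  If the subplan is correlated (parParam not
 * NIL) or its top node does not materialize its output, the startup cost of 5
 * is also charged on every call, giving per_call_cost = 7 plus whatever the
 * testexpr itself costs.
 */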
4649 :
4650 :
4651 : /*
4652 : * cost_rescan
4653 : * Given a finished Path, estimate the costs of rescanning it after
4654 : * having done so the first time. For some Path types a rescan is
4655 : * cheaper than an original scan (if no parameters change), and this
4656 : * function embodies knowledge about that. The default is to return
4657 : * the same costs stored in the Path. (Note that the cost estimates
4658 : * actually stored in Paths are always for first scans.)
4659 : *
4660 : * This function is not currently intended to model effects such as rescans
4661 : * being cheaper due to disk block caching; what we are concerned with is
4662 : * plan types wherein the executor caches results explicitly, or doesn't
4663 : * redo startup calculations, etc.
4664 : */
4665 : static void
4666 3292804 : cost_rescan(PlannerInfo *root, Path *path,
4667 : Cost *rescan_startup_cost, /* output parameters */
4668 : Cost *rescan_total_cost)
4669 : {
4670 3292804 : switch (path->pathtype)
4671 : {
4672 53038 : case T_FunctionScan:
4673 :
4674 : /*
4675 : * Currently, nodeFunctionscan.c always executes the function to
4676 : * completion before returning any rows, and caches the results in
4677 : * a tuplestore. So the function eval cost is all startup cost
4678 : * and isn't paid over again on rescans. However, all run costs
4679 : * will be paid over again.
4680 : */
4681 53038 : *rescan_startup_cost = 0;
4682 53038 : *rescan_total_cost = path->total_cost - path->startup_cost;
4683 53038 : break;
4684 136892 : case T_HashJoin:
4685 :
4686 : /*
4687 : * If it's a single-batch join, we don't need to rebuild the hash
4688 : * table during a rescan.
4689 : */
4690 136892 : if (((HashPath *) path)->num_batches == 1)
4691 : {
4692 : /* Startup cost is exactly the cost of hash table building */
4693 136892 : *rescan_startup_cost = 0;
4694 136892 : *rescan_total_cost = path->total_cost - path->startup_cost;
4695 : }
4696 : else
4697 : {
4698 : /* Otherwise, no special treatment */
4699 0 : *rescan_startup_cost = path->startup_cost;
4700 0 : *rescan_total_cost = path->total_cost;
4701 : }
4702 136892 : break;
4703 8116 : case T_CteScan:
4704 : case T_WorkTableScan:
4705 : {
4706 : /*
4707 : * These plan types materialize their final result in a
4708 : * tuplestore or tuplesort object. So the rescan cost is only
4709 : * cpu_tuple_cost per tuple, unless the result is large enough
4710 : * to spill to disk.
4711 : */
4712 8116 : Cost run_cost = cpu_tuple_cost * path->rows;
4713 8116 : double nbytes = relation_byte_size(path->rows,
4714 8116 : path->pathtarget->width);
4715 8116 : double work_mem_bytes = work_mem * (Size) 1024;
4716 :
4717 8116 : if (nbytes > work_mem_bytes)
4718 : {
4719 : /* It will spill, so account for re-read cost */
4720 368 : double npages = ceil(nbytes / BLCKSZ);
4721 :
4722 368 : run_cost += seq_page_cost * npages;
4723 : }
4724 8116 : *rescan_startup_cost = 0;
4725 8116 : *rescan_total_cost = run_cost;
4726 : }
4727 8116 : break;
4728 1188446 : case T_Material:
4729 : case T_Sort:
4730 : {
4731 : /*
4732 : * These plan types not only materialize their results, but do
4733 : * not implement qual filtering or projection. So they are
4734 : * even cheaper to rescan than the ones above. We charge only
4735 : * cpu_operator_cost per tuple. (Note: keep that in sync with
4736 : * the run_cost charge in cost_sort, and also see comments in
4737 : * cost_material before you change it.)
4738 : */
4739 1188446 : Cost run_cost = cpu_operator_cost * path->rows;
4740 1188446 : double nbytes = relation_byte_size(path->rows,
4741 1188446 : path->pathtarget->width);
4742 1188446 : double work_mem_bytes = work_mem * (Size) 1024;
4743 :
4744 1188446 : if (nbytes > work_mem_bytes)
4745 : {
4746 : /* It will spill, so account for re-read cost */
4747 9906 : double npages = ceil(nbytes / BLCKSZ);
4748 :
4749 9906 : run_cost += seq_page_cost * npages;
4750 : }
4751 1188446 : *rescan_startup_cost = 0;
4752 1188446 : *rescan_total_cost = run_cost;
4753 : }
4754 1188446 : break;
4755 295294 : case T_Memoize:
4756 : /* All the hard work is done by cost_memoize_rescan */
4757 295294 : cost_memoize_rescan(root, (MemoizePath *) path,
4758 : rescan_startup_cost, rescan_total_cost);
4759 295294 : break;
4760 1611018 : default:
4761 1611018 : *rescan_startup_cost = path->startup_cost;
4762 1611018 : *rescan_total_cost = path->total_cost;
4763 1611018 : break;
4764 : }
4765 3292804 : }
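/*
 * Worked illustration of the Material/Sort rescan case above, with assumed
 * values and the defaults cpu_operator_cost = 0.0025, seq_page_cost = 1.0,
 * work_mem = 4MB and BLCKSZ = 8192: rescanning 100000 rows costs
 *     0.0025 * 100000 = 250
 * in CPU terms.  relation_byte_size() is roughly rows * (row width plus a
 * small per-row header allowance); if that estimate exceeds the 4MB work_mem
 * limit, say at 16MB, the materialized result is assumed to spill and we add
 *     1.0 * ceil(16777216 / 8192) = 2048
 * page fetches to the rescan cost.
 */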
4766 :
4767 :
4768 : /*
4769 : * cost_qual_eval
4770 : * Estimate the CPU costs of evaluating a WHERE clause.
4771 : * The input can be either an implicitly-ANDed list of boolean
4772 : * expressions, or a list of RestrictInfo nodes. (The latter is
4773 : * preferred since it allows caching of the results.)
4774 : * The result includes both a one-time (startup) component,
4775 : * and a per-evaluation component.
4776 : *
4777 : * Note: in some code paths root can be passed as NULL, resulting in
4778 : * slightly worse estimates.
4779 : */
4780 : void
4781 4704654 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4782 : {
4783 : cost_qual_eval_context context;
4784 : ListCell *l;
4785 :
4786 4704654 : context.root = root;
4787 4704654 : context.total.startup = 0;
4788 4704654 : context.total.per_tuple = 0;
4789 :
4790 : /* We don't charge any cost for the implicit ANDing at top level ... */
4791 :
4792 8951796 : foreach(l, quals)
4793 : {
4794 4247142 : Node *qual = (Node *) lfirst(l);
4795 :
4796 4247142 : cost_qual_eval_walker(qual, &context);
4797 : }
4798 :
4799 4704654 : *cost = context.total;
4800 4704654 : }
4801 :
4802 : /*
4803 : * cost_qual_eval_node
4804 : * As above, for a single RestrictInfo or expression.
4805 : */
4806 : void
4807 1884166 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
4808 : {
4809 : cost_qual_eval_context context;
4810 :
4811 1884166 : context.root = root;
4812 1884166 : context.total.startup = 0;
4813 1884166 : context.total.per_tuple = 0;
4814 :
4815 1884166 : cost_qual_eval_walker(qual, &context);
4816 :
4817 1884166 : *cost = context.total;
4818 1884166 : }
4819 :
4820 : static bool
4821 9634744 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4822 : {
4823 9634744 : if (node == NULL)
4824 94630 : return false;
4825 :
4826 : /*
4827 : * RestrictInfo nodes contain an eval_cost field reserved for this
4828 : * routine's use, so that it's not necessary to evaluate the qual clause's
4829 : * cost more than once. If the clause's cost hasn't been computed yet,
4830 : * the field's startup value will contain -1.
4831 : */
4832 9540114 : if (IsA(node, RestrictInfo))
4833 : {
4834 4435806 : RestrictInfo *rinfo = (RestrictInfo *) node;
4835 :
4836 4435806 : if (rinfo->eval_cost.startup < 0)
4837 : {
4838 : cost_qual_eval_context locContext;
4839 :
4840 609558 : locContext.root = context->root;
4841 609558 : locContext.total.startup = 0;
4842 609558 : locContext.total.per_tuple = 0;
4843 :
4844 : /*
4845 : * For an OR clause, recurse into the marked-up tree so that we
4846 : * set the eval_cost for contained RestrictInfos too.
4847 : */
4848 609558 : if (rinfo->orclause)
4849 9622 : cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4850 : else
4851 599936 : cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4852 :
4853 : /*
4854 : * If the RestrictInfo is marked pseudoconstant, it will be tested
4855 : * only once, so treat its cost as all startup cost.
4856 : */
4857 609558 : if (rinfo->pseudoconstant)
4858 : {
4859 : /* count one execution during startup */
4860 10310 : locContext.total.startup += locContext.total.per_tuple;
4861 10310 : locContext.total.per_tuple = 0;
4862 : }
4863 609558 : rinfo->eval_cost = locContext.total;
4864 : }
4865 4435806 : context->total.startup += rinfo->eval_cost.startup;
4866 4435806 : context->total.per_tuple += rinfo->eval_cost.per_tuple;
4867 : /* do NOT recurse into children */
4868 4435806 : return false;
4869 : }
4870 :
4871 : /*
4872 : * For each operator or function node in the given tree, we charge the
4873 : * estimated execution cost given by pg_proc.procost (remember to multiply
4874 : * this by cpu_operator_cost).
4875 : *
4876 : * Vars and Consts are charged zero, and so are boolean operators (AND,
4877 : * OR, NOT). Simplistic, but a lot better than no model at all.
4878 : *
4879 : * Should we try to account for the possibility of short-circuit
4880 : * evaluation of AND/OR? Probably *not*, because that would make the
4881 : * results depend on the clause ordering, and we are not in any position
4882 : * to expect that the current ordering of the clauses is the one that's
4883 : * going to end up being used. The above per-RestrictInfo caching would
4884 : * not mix well with trying to re-order clauses anyway.
4885 : *
4886 : * Another issue that is entirely ignored here is that if a set-returning
4887 : * function is below top level in the tree, the functions/operators above
4888 : * it will need to be evaluated multiple times. In practical use, such
4889 : * cases arise so seldom as to not be worth the added complexity needed;
4890 : * moreover, since our rowcount estimates for functions tend to be pretty
4891 : * phony, the results would also be pretty phony.
4892 : */
4893 5104308 : if (IsA(node, FuncExpr))
4894 : {
4895 346080 : add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
4896 : &context->total);
4897 : }
4898 4758228 : else if (IsA(node, OpExpr) ||
4899 4091292 : IsA(node, DistinctExpr) ||
4900 4090212 : IsA(node, NullIfExpr))
4901 : {
4902 : /* rely on struct equivalence to treat these all alike */
4903 668140 : set_opfuncid((OpExpr *) node);
4904 668140 : add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
4905 : &context->total);
4906 : }
4907 4090088 : else if (IsA(node, ScalarArrayOpExpr))
4908 : {
4909 45144 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
4910 45144 : Node *arraynode = (Node *) lsecond(saop->args);
4911 : QualCost sacosts;
4912 : QualCost hcosts;
4913 45144 : double estarraylen = estimate_array_length(context->root, arraynode);
4914 :
4915 45144 : set_sa_opfuncid(saop);
4916 45144 : sacosts.startup = sacosts.per_tuple = 0;
4917 45144 : add_function_cost(context->root, saop->opfuncid, NULL,
4918 : &sacosts);
4919 :
4920 45144 : if (OidIsValid(saop->hashfuncid))
4921 : {
4922 : /* Handle costs for hashed ScalarArrayOpExpr */
4923 430 : hcosts.startup = hcosts.per_tuple = 0;
4924 :
4925 430 : add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
4926 430 : context->total.startup += sacosts.startup + hcosts.startup;
4927 :
4928 : /* Estimate the cost of building the hashtable. */
4929 430 : context->total.startup += estarraylen * hcosts.per_tuple;
4930 :
4931 : /*
4932             :              * XXX should we charge a little bit for sacosts.per_tuple when
4933             :              * building the table, or is it ok to assume there will be zero
4934             :              * hash collisions?
4935 : */
4936 :
4937 : /*
4938 : * Charge for hashtable lookups. Charge a single hash and a
4939 : * single comparison.
4940 : */
4941 430 : context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
4942 : }
4943 : else
4944 : {
4945 : /*
4946 : * Estimate that the operator will be applied to about half of the
4947 : * array elements before the answer is determined.
4948 : */
4949 44714 : context->total.startup += sacosts.startup;
4950 89428 : context->total.per_tuple += sacosts.per_tuple *
4951 44714 : estimate_array_length(context->root, arraynode) * 0.5;
4952 : }
4953 : }
4954 4044944 : else if (IsA(node, Aggref) ||
4955 3973790 : IsA(node, WindowFunc))
4956 : {
4957 : /*
4958 : * Aggref and WindowFunc nodes are (and should be) treated like Vars,
4959 : * ie, zero execution cost in the current model, because they behave
4960 : * essentially like Vars at execution. We disregard the costs of
4961 : * their input expressions for the same reason. The actual execution
4962 : * costs of the aggregate/window functions and their arguments have to
4963 : * be factored into plan-node-specific costing of the Agg or WindowAgg
4964 : * plan node.
4965 : */
4966 75032 : return false; /* don't recurse into children */
4967 : }
4968 3969912 : else if (IsA(node, GroupingFunc))
4969 : {
4970 : /* Treat this as having cost 1 */
4971 422 : context->total.per_tuple += cpu_operator_cost;
4972 422 : return false; /* don't recurse into children */
4973 : }
4974 3969490 : else if (IsA(node, CoerceViaIO))
4975 : {
4976 22682 : CoerceViaIO *iocoerce = (CoerceViaIO *) node;
4977 : Oid iofunc;
4978 : Oid typioparam;
4979 : bool typisvarlena;
4980 :
4981 : /* check the result type's input function */
4982 22682 : getTypeInputInfo(iocoerce->resulttype,
4983 : &iofunc, &typioparam);
4984 22682 : add_function_cost(context->root, iofunc, NULL,
4985 : &context->total);
4986 : /* check the input type's output function */
4987 22682 : getTypeOutputInfo(exprType((Node *) iocoerce->arg),
4988 : &iofunc, &typisvarlena);
4989 22682 : add_function_cost(context->root, iofunc, NULL,
4990 : &context->total);
4991 : }
4992 3946808 : else if (IsA(node, ArrayCoerceExpr))
4993 : {
4994 5206 : ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
4995 : QualCost perelemcost;
4996 :
4997 5206 : cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
4998 : context->root);
4999 5206 : context->total.startup += perelemcost.startup;
5000 5206 : if (perelemcost.per_tuple > 0)
5001 66 : context->total.per_tuple += perelemcost.per_tuple *
5002 66 : estimate_array_length(context->root, (Node *) acoerce->arg);
5003 : }
5004 3941602 : else if (IsA(node, RowCompareExpr))
5005 : {
5006 : /* Conservatively assume we will check all the columns */
5007 252 : RowCompareExpr *rcexpr = (RowCompareExpr *) node;
5008 : ListCell *lc;
5009 :
5010 810 : foreach(lc, rcexpr->opnos)
5011 : {
5012 558 : Oid opid = lfirst_oid(lc);
5013 :
5014 558 : add_function_cost(context->root, get_opcode(opid), NULL,
5015 : &context->total);
5016 : }
5017 : }
5018 3941350 : else if (IsA(node, MinMaxExpr) ||
5019 3941078 : IsA(node, SQLValueFunction) ||
5020 3936290 : IsA(node, XmlExpr) ||
5021 3935588 : IsA(node, CoerceToDomain) ||
5022 3925730 : IsA(node, NextValueExpr) ||
5023 3925332 : IsA(node, JsonExpr))
5024 : {
5025 : /* Treat all these as having cost 1 */
5026 18590 : context->total.per_tuple += cpu_operator_cost;
5027 : }
5028 3922760 : else if (IsA(node, SubLink))
5029 : {
5030 : /* This routine should not be applied to un-planned expressions */
5031 0 : elog(ERROR, "cannot handle unplanned sub-select");
5032 : }
5033 3922760 : else if (IsA(node, SubPlan))
5034 : {
5035 : /*
5036 : * A subplan node in an expression typically indicates that the
5037 : * subplan will be executed on each evaluation, so charge accordingly.
5038 : * (Sub-selects that can be executed as InitPlans have already been
5039 : * removed from the expression.)
5040 : */
5041 43714 : SubPlan *subplan = (SubPlan *) node;
5042 :
5043 43714 : context->total.startup += subplan->startup_cost;
5044 43714 : context->total.per_tuple += subplan->per_call_cost;
5045 :
5046 : /*
5047 : * We don't want to recurse into the testexpr, because it was already
5048 : * counted in the SubPlan node's costs. So we're done.
5049 : */
5050 43714 : return false;
5051 : }
5052 3879046 : else if (IsA(node, AlternativeSubPlan))
5053 : {
5054 : /*
5055 : * Arbitrarily use the first alternative plan for costing. (We should
5056 : * certainly only include one alternative, and we don't yet have
5057 : * enough information to know which one the executor is most likely to
5058 : * use.)
5059 : */
5060 1858 : AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
5061 :
5062 1858 : return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
5063 : context);
5064 : }
5065 3877188 : else if (IsA(node, PlaceHolderVar))
5066 : {
5067 : /*
5068 : * A PlaceHolderVar should be given cost zero when considering general
5069 : * expression evaluation costs. The expense of doing the contained
5070 : * expression is charged as part of the tlist eval costs of the scan
5071 : * or join where the PHV is first computed (see set_rel_width and
5072 : * add_placeholders_to_joinrel). If we charged it again here, we'd be
5073 : * double-counting the cost for each level of plan that the PHV
5074 : * bubbles up through. Hence, return without recursing into the
5075 : * phexpr.
5076 : */
5077 5220 : return false;
5078 : }
5079 :
5080 : /* recurse into children */
5081 4978062 : return expression_tree_walker(node, cost_qual_eval_walker, context);
5082 : }
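/*
 * Worked illustration of the ScalarArrayOpExpr branch above, with assumed
 * values: for a clause like "x = ANY(array of 10 elements)" whose comparison
 * operator costs the default cpu_operator_cost of 0.0025, the non-hashed path
 * charges
 *     0.0025 * 10 * 0.5 = 0.0125
 * per evaluated row, reflecting the guess that about half the array is
 * scanned before the answer is known.  The hashed path instead charges one
 * hash-function call plus one comparison per row, after a startup charge of
 * one hash-function call per array element to build the table.
 */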
5083 :
5084 : /*
5085 : * get_restriction_qual_cost
5086 : * Compute evaluation costs of a baserel's restriction quals, plus any
5087 : * movable join quals that have been pushed down to the scan.
5088 : * Results are returned into *qpqual_cost.
5089 : *
5090 : * This is a convenience subroutine that works for seqscans and other cases
5091 : * where all the given quals will be evaluated the hard way. It's not useful
5092 : * for cost_index(), for example, where the index machinery takes care of
5093 : * some of the quals. We assume baserestrictcost was previously set by
5094 : * set_baserel_size_estimates().
5095 : */
5096 : static void
5097 1111448 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
5098 : ParamPathInfo *param_info,
5099 : QualCost *qpqual_cost)
5100 : {
5101 1111448 : if (param_info)
5102 : {
5103 : /* Include costs of pushed-down clauses */
5104 246536 : cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
5105 :
5106 246536 : qpqual_cost->startup += baserel->baserestrictcost.startup;
5107 246536 : qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
5108 : }
5109 : else
5110 864912 : *qpqual_cost = baserel->baserestrictcost;
5111 1111448 : }
5112 :
5113 :
5114 : /*
5115 : * compute_semi_anti_join_factors
5116 : * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
5117 : * can be expected to scan.
5118 : *
5119 : * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
5120 : * inner rows as soon as it finds a match to the current outer row.
5121 : * The same happens if we have detected the inner rel is unique.
5122 : * We should therefore adjust some of the cost components for this effect.
5123 : * This function computes some estimates needed for these adjustments.
5124 : * These estimates will be the same regardless of the particular paths used
5125 : * for the outer and inner relation, so we compute these once and then pass
5126 : * them to all the join cost estimation functions.
5127 : *
5128 : * Input parameters:
5129 : * joinrel: join relation under consideration
5130 : * outerrel: outer relation under consideration
5131 : * innerrel: inner relation under consideration
5132 : * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
5133 : * sjinfo: SpecialJoinInfo relevant to this join
5134 : * restrictlist: join quals
5135 : * Output parameters:
5136 : * *semifactors is filled in (see pathnodes.h for field definitions)
5137 : */
5138 : void
5139 215972 : compute_semi_anti_join_factors(PlannerInfo *root,
5140 : RelOptInfo *joinrel,
5141 : RelOptInfo *outerrel,
5142 : RelOptInfo *innerrel,
5143 : JoinType jointype,
5144 : SpecialJoinInfo *sjinfo,
5145 : List *restrictlist,
5146 : SemiAntiJoinFactors *semifactors)
5147 : {
5148 : Selectivity jselec;
5149 : Selectivity nselec;
5150 : Selectivity avgmatch;
5151 : SpecialJoinInfo norm_sjinfo;
5152 : List *joinquals;
5153 : ListCell *l;
5154 :
5155 : /*
5156 : * In an ANTI join, we must ignore clauses that are "pushed down", since
5157 : * those won't affect the match logic. In a SEMI join, we do not
5158 : * distinguish joinquals from "pushed down" quals, so just use the whole
5159 : * restrictinfo list. For other outer join types, we should consider only
5160 : * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
5161 : */
5162 215972 : if (IS_OUTER_JOIN(jointype))
5163 : {
5164 75606 : joinquals = NIL;
5165 165450 : foreach(l, restrictlist)
5166 : {
5167 89844 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5168 :
5169 89844 : if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5170 84968 : joinquals = lappend(joinquals, rinfo);
5171 : }
5172 : }
5173 : else
5174 140366 : joinquals = restrictlist;
5175 :
5176 : /*
5177 : * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
5178 : */
5179 215972 : jselec = clauselist_selectivity(root,
5180 : joinquals,
5181 : 0,
5182 : (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
5183 : sjinfo);
5184 :
5185 : /*
5186 : * Also get the normal inner-join selectivity of the join clauses.
5187 : */
5188 215972 : init_dummy_sjinfo(&norm_sjinfo, outerrel->relids, innerrel->relids);
5189 :
5190 215972 : nselec = clauselist_selectivity(root,
5191 : joinquals,
5192 : 0,
5193 : JOIN_INNER,
5194 : &norm_sjinfo);
5195 :
5196 : /* Avoid leaking a lot of ListCells */
5197 215972 : if (IS_OUTER_JOIN(jointype))
5198 75606 : list_free(joinquals);
5199 :
5200 : /*
5201 : * jselec can be interpreted as the fraction of outer-rel rows that have
5202 : * any matches (this is true for both SEMI and ANTI cases). And nselec is
5203 : * the fraction of the Cartesian product that matches. So, the average
5204 : * number of matches for each outer-rel row that has at least one match is
5205 : * nselec * inner_rows / jselec.
5206 : *
5207 : * Note: it is correct to use the inner rel's "rows" count here, even
5208 : * though we might later be considering a parameterized inner path with
5209 : * fewer rows. This is because we have included all the join clauses in
5210 : * the selectivity estimate.
5211 : */
5212 215972 : if (jselec > 0) /* protect against zero divide */
5213 : {
5214 215584 : avgmatch = nselec * innerrel->rows / jselec;
5215 : /* Clamp to sane range */
5216 215584 : avgmatch = Max(1.0, avgmatch);
5217 : }
5218 : else
5219 388 : avgmatch = 1.0;
5220 :
5221 215972 : semifactors->outer_match_frac = jselec;
5222 215972 : semifactors->match_count = avgmatch;
5223 215972 : }
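/*
 * Worked illustration of the formulas above, with assumed selectivities:
 * if jselec = 0.5 (half of the outer rows have at least one match),
 * nselec = 0.001 and innerrel->rows = 10000, then
 *     avgmatch = 0.001 * 10000 / 0.5 = 20
 * i.e. each outer row that matches at all is expected to match about 20
 * inner rows.  The Max(1.0, ...) clamp keeps an implausibly small estimate
 * from claiming less than one match per matched outer row.
 */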
5224 :
5225 : /*
5226 : * has_indexed_join_quals
5227 : * Check whether all the joinquals of a nestloop join are used as
5228 : * inner index quals.
5229 : *
5230 : * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
5231 : * indexscan) that uses all the joinquals as indexquals, we can assume that an
5232 : * unmatched outer tuple is cheap to process, whereas otherwise it's probably
5233 : * expensive.
5234 : */
5235 : static bool
5236 925232 : has_indexed_join_quals(NestPath *path)
5237 : {
5238 925232 : JoinPath *joinpath = &path->jpath;
5239 925232 : Relids joinrelids = joinpath->path.parent->relids;
5240 925232 : Path *innerpath = joinpath->innerjoinpath;
5241 : List *indexclauses;
5242 : bool found_one;
5243 : ListCell *lc;
5244 :
5245 : /* If join still has quals to evaluate, it's not fast */
5246 925232 : if (joinpath->joinrestrictinfo != NIL)
5247 656168 : return false;
5248 : /* Nor if the inner path isn't parameterized at all */
5249 269064 : if (innerpath->param_info == NULL)
5250 3300 : return false;
5251 :
5252 : /* Find the indexclauses list for the inner scan */
5253 265764 : switch (innerpath->pathtype)
5254 : {
5255 161466 : case T_IndexScan:
5256 : case T_IndexOnlyScan:
5257 161466 : indexclauses = ((IndexPath *) innerpath)->indexclauses;
5258 161466 : break;
5259 270 : case T_BitmapHeapScan:
5260 : {
5261 : /* Accept only a simple bitmap scan, not AND/OR cases */
5262 270 : Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
5263 :
5264 270 : if (IsA(bmqual, IndexPath))
5265 222 : indexclauses = ((IndexPath *) bmqual)->indexclauses;
5266 : else
5267 48 : return false;
5268 222 : break;
5269 : }
5270 104028 : default:
5271 :
5272 : /*
5273 : * If it's not a simple indexscan, it probably doesn't run quickly
5274 : * for zero rows out, even if it's a parameterized path using all
5275 : * the joinquals.
5276 : */
5277 104028 : return false;
5278 : }
5279 :
5280 : /*
5281 : * Examine the inner path's param clauses. Any that are from the outer
5282 : * path must be found in the indexclauses list, either exactly or in an
5283 : * equivalent form generated by equivclass.c. Also, we must find at least
5284 : * one such clause, else it's a clauseless join which isn't fast.
5285 : */
5286 161688 : found_one = false;
5287 319228 : foreach(lc, innerpath->param_info->ppi_clauses)
5288 : {
5289 166278 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
5290 :
5291 166278 : if (join_clause_is_movable_into(rinfo,
5292 166278 : innerpath->parent->relids,
5293 : joinrelids))
5294 : {
5295 165726 : if (!is_redundant_with_indexclauses(rinfo, indexclauses))
5296 8738 : return false;
5297 156988 : found_one = true;
5298 : }
5299 : }
5300 152950 : return found_one;
5301 : }
5302 :
5303 :
5304 : /*
5305 : * approx_tuple_count
5306 : * Quick-and-dirty estimation of the number of join rows passing
5307 : * a set of qual conditions.
5308 : *
5309 : * The quals can be either an implicitly-ANDed list of boolean expressions,
5310 : * or a list of RestrictInfo nodes (typically the latter).
5311 : *
5312 : * We intentionally compute the selectivity under JOIN_INNER rules, even
5313 : * if it's some type of outer join. This is appropriate because we are
5314 : * trying to figure out how many tuples pass the initial merge or hash
5315 : * join step.
5316 : *
5317 : * This is quick-and-dirty because we bypass clauselist_selectivity, and
5318 : * simply multiply the independent clause selectivities together. Now
5319 : * clauselist_selectivity often can't do any better than that anyhow, but
5320 : * for some situations (such as range constraints) it is smarter. However,
5321 : * we can't effectively cache the results of clauselist_selectivity, whereas
5322 : * the individual clause selectivities can be and are cached.
5323 : *
5324 : * Since we are only using the results to estimate how many potential
5325 : * output tuples are generated and passed through qpqual checking, it
5326 : * seems OK to live with the approximation.
5327 : */
5328 : static double
5329 794838 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
5330 : {
5331 : double tuples;
5332 794838 : double outer_tuples = path->outerjoinpath->rows;
5333 794838 : double inner_tuples = path->innerjoinpath->rows;
5334 : SpecialJoinInfo sjinfo;
5335 794838 : Selectivity selec = 1.0;
5336 : ListCell *l;
5337 :
5338 : /*
5339 : * Make up a SpecialJoinInfo for JOIN_INNER semantics.
5340 : */
5341 794838 : init_dummy_sjinfo(&sjinfo, path->outerjoinpath->parent->relids,
5342 794838 : path->innerjoinpath->parent->relids);
5343 :
5344 : /* Get the approximate selectivity */
5345 1679862 : foreach(l, quals)
5346 : {
5347 885024 : Node *qual = (Node *) lfirst(l);
5348 :
5349 : /* Note that clause_selectivity will be able to cache its result */
5350 885024 : selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
5351 : }
5352 :
5353 : /* Apply it to the input relation sizes */
5354 794838 : tuples = selec * outer_tuples * inner_tuples;
5355 :
5356 794838 : return clamp_row_est(tuples);
5357 : }
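/*
 * Worked illustration with assumed values: two quals with cached
 * selectivities 0.1 and 0.05 give selec = 0.005; with 1000 outer and 2000
 * inner rows the approximate join output is
 *     clamp_row_est(0.005 * 1000 * 2000) = 10000
 * rows.  Multiplying the clause selectivities as if independent is exactly
 * the simplification described above.
 */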
5358 :
5359 :
5360 : /*
5361 : * set_baserel_size_estimates
5362 : * Set the size estimates for the given base relation.
5363 : *
5364 : * The rel's targetlist and restrictinfo list must have been constructed
5365 : * already, and rel->tuples must be set.
5366 : *
5367 : * We set the following fields of the rel node:
5368 : * rows: the estimated number of output tuples (after applying
5369 : * restriction clauses).
5370 : * width: the estimated average output tuple width in bytes.
5371 : * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
5372 : */
5373 : void
5374 529752 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5375 : {
5376 : double nrows;
5377 :
5378 : /* Should only be applied to base relations */
5379 : Assert(rel->relid > 0);
5380 :
5381 1059474 : nrows = rel->tuples *
5382 529752 : clauselist_selectivity(root,
5383 : rel->baserestrictinfo,
5384 : 0,
5385 : JOIN_INNER,
5386 : NULL);
5387 :
5388 529722 : rel->rows = clamp_row_est(nrows);
5389 :
5390 529722 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5391 :
5392 529722 : set_rel_width(root, rel);
5393 529722 : }
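/*
 * Worked illustration with assumed values: a table of 1,000,000 tuples whose
 * baserestrictinfo clauses have a combined selectivity of 0.015 gets
 *     rows = clamp_row_est(1000000 * 0.015) = 15000
 * as its scan-output estimate.
 */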
5394 :
5395 : /*
5396 : * get_parameterized_baserel_size
5397 : * Make a size estimate for a parameterized scan of a base relation.
5398 : *
5399 : * 'param_clauses' lists the additional join clauses to be used.
5400 : *
5401 : * set_baserel_size_estimates must have been applied already.
5402 : */
5403 : double
5404 161906 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
5405 : List *param_clauses)
5406 : {
5407 : List *allclauses;
5408 : double nrows;
5409 :
5410 : /*
5411 : * Estimate the number of rows returned by the parameterized scan, knowing
5412 : * that it will apply all the extra join clauses as well as the rel's own
5413 : * restriction clauses. Note that we force the clauses to be treated as
5414 : * non-join clauses during selectivity estimation.
5415 : */
5416 161906 : allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
5417 323812 : nrows = rel->tuples *
5418 161906 : clauselist_selectivity(root,
5419 : allclauses,
5420 161906 : rel->relid, /* do not use 0! */
5421 : JOIN_INNER,
5422 : NULL);
5423 161906 : nrows = clamp_row_est(nrows);
5424 : /* For safety, make sure result is not more than the base estimate */
5425 161906 : if (nrows > rel->rows)
5426 0 : nrows = rel->rows;
5427 161906 : return nrows;
5428 : }
5429 :
5430 : /*
5431 : * set_joinrel_size_estimates
5432 : * Set the size estimates for the given join relation.
5433 : *
5434 : * The rel's targetlist must have been constructed already, and a
5435 : * restriction clause list that matches the given component rels must
5436 : * be provided.
5437 : *
5438 : * Since there is more than one way to make a joinrel for more than two
5439 : * base relations, the results we get here could depend on which component
5440 : * rel pair is provided. In theory we should get the same answers no matter
5441 : * which pair is provided; in practice, since the selectivity estimation
5442 : * routines don't handle all cases equally well, we might not. But there's
5443 : * not much to be done about it. (Would it make sense to repeat the
5444 : * calculations for each pair of input rels that's encountered, and somehow
5445 : * average the results? Probably way more trouble than it's worth, and
5446 : * anyway we must keep the rowcount estimate the same for all paths for the
5447 : * joinrel.)
5448 : *
5449 : * We set only the rows field here. The reltarget field was already set by
5450 : * build_joinrel_tlist, and baserestrictcost is not used for join rels.
5451 : */
5452 : void
5453 256422 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5454 : RelOptInfo *outer_rel,
5455 : RelOptInfo *inner_rel,
5456 : SpecialJoinInfo *sjinfo,
5457 : List *restrictlist)
5458 : {
5459 256422 : rel->rows = calc_joinrel_size_estimate(root,
5460 : rel,
5461 : outer_rel,
5462 : inner_rel,
5463 : outer_rel->rows,
5464 : inner_rel->rows,
5465 : sjinfo,
5466 : restrictlist);
5467 256422 : }
5468 :
5469 : /*
5470 : * get_parameterized_joinrel_size
5471 : * Make a size estimate for a parameterized scan of a join relation.
5472 : *
5473 : * 'rel' is the joinrel under consideration.
5474 : * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5475 : * produce the relations being joined.
5476 : * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5477 : * 'restrict_clauses' lists the join clauses that need to be applied at the
5478 : * join node (including any movable clauses that were moved down to this join,
5479 : * and not including any movable clauses that were pushed down into the
5480 : * child paths).
5481 : *
5482 : * set_joinrel_size_estimates must have been applied already.
5483 : */
5484 : double
5485 9668 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5486 : Path *outer_path,
5487 : Path *inner_path,
5488 : SpecialJoinInfo *sjinfo,
5489 : List *restrict_clauses)
5490 : {
5491 : double nrows;
5492 :
5493 : /*
5494 : * Estimate the number of rows returned by the parameterized join as the
5495 : * sizes of the input paths times the selectivity of the clauses that have
5496 : * ended up at this join node.
5497 : *
5498 : * As with set_joinrel_size_estimates, the rowcount estimate could depend
5499 : * on the pair of input paths provided, though ideally we'd get the same
5500 : * estimate for any pair with the same parameterization.
5501 : */
5502 9668 : nrows = calc_joinrel_size_estimate(root,
5503 : rel,
5504 : outer_path->parent,
5505 : inner_path->parent,
5506 : outer_path->rows,
5507 : inner_path->rows,
5508 : sjinfo,
5509 : restrict_clauses);
5510 : /* For safety, make sure result is not more than the base estimate */
5511 9668 : if (nrows > rel->rows)
5512 12 : nrows = rel->rows;
5513 9668 : return nrows;
5514 : }
5515 :
5516 : /*
5517 : * calc_joinrel_size_estimate
5518 : * Workhorse for set_joinrel_size_estimates and
5519 : * get_parameterized_joinrel_size.
5520 : *
5521 : * outer_rel/inner_rel are the relations being joined, but they should be
5522 : * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5523 : * than what rel->rows says, when we are considering parameterized paths.
5524 : */
5525 : static double
5526 266090 : calc_joinrel_size_estimate(PlannerInfo *root,
5527 : RelOptInfo *joinrel,
5528 : RelOptInfo *outer_rel,
5529 : RelOptInfo *inner_rel,
5530 : double outer_rows,
5531 : double inner_rows,
5532 : SpecialJoinInfo *sjinfo,
5533 : List *restrictlist)
5534 : {
5535 266090 : JoinType jointype = sjinfo->jointype;
5536 : Selectivity fkselec;
5537 : Selectivity jselec;
5538 : Selectivity pselec;
5539 : double nrows;
5540 :
5541 : /*
5542 : * Compute joinclause selectivity. Note that we are only considering
5543 : * clauses that become restriction clauses at this join level; we are not
5544 : * double-counting them because they were not considered in estimating the
5545 : * sizes of the component rels.
5546 : *
5547 : * First, see whether any of the joinclauses can be matched to known FK
5548 : * constraints. If so, drop those clauses from the restrictlist, and
5549 : * instead estimate their selectivity using FK semantics. (We do this
5550 : * without regard to whether said clauses are local or "pushed down".
5551 : * Probably, an FK-matching clause could never be seen as pushed down at
5552 : * an outer join, since it would be strict and hence would be grounds for
5553 : * join strength reduction.) fkselec gets the net selectivity for
5554 : * FK-matching clauses, or 1.0 if there are none.
5555 : */
5556 266090 : fkselec = get_foreign_key_join_selectivity(root,
5557 : outer_rel->relids,
5558 : inner_rel->relids,
5559 : sjinfo,
5560 : &restrictlist);
5561 :
5562 : /*
5563 : * For an outer join, we have to distinguish the selectivity of the join's
5564 : * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5565 : * down". For inner joins we just count them all as joinclauses.
5566 : */
5567 266090 : if (IS_OUTER_JOIN(jointype))
5568 : {
5569 80872 : List *joinquals = NIL;
5570 80872 : List *pushedquals = NIL;
5571 : ListCell *l;
5572 :
5573 : /* Grovel through the clauses to separate into two lists */
5574 182256 : foreach(l, restrictlist)
5575 : {
5576 101384 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5577 :
5578 101384 : if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5579 4298 : pushedquals = lappend(pushedquals, rinfo);
5580 : else
5581 97086 : joinquals = lappend(joinquals, rinfo);
5582 : }
5583 :
5584 : /* Get the separate selectivities */
5585 80872 : jselec = clauselist_selectivity(root,
5586 : joinquals,
5587 : 0,
5588 : jointype,
5589 : sjinfo);
5590 80872 : pselec = clauselist_selectivity(root,
5591 : pushedquals,
5592 : 0,
5593 : jointype,
5594 : sjinfo);
5595 :
5596 : /* Avoid leaking a lot of ListCells */
5597 80872 : list_free(joinquals);
5598 80872 : list_free(pushedquals);
5599 : }
5600 : else
5601 : {
5602 185218 : jselec = clauselist_selectivity(root,
5603 : restrictlist,
5604 : 0,
5605 : jointype,
5606 : sjinfo);
5607 185218 : pselec = 0.0; /* not used, keep compiler quiet */
5608 : }
5609 :
5610 : /*
5611 : * Basically, we multiply size of Cartesian product by selectivity.
5612 : *
5613 : * If we are doing an outer join, take that into account: the joinqual
5614 : * selectivity has to be clamped using the knowledge that the output must
5615 : * be at least as large as the non-nullable input. However, any
5616 : * pushed-down quals are applied after the outer join, so their
5617 : * selectivity applies fully.
5618 : *
5619 : * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5620 : * of LHS rows that have matches, and we apply that straightforwardly.
5621 : */
5622 266090 : switch (jointype)
5623 : {
5624 176982 : case JOIN_INNER:
5625 176982 : nrows = outer_rows * inner_rows * fkselec * jselec;
5626 : /* pselec not used */
5627 176982 : break;
5628 74112 : case JOIN_LEFT:
5629 74112 : nrows = outer_rows * inner_rows * fkselec * jselec;
5630 74112 : if (nrows < outer_rows)
5631 28466 : nrows = outer_rows;
5632 74112 : nrows *= pselec;
5633 74112 : break;
5634 1720 : case JOIN_FULL:
5635 1720 : nrows = outer_rows * inner_rows * fkselec * jselec;
5636 1720 : if (nrows < outer_rows)
5637 1178 : nrows = outer_rows;
5638 1720 : if (nrows < inner_rows)
5639 120 : nrows = inner_rows;
5640 1720 : nrows *= pselec;
5641 1720 : break;
5642 8236 : case JOIN_SEMI:
5643 8236 : nrows = outer_rows * fkselec * jselec;
5644 : /* pselec not used */
5645 8236 : break;
5646 5040 : case JOIN_ANTI:
5647 5040 : nrows = outer_rows * (1.0 - fkselec * jselec);
5648 5040 : nrows *= pselec;
5649 5040 : break;
5650 0 : default:
5651 : /* other values not expected here */
5652 0 : elog(ERROR, "unrecognized join type: %d", (int) jointype);
5653 : nrows = 0; /* keep compiler quiet */
5654 : break;
5655 : }
5656 :
5657 266090 : return clamp_row_est(nrows);
5658 : }
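/*
 * Worked illustration of the JOIN_LEFT branch above, with assumed values:
 * outer_rows = 100, inner_rows = 1000, fkselec = 1.0, jselec = 0.0001 and
 * pselec = 0.5.  The raw product 100 * 1000 * 1.0 * 0.0001 = 10 is smaller
 * than the outer input, so it is clamped up to 100 (every outer row appears
 * at least once in a left join); the pushed-down quals then apply in full,
 * giving clamp_row_est(100 * 0.5) = 50 rows.
 */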
5659 :
5660 : /*
5661 : * get_foreign_key_join_selectivity
5662 : * Estimate join selectivity for foreign-key-related clauses.
5663 : *
5664 : * Remove any clauses that can be matched to FK constraints from *restrictlist,
5665 : * and return a substitute estimate of their selectivity. 1.0 is returned
5666 : * when there are no such clauses.
5667 : *
5668 : * The reason for treating such clauses specially is that we can get better
5669 : * estimates this way than by relying on clauselist_selectivity(), especially
5670 : * for multi-column FKs where that function's assumption that the clauses are
5671 : * independent falls down badly. But even with single-column FKs, we may be
5672 : * able to get a better answer when the pg_statistic stats are missing or out
5673 : * of date.
5674 : */
5675 : static Selectivity
5676 266090 : get_foreign_key_join_selectivity(PlannerInfo *root,
5677 : Relids outer_relids,
5678 : Relids inner_relids,
5679 : SpecialJoinInfo *sjinfo,
5680 : List **restrictlist)
5681 : {
5682 266090 : Selectivity fkselec = 1.0;
5683 266090 : JoinType jointype = sjinfo->jointype;
5684 266090 : List *worklist = *restrictlist;
5685 : ListCell *lc;
5686 :
5687 : /* Consider each FK constraint that is known to match the query */
5688 268056 : foreach(lc, root->fkey_list)
5689 : {
5690 1966 : ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5691 : bool ref_is_outer;
5692 : List *removedlist;
5693 : ListCell *cell;
5694 :
5695 : /*
5696 : * This FK is not relevant unless it connects a baserel on one side of
5697 : * this join to a baserel on the other side.
5698 : */
5699 3580 : if (bms_is_member(fkinfo->con_relid, outer_relids) &&
5700 1614 : bms_is_member(fkinfo->ref_relid, inner_relids))
5701 1440 : ref_is_outer = false;
5702 866 : else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5703 340 : bms_is_member(fkinfo->con_relid, inner_relids))
5704 130 : ref_is_outer = true;
5705 : else
5706 396 : continue;
5707 :
5708 : /*
5709 : * If we're dealing with a semi/anti join, and the FK's referenced
5710 : * relation is on the outside, then knowledge of the FK doesn't help
5711 : * us figure out what we need to know (which is the fraction of outer
5712 : * rows that have matches). On the other hand, if the referenced rel
5713 : * is on the inside, then all outer rows must have matches in the
5714 : * referenced table (ignoring nulls). But any restriction or join
5715 : * clauses that filter that table will reduce the fraction of matches.
5716 : * We can account for restriction clauses, but it's too hard to guess
5717 : * how many table rows would get through a join that's inside the RHS.
5718 : * Hence, if either case applies, punt and ignore the FK.
5719 : */
5720 1570 : if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
5721 1048 : (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
5722 12 : continue;
5723 :
5724 : /*
5725 : * Modify the restrictlist by removing clauses that match the FK (and
5726 : * putting them into removedlist instead). It seems unsafe to modify
5727 : * the originally-passed List structure, so we make a shallow copy the
5728 : * first time through.
5729 : */
5730 1558 : if (worklist == *restrictlist)
5731 1334 : worklist = list_copy(worklist);
5732 :
5733 1558 : removedlist = NIL;
5734 3252 : foreach(cell, worklist)
5735 : {
5736 1694 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5737 1694 : bool remove_it = false;
5738 : int i;
5739 :
5740 : /* Drop this clause if it matches any column of the FK */
5741 2140 : for (i = 0; i < fkinfo->nkeys; i++)
5742 : {
5743 2110 : if (rinfo->parent_ec)
5744 : {
5745 : /*
5746 : * EC-derived clauses can only match by EC. It is okay to
5747 : * consider any clause derived from the same EC as
5748 : * matching the FK: even if equivclass.c chose to generate
5749 : * a clause equating some other pair of Vars, it could
5750 : * have generated one equating the FK's Vars. So for
5751 : * purposes of estimation, we can act as though it did so.
5752 : *
5753 : * Note: checking parent_ec is a bit of a cheat because
5754 : * there are EC-derived clauses that don't have parent_ec
5755 : * set; but such clauses must compare expressions that
5756 : * aren't just Vars, so they cannot match the FK anyway.
5757 : */
5758 304 : if (fkinfo->eclass[i] == rinfo->parent_ec)
5759 : {
5760 298 : remove_it = true;
5761 298 : break;
5762 : }
5763 : }
5764 : else
5765 : {
5766 : /*
5767 : * Otherwise, see if rinfo was previously matched to the FK as
5768 : * a "loose" clause.
5769 : */
5770 1806 : if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5771 : {
5772 1366 : remove_it = true;
5773 1366 : break;
5774 : }
5775 : }
5776 : }
5777 1694 : if (remove_it)
5778 : {
5779 1664 : worklist = foreach_delete_current(worklist, cell);
5780 1664 : removedlist = lappend(removedlist, rinfo);
5781 : }
5782 : }
5783 :
5784 : /*
5785 : * If we failed to remove all the matching clauses we expected to
5786 : * find, chicken out and ignore this FK; applying its selectivity
5787 : * might result in double-counting. Put any clauses we did manage to
5788 : * remove back into the worklist.
5789 : *
5790 : * Since the matching clauses are known not outerjoin-delayed, they
5791 : * would normally have appeared in the initial joinclause list. If we
5792 : * didn't find them, there are two possibilities:
5793 : *
5794 : * 1. If the FK match is based on an EC that is ec_has_const, it won't
5795 : * have generated any join clauses at all. We discount such ECs while
5796 : * checking to see if we have "all" the clauses. (Below, we'll adjust
5797 : * the selectivity estimate for this case.)
5798 : *
5799 : * 2. The clauses were matched to some other FK in a previous
5800 : * iteration of this loop, and thus removed from worklist. (A likely
5801 : * case is that two FKs are matched to the same EC; there will be only
5802 : * one EC-derived clause in the initial list, so the first FK will
5803 : * consume it.) Applying both FKs' selectivity independently risks
5804 : * underestimating the join size; in particular, this would undo one
5805 : * of the main things that ECs were invented for, namely to avoid
5806 : * double-counting the selectivity of redundant equality conditions.
5807 : * Later we might think of a reasonable way to combine the estimates,
5808 : * but for now, just punt, since this is a fairly uncommon situation.
5809 : */
5810 1558 : if (removedlist == NIL ||
5811 1272 : list_length(removedlist) !=
5812 1272 : (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5813 : {
5814 286 : worklist = list_concat(worklist, removedlist);
5815 286 : continue;
5816 : }
5817 :
5818 : /*
5819 : * Finally we get to the payoff: estimate selectivity using the
5820 : * knowledge that each referencing row will match exactly one row in
5821 : * the referenced table.
5822 : *
5823 : * XXX that's not true in the presence of nulls in the referencing
5824 : * column(s), so in principle we should derate the estimate for those.
5825 : * However (1) if there are any strict restriction clauses for the
5826 : * referencing column(s) elsewhere in the query, derating here would
5827 : * be double-counting the null fraction, and (2) it's not very clear
5828 : * how to combine null fractions for multiple referencing columns. So
5829 : * we do nothing for now about correcting for nulls.
5830 : *
5831 : * XXX another point here is that if either side of an FK constraint
5832 : * is an inheritance parent, we estimate as though the constraint
5833 : * covers all its children as well. This is not an unreasonable
5834 : * assumption for a referencing table, ie the user probably applied
5835 : * identical constraints to all child tables (though perhaps we ought
5836 : * to check that). But it's not possible to have done that for a
5837 : * referenced table. Fortunately, precisely because that doesn't
5838 : * work, it is uncommon in practice to have an FK referencing a parent
5839 : * table. So, at least for now, disregard inheritance here.
5840 : */
5841 1272 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
5842 824 : {
5843 : /*
5844 : * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5845 : * referenced table is exactly the inside of the join. The join
5846 : * selectivity is defined as the fraction of LHS rows that have
5847 : * matches. The FK implies that every LHS row has a match *in the
5848 : * referenced table*; but any restriction clauses on it will
5849 : * reduce the number of matches. Hence we take the join
5850 : * selectivity as equal to the selectivity of the table's
5851 : * restriction clauses, which is rows / tuples; but we must guard
5852 : * against tuples == 0.
5853 : */
5854 824 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5855 824 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5856 :
5857 824 : fkselec *= ref_rel->rows / ref_tuples;
5858 : }
5859 : else
5860 : {
5861 : /*
5862 : * Otherwise, selectivity is exactly 1/referenced-table-size; but
5863 : * guard against tuples == 0. Note we should use the raw table
5864 : * tuple count, not any estimate of its filtered or joined size.
5865 : */
5866 448 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5867 448 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5868 :
5869 448 : fkselec *= 1.0 / ref_tuples;
5870 : }
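/*
 * Worked example (editorial, not part of the source): if the referenced
 * table has 10,000 raw tuples and its restriction clauses are expected to
 * let 500 rows through, the semi/anti-join branch above contributes
 * fkselec *= 500 / 10000 = 0.05 (the fraction of outer rows expected to
 * still find a match), while the inner/outer-join branch contributes
 * fkselec *= 1.0 / 10000, i.e. exactly one match per referencing row out
 * of the referenced table's raw size.
 */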
5871 :
5872 : /*
5873 : * If any of the FK columns participated in ec_has_const ECs, then
5874 : * equivclass.c will have generated "var = const" restrictions for
5875 : * each side of the join, thus reducing the sizes of both input
5876 : * relations. Taking the fkselec at face value would amount to
5877 : * double-counting the selectivity of the constant restriction for the
5878 : * referencing Var. Hence, look for the restriction clause(s) that
5879 : * were applied to the referencing Var(s), and divide out their
5880 : * selectivity to correct for this.
5881 : */
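/*
 * Editorial sketch of the arithmetic (not part of the source): the joinrel
 * size is roughly outer_rows * inner_rows * fkselec.  With an ec_has_const
 * EC, outer_rows already reflects "fk_col = const" (selectivity s0) and
 * inner_rows already reflects "ref_col = const" (about the same fraction),
 * so inner_rows is roughly s0 * ref_tuples.  Leaving fkselec at
 * 1/ref_tuples would therefore apply the constant filter twice to what is
 * logically one condition; dividing fkselec by s0 below cancels the extra
 * factor.
 */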
5882 1272 : if (fkinfo->nconst_ec > 0)
5883 : {
5884 24 : for (int i = 0; i < fkinfo->nkeys; i++)
5885 : {
5886 18 : EquivalenceClass *ec = fkinfo->eclass[i];
5887 :
5888 18 : if (ec && ec->ec_has_const)
5889 : {
5890 6 : EquivalenceMember *em = fkinfo->fk_eclass_member[i];
5891 6 : RestrictInfo *rinfo = find_derived_clause_for_ec_member(root,
5892 : ec,
5893 : em);
5894 :
5895 6 : if (rinfo)
5896 : {
5897 : Selectivity s0;
5898 :
5899 6 : s0 = clause_selectivity(root,
5900 : (Node *) rinfo,
5901 : 0,
5902 : jointype,
5903 : sjinfo);
5904 6 : if (s0 > 0)
5905 6 : fkselec /= s0;
5906 : }
5907 : }
5908 : }
5909 : }
5910 : }
5911 :
5912 266090 : *restrictlist = worklist;
5913 266090 : CLAMP_PROBABILITY(fkselec);
5914 266090 : return fkselec;
5915 : }
5916 :
5917 : /*
5918 : * set_subquery_size_estimates
5919 : * Set the size estimates for a base relation that is a subquery.
5920 : *
5921 : * The rel's targetlist and restrictinfo list must have been constructed
5922 : * already, and the Paths for the subquery must have been completed.
5923 : * We look at the subquery's PlannerInfo to extract data.
5924 : *
5925 : * We set the same fields as set_baserel_size_estimates.
5926 : */
5927 : void
5928 39480 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5929 : {
5930 39480 : PlannerInfo *subroot = rel->subroot;
5931 : RelOptInfo *sub_final_rel;
5932 : ListCell *lc;
5933 :
5934 : /* Should only be applied to base relations that are subqueries */
5935 : Assert(rel->relid > 0);
5936 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
5937 :
5938 : /*
5939 : * Copy raw number of output rows from subquery. All of its paths should
5940 : * have the same output rowcount, so just look at cheapest-total.
5941 : */
5942 39480 : sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
5943 39480 : rel->tuples = sub_final_rel->cheapest_total_path->rows;
5944 :
5945 : /*
5946 : * Compute per-output-column width estimates by examining the subquery's
5947 : * targetlist. For any output that is a plain Var, get the width estimate
5948 : * that was made while planning the subquery. Otherwise, we leave it to
5949 : * set_rel_width to fill in a datatype-based default estimate.
5950 : */
5951 186346 : foreach(lc, subroot->parse->targetList)
5952 : {
5953 146866 : TargetEntry *te = lfirst_node(TargetEntry, lc);
5954 146866 : Node *texpr = (Node *) te->expr;
5955 146866 : int32 item_width = 0;
5956 :
5957 : /* junk columns aren't visible to upper query */
5958 146866 : if (te->resjunk)
5959 6900 : continue;
5960 :
5961 : /*
5962 : * The subquery could be an expansion of a view that's had columns
5963 : * added to it since the current query was parsed, so that there are
5964 : * non-junk tlist columns in it that don't correspond to any column
5965 : * visible at our query level. Ignore such columns.
5966 : */
5967 139966 : if (te->resno < rel->min_attr || te->resno > rel->max_attr)
5968 0 : continue;
5969 :
5970 : /*
5971 : * XXX This currently doesn't work for subqueries containing set
5972 : * operations, because the Vars in their tlists are bogus references
5973 : * to the first leaf subquery, which wouldn't give the right answer
5974 : * even if we could still get to its PlannerInfo.
5975 : *
5976 : * Also, the subquery could be an appendrel for which all branches are
5977 : * known empty due to constraint exclusion, in which case
5978 : * set_append_rel_pathlist will have left the attr_widths set to zero.
5979 : *
5980 : * In either case, we just leave the width estimate zero until
5981 : * set_rel_width fixes it.
5982 : */
5983 139966 : if (IsA(texpr, Var) &&
5984 62552 : subroot->parse->setOperations == NULL)
5985 : {
5986 60702 : Var *var = (Var *) texpr;
5987 60702 : RelOptInfo *subrel = find_base_rel(subroot, var->varno);
5988 :
5989 60702 : item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
5990 : }
5991 139966 : rel->attr_widths[te->resno - rel->min_attr] = item_width;
5992 : }
5993 :
5994 : /* Now estimate number of output rows, etc */
5995 39480 : set_baserel_size_estimates(root, rel);
5996 39480 : }
5997 :
5998 : /*
5999 : * set_function_size_estimates
6000 : * Set the size estimates for a base relation that is a function call.
6001 : *
6002 : * The rel's targetlist and restrictinfo list must have been constructed
6003 : * already.
6004 : *
6005 : * We set the same fields as set_baserel_size_estimates.
6006 : */
6007 : void
6008 52296 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6009 : {
6010 : RangeTblEntry *rte;
6011 : ListCell *lc;
6012 :
6013 : /* Should only be applied to base relations that are functions */
6014 : Assert(rel->relid > 0);
6015 52296 : rte = planner_rt_fetch(rel->relid, root);
6016 : Assert(rte->rtekind == RTE_FUNCTION);
6017 :
6018 : /*
6019 : * Estimate number of rows the functions will return. The rowcount of the
6020 : * node is that of the largest function result.
6021 : */
6022 52296 : rel->tuples = 0;
6023 105094 : foreach(lc, rte->functions)
6024 : {
6025 52798 : RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
6026 52798 : double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
6027 :
6028 52798 : if (ntup > rel->tuples)
6029 52320 : rel->tuples = ntup;
6030 : }
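/*
 * Illustrative example (editorial, not part of the source): for
 * ROWS FROM (generate_series(1, 10), unnest(arr)), if the per-function
 * row estimates come out as 10 and 100, the function scan rel is sized at
 * the larger value, 100 tuples, since the shorter result is padded with
 * nulls when the functions are expanded side by side.
 */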
6031 :
6032 : /* Now estimate number of output rows, etc */
6033 52296 : set_baserel_size_estimates(root, rel);
6034 52296 : }
6035 :
6036 : /*
6037 : * set_tablefunc_size_estimates
6038 : * Set the size estimates for a base relation that is a table function.
6039 : *
6040 : * The rel's targetlist and restrictinfo list must have been constructed
6041 : * already.
6042 : *
6043 : * We set the same fields as set_baserel_size_estimates.
6044 : */
6045 : void
6046 626 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6047 : {
6048 : /* Should only be applied to base relations that are functions */
6049 : Assert(rel->relid > 0);
6050 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
6051 :
6052 626 : rel->tuples = 100;
6053 :
6054 : /* Now estimate number of output rows, etc */
6055 626 : set_baserel_size_estimates(root, rel);
6056 626 : }
6057 :
6058 : /*
6059 : * set_values_size_estimates
6060 : * Set the size estimates for a base relation that is a values list.
6061 : *
6062 : * The rel's targetlist and restrictinfo list must have been constructed
6063 : * already.
6064 : *
6065 : * We set the same fields as set_baserel_size_estimates.
6066 : */
6067 : void
6068 8480 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6069 : {
6070 : RangeTblEntry *rte;
6071 :
6072 : /* Should only be applied to base relations that are values lists */
6073 : Assert(rel->relid > 0);
6074 8480 : rte = planner_rt_fetch(rel->relid, root);
6075 : Assert(rte->rtekind == RTE_VALUES);
6076 :
6077 : /*
6078 : * Estimate number of rows the values list will return. We know this
6079 : * precisely based on the list length (well, barring set-returning
6080 : * functions in list items, but that's a refinement not catered for
6081 : * anywhere else either).
6082 : */
6083 8480 : rel->tuples = list_length(rte->values_lists);
6084 :
6085 : /* Now estimate number of output rows, etc */
6086 8480 : set_baserel_size_estimates(root, rel);
6087 8480 : }
6088 :
6089 : /*
6090 : * set_cte_size_estimates
6091 : * Set the size estimates for a base relation that is a CTE reference.
6092 : *
6093 : * The rel's targetlist and restrictinfo list must have been constructed
6094 : * already, and we need an estimate of the number of rows returned by the CTE
6095 : * (if a regular CTE) or the non-recursive term (if a self-reference).
6096 : *
6097 : * We set the same fields as set_baserel_size_estimates.
6098 : */
6099 : void
6100 5294 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
6101 : {
6102 : RangeTblEntry *rte;
6103 :
6104 : /* Should only be applied to base relations that are CTE references */
6105 : Assert(rel->relid > 0);
6106 5294 : rte = planner_rt_fetch(rel->relid, root);
6107 : Assert(rte->rtekind == RTE_CTE);
6108 :
6109 5294 : if (rte->self_reference)
6110 : {
6111 : /*
6112 : * In a self-reference, we assume the average worktable size is a
6113 : * multiple of the nonrecursive term's size. The best multiplier will
6114 : * vary depending on query "fan-out", so make its value adjustable.
6115 : */
6116 938 : rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
6117 : }
6118 : else
6119 : {
6120 : /* Otherwise just believe the CTE's rowcount estimate */
6121 4356 : rel->tuples = cte_rows;
6122 : }
6123 :
6124 : /* Now estimate number of output rows, etc */
6125 5294 : set_baserel_size_estimates(root, rel);
6126 5294 : }
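/*
 * Editorial example (not part of the source): with the default
 * recursive_worktable_factor of 10, a recursive CTE whose non-recursive
 * term is estimated at 50 rows gets a worktable estimate of
 * clamp_row_est(10 * 50) = 500 tuples for the self-reference, whereas an
 * ordinary CTE reference simply inherits cte_rows.
 */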
6127 :
6128 : /*
6129 : * set_namedtuplestore_size_estimates
6130 : * Set the size estimates for a base relation that is a tuplestore reference.
6131 : *
6132 : * The rel's targetlist and restrictinfo list must have been constructed
6133 : * already.
6134 : *
6135 : * We set the same fields as set_baserel_size_estimates.
6136 : */
6137 : void
6138 478 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6139 : {
6140 : RangeTblEntry *rte;
6141 :
6142 : /* Should only be applied to base relations that are tuplestore references */
6143 : Assert(rel->relid > 0);
6144 478 : rte = planner_rt_fetch(rel->relid, root);
6145 : Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
6146 :
6147 : /*
6148 : * Use the estimate provided by the code which is generating the named
6149 : * tuplestore. In some cases, the actual number might be available; in
6150 : * others the same plan will be re-used, so a "typical" value might be
6151 : * estimated and used.
6152 : */
6153 478 : rel->tuples = rte->enrtuples;
6154 478 : if (rel->tuples < 0)
6155 0 : rel->tuples = 1000;
6156 :
6157 : /* Now estimate number of output rows, etc */
6158 478 : set_baserel_size_estimates(root, rel);
6159 478 : }
6160 :
6161 : /*
6162 : * set_result_size_estimates
6163 : * Set the size estimates for an RTE_RESULT base relation
6164 : *
6165 : * The rel's targetlist and restrictinfo list must have been constructed
6166 : * already.
6167 : *
6168 : * We set the same fields as set_baserel_size_estimates.
6169 : */
6170 : void
6171 4286 : set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6172 : {
6173 : /* Should only be applied to RTE_RESULT base relations */
6174 : Assert(rel->relid > 0);
6175 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
6176 :
6177 : /* RTE_RESULT always generates a single row, natively */
6178 4286 : rel->tuples = 1;
6179 :
6180 : /* Now estimate number of output rows, etc */
6181 4286 : set_baserel_size_estimates(root, rel);
6182 4286 : }
6183 :
6184 : /*
6185 : * set_foreign_size_estimates
6186 : * Set the size estimates for a base relation that is a foreign table.
6187 : *
6188 : * There is not a whole lot that we can do here; the foreign-data wrapper
6189 : * is responsible for producing useful estimates. We can do a decent job
6190 : * of estimating baserestrictcost, so we set that, and we also set up width
6191 : * using what will be purely datatype-driven estimates from the targetlist.
6192 : * There is no way to do anything sane with the rows value, so we just put
6193 : * a default estimate and hope that the wrapper can improve on it. The
6194 : * wrapper's GetForeignRelSize function will be called momentarily.
6195 : *
6196 : * The rel's targetlist and restrictinfo list must have been constructed
6197 : * already.
6198 : */
6199 : void
6200 2462 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6201 : {
6202 : /* Should only be applied to base relations */
6203 : Assert(rel->relid > 0);
6204 :
6205 2462 : rel->rows = 1000; /* entirely bogus default estimate */
6206 :
6207 2462 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
6208 :
6209 2462 : set_rel_width(root, rel);
6210 2462 : }
6211 :
6212 :
6213 : /*
6214 : * set_rel_width
6215 : * Set the estimated output width of a base relation.
6216 : *
6217 : * The estimated output width is the sum of the per-attribute width estimates
6218 : * for the actually-referenced columns, plus any PHVs or other expressions
6219 : * that have to be calculated at this relation. This is the amount of data
6220 : * we'd need to pass upwards in case of a sort, hash, etc.
6221 : *
6222 : * This function also sets reltarget->cost, so it's a bit misnamed now.
6223 : *
6224 : * NB: this works best on plain relations because it prefers to look at
6225 : * real Vars. For subqueries, set_subquery_size_estimates will already have
6226 : * copied up whatever per-column estimates were made within the subquery,
6227 : * and for other types of rels there isn't much we can do anyway. We fall
6228 : * back on (fairly stupid) datatype-based width estimates if we can't get
6229 : * any better number.
6230 : *
6231 : * The per-attribute width estimates are cached for possible re-use while
6232 : * building join relations or post-scan/join pathtargets.
6233 : */
6234 : static void
6235 532184 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
6236 : {
6237 532184 : Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
6238 532184 : int64 tuple_width = 0;
6239 532184 : bool have_wholerow_var = false;
6240 : ListCell *lc;
6241 :
6242 : /* Vars are assumed to have cost zero, but other exprs do not */
6243 532184 : rel->reltarget->cost.startup = 0;
6244 532184 : rel->reltarget->cost.per_tuple = 0;
6245 :
6246 1894744 : foreach(lc, rel->reltarget->exprs)
6247 : {
6248 1362560 : Node *node = (Node *) lfirst(lc);
6249 :
6250 : /*
6251 : * Ordinarily, a Var in a rel's targetlist must belong to that rel;
6252 : * but there are corner cases involving LATERAL references where that
6253 : * isn't so. If the Var has the wrong varno, fall through to the
6254 : * generic case (it doesn't seem worth the trouble to be any smarter).
6255 : */
6256 1362560 : if (IsA(node, Var) &&
6257 1338210 : ((Var *) node)->varno == rel->relid)
6258 370662 : {
6259 1338144 : Var *var = (Var *) node;
6260 : int ndx;
6261 : int32 item_width;
6262 :
6263 : Assert(var->varattno >= rel->min_attr);
6264 : Assert(var->varattno <= rel->max_attr);
6265 :
6266 1338144 : ndx = var->varattno - rel->min_attr;
6267 :
6268 : /*
6269 : * If it's a whole-row Var, we'll deal with it below after we have
6270 : * already cached as many attr widths as possible.
6271 : */
6272 1338144 : if (var->varattno == 0)
6273 : {
6274 3054 : have_wholerow_var = true;
6275 3054 : continue;
6276 : }
6277 :
6278 : /*
6279 : * The width may have been cached already (especially if it's a
6280 : * subquery), so don't duplicate effort.
6281 : */
6282 1335090 : if (rel->attr_widths[ndx] > 0)
6283 : {
6284 268660 : tuple_width += rel->attr_widths[ndx];
6285 268660 : continue;
6286 : }
6287 :
6288 : /* Try to get column width from statistics */
6289 1066430 : if (reloid != InvalidOid && var->varattno > 0)
6290 : {
6291 836706 : item_width = get_attavgwidth(reloid, var->varattno);
6292 836706 : if (item_width > 0)
6293 : {
6294 695768 : rel->attr_widths[ndx] = item_width;
6295 695768 : tuple_width += item_width;
6296 695768 : continue;
6297 : }
6298 : }
6299 :
6300 : /*
6301 : * Not a plain relation, or can't find statistics for it. Estimate
6302 : * using just the type info.
6303 : */
6304 370662 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
6305 : Assert(item_width > 0);
6306 370662 : rel->attr_widths[ndx] = item_width;
6307 370662 : tuple_width += item_width;
6308 : }
6309 24416 : else if (IsA(node, PlaceHolderVar))
6310 : {
6311 : /*
6312 : * We will need to evaluate the PHV's contained expression while
6313 : * scanning this rel, so be sure to include it in reltarget->cost.
6314 : */
6315 2014 : PlaceHolderVar *phv = (PlaceHolderVar *) node;
6316 2014 : PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
6317 : QualCost cost;
6318 :
6319 2014 : tuple_width += phinfo->ph_width;
6320 2014 : cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
6321 2014 : rel->reltarget->cost.startup += cost.startup;
6322 2014 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6323 : }
6324 : else
6325 : {
6326 : /*
6327 : * We could be looking at an expression pulled up from a subquery,
6328 : * or a ROW() representing a whole-row child Var, etc. Do what we
6329 : * can using the expression type information.
6330 : */
6331 : int32 item_width;
6332 : QualCost cost;
6333 :
6334 22402 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
6335 : Assert(item_width > 0);
6336 22402 : tuple_width += item_width;
6337 : /* Not entirely clear if we need to account for cost, but do so */
6338 22402 : cost_qual_eval_node(&cost, node, root);
6339 22402 : rel->reltarget->cost.startup += cost.startup;
6340 22402 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6341 : }
6342 : }
6343 :
6344 : /*
6345 : * If we have a whole-row reference, estimate its width as the sum of
6346 : * per-column widths plus heap tuple header overhead.
6347 : */
6348 532184 : if (have_wholerow_var)
6349 : {
6350 3054 : int64 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
6351 :
6352 3054 : if (reloid != InvalidOid)
6353 : {
6354 : /* Real relation, so estimate true tuple width */
6355 2396 : wholerow_width += get_relation_data_width(reloid,
6356 2396 : rel->attr_widths - rel->min_attr);
6357 : }
6358 : else
6359 : {
6360 : /* Do what we can with info for a phony rel */
6361 : AttrNumber i;
6362 :
6363 1794 : for (i = 1; i <= rel->max_attr; i++)
6364 1136 : wholerow_width += rel->attr_widths[i - rel->min_attr];
6365 : }
6366 :
6367 3054 : rel->attr_widths[0 - rel->min_attr] = clamp_width_est(wholerow_width);
6368 :
6369 : /*
6370 : * Include the whole-row Var as part of the output tuple. Yes, that
6371 : * really is what happens at runtime.
6372 : */
6373 3054 : tuple_width += wholerow_width;
6374 : }
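/*
 * Editorial sketch (not part of the source): for a plain table, the
 * whole-row width works out to MAXALIGN(SizeofHeapTupleHeader) -- 24 bytes
 * on typical 64-bit builds -- plus get_relation_data_width()'s estimate of
 * the per-row data area; that total is then added to the output width on
 * top of any individually referenced columns, because the executor really
 * does build the composite datum alongside them.
 */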
6375 :
6376 532184 : rel->reltarget->width = clamp_width_est(tuple_width);
6377 532184 : }
6378 :
6379 : /*
6380 : * set_pathtarget_cost_width
6381 : * Set the estimated eval cost and output width of a PathTarget tlist.
6382 : *
6383 : * As a notational convenience, returns the same PathTarget pointer passed in.
6384 : *
6385 : * Most, though not quite all, uses of this function occur after we've run
6386 : * set_rel_width() for base relations; so we can usually obtain cached width
6387 : * estimates for Vars. If we can't, fall back on datatype-based width
6388 : * estimates. Present early-planning uses of PathTargets don't need accurate
6389 : * widths badly enough to justify going to the catalogs for better data.
6390 : */
6391 : PathTarget *
6392 640172 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6393 : {
6394 640172 : int64 tuple_width = 0;
6395 : ListCell *lc;
6396 :
6397 : /* Vars are assumed to have cost zero, but other exprs do not */
6398 640172 : target->cost.startup = 0;
6399 640172 : target->cost.per_tuple = 0;
6400 :
6401 2208840 : foreach(lc, target->exprs)
6402 : {
6403 1568668 : Node *node = (Node *) lfirst(lc);
6404 :
6405 1568668 : tuple_width += get_expr_width(root, node);
6406 :
6407 : /* For non-Vars, account for evaluation cost */
6408 1568668 : if (!IsA(node, Var))
6409 : {
6410 : QualCost cost;
6411 :
6412 681204 : cost_qual_eval_node(&cost, node, root);
6413 681204 : target->cost.startup += cost.startup;
6414 681204 : target->cost.per_tuple += cost.per_tuple;
6415 : }
6416 : }
6417 :
6418 640172 : target->width = clamp_width_est(tuple_width);
6419 :
6420 640172 : return target;
6421 : }
6422 :
6423 : /*
6424 : * get_expr_width
6425 : * Estimate the width of the given expr attempting to use the width
6426 : * cached in a Var's owning RelOptInfo, else fallback on the type's
6427 : * average width when unable to or when the given Node is not a Var.
6428 : */
6429 : static int32
6430 1902448 : get_expr_width(PlannerInfo *root, const Node *expr)
6431 : {
6432 : int32 width;
6433 :
6434 1902448 : if (IsA(expr, Var))
6435 : {
6436 1208288 : const Var *var = (const Var *) expr;
6437 :
6438 : /* We should not see any upper-level Vars here */
6439 : Assert(var->varlevelsup == 0);
6440 :
6441 : /* Try to get data from RelOptInfo cache */
6442 1208288 : if (!IS_SPECIAL_VARNO(var->varno) &&
6443 1202398 : var->varno < root->simple_rel_array_size)
6444 : {
6445 1202398 : RelOptInfo *rel = root->simple_rel_array[var->varno];
6446 :
6447 1202398 : if (rel != NULL &&
6448 1184468 : var->varattno >= rel->min_attr &&
6449 1184468 : var->varattno <= rel->max_attr)
6450 : {
6451 1184468 : int ndx = var->varattno - rel->min_attr;
6452 :
6453 1184468 : if (rel->attr_widths[ndx] > 0)
6454 1152080 : return rel->attr_widths[ndx];
6455 : }
6456 : }
6457 :
6458 : /*
6459 : * No cached data available, so estimate using just the type info.
6460 : */
6461 56208 : width = get_typavgwidth(var->vartype, var->vartypmod);
6462 : Assert(width > 0);
6463 :
6464 56208 : return width;
6465 : }
6466 :
6467 694160 : width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6468 : Assert(width > 0);
6469 694160 : return width;
6470 : }
6471 :
6472 : /*
6473 : * relation_byte_size
6474 : * Estimate the storage space in bytes for a given number of tuples
6475 : * of a given width (size in bytes).
6476 : */
6477 : static double
6478 5084220 : relation_byte_size(double tuples, int width)
6479 : {
6480 5084220 : return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
6481 : }
6482 :
6483 : /*
6484 : * page_size
6485 : * Returns an estimate of the number of pages covered by a given
6486 : * number of tuples of a given width (size in bytes).
6487 : */
6488 : static double
6489 9316 : page_size(double tuples, int width)
6490 : {
6491 9316 : return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6492 : }
6493 :
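/*
 * Worked example (editorial, not part of the source, assuming 8-byte
 * MAXALIGN and the default 8 kB BLCKSZ): 1,000 tuples of width 100 come
 * out as 1000 * (MAXALIGN(100) + MAXALIGN(SizeofHeapTupleHeader)) =
 * 1000 * (104 + 24) = 128,000 bytes, which page_size() rounds up to
 * ceil(128000 / 8192) = 16 pages.
 */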
6494 : /*
6495 : * Estimate the fraction of the work that each worker will do given the
6496 : * number of workers budgeted for the path.
6497 : */
6498 : static double
6499 468754 : get_parallel_divisor(Path *path)
6500 : {
6501 468754 : double parallel_divisor = path->parallel_workers;
6502 :
6503 : /*
6504 : * Early experience with parallel query suggests that when there is only
6505 : * one worker, the leader often makes a very substantial contribution to
6506 : * executing the parallel portion of the plan, but as more workers are
6507 : * added, it does less and less, because it's busy reading tuples from the
6508 : * workers and doing whatever non-parallel post-processing is needed. By
6509 : * the time we reach 4 workers, the leader no longer makes a meaningful
6510 : * contribution. Thus, for now, estimate that the leader spends 30% of
6511 : * its time servicing each worker, and the remainder executing the
6512 : * parallel plan.
6513 : */
6514 468754 : if (parallel_leader_participation)
6515 : {
6516 : double leader_contribution;
6517 :
6518 467452 : leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6519 467452 : if (leader_contribution > 0)
6520 464872 : parallel_divisor += leader_contribution;
6521 : }
6522 :
6523 468754 : return parallel_divisor;
6524 : }
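/*
 * Worked example (editorial, not part of the source): with 2 planned
 * workers and parallel_leader_participation enabled, leader_contribution =
 * 1.0 - 0.3 * 2 = 0.4, so the divisor is 2.4 and each worker is expected
 * to handle rows / 2.4 of the input.  From 4 workers up the leader term is
 * no longer positive and the divisor is just the worker count.
 */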
6525 :
6526 : /*
6527 : * compute_bitmap_pages
6528 : * Estimate number of pages fetched from heap in a bitmap heap scan.
6529 : *
6530 : * 'baserel' is the relation to be scanned
6531 : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
6532 : * 'loop_count' is the number of repetitions of the indexscan to factor into
6533 : * estimates of caching behavior
6534 : *
6535 : * If cost_p isn't NULL, the indexTotalCost estimate is returned in *cost_p.
6536 : * If tuples_p isn't NULL, the tuples_fetched estimate is returned in *tuples_p.
6537 : */
6538 : double
6539 702956 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
6540 : Path *bitmapqual, double loop_count,
6541 : Cost *cost_p, double *tuples_p)
6542 : {
6543 : Cost indexTotalCost;
6544 : Selectivity indexSelectivity;
6545 : double T;
6546 : double pages_fetched;
6547 : double tuples_fetched;
6548 : double heap_pages;
6549 : double maxentries;
6550 :
6551 : /*
6552 : * Fetch total cost of obtaining the bitmap, as well as its total
6553 : * selectivity.
6554 : */
6555 702956 : cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6556 :
6557 : /*
6558 : * Estimate number of main-table pages fetched.
6559 : */
6560 702956 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6561 :
6562 702956 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
6563 :
6564 : /*
6565 : * For a single scan, the number of heap pages that need to be fetched is
6566 : * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6567 : * re-reads needed).
6568 : */
6569 702956 : pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
6570 :
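/*
 * Worked example (editorial, not part of the source): with T = 1,000 heap
 * pages and tuples_fetched = 500, the formula gives
 * 2*1000*500 / (2*1000 + 500) = 400 pages fetched.  Once tuples_fetched
 * exceeds 2*T the formula exceeds T, and the clamp below (pages_fetched =
 * T) takes over.
 */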
6571 : /*
6572 : * Calculate the number of pages fetched from the heap. Then based on
6573 : * current work_mem estimate get the estimated maxentries in the bitmap.
6574 : * (Note that we always do this calculation based on the number of pages
6575 : * that would be fetched in a single iteration, even if loop_count > 1.
6576 : * That's correct, because only that number of entries will be stored in
6577 : * the bitmap at one time.)
6578 : */
6579 702956 : heap_pages = Min(pages_fetched, baserel->pages);
6580 702956 : maxentries = tbm_calculate_entries(work_mem * (Size) 1024);
6581 :
6582 702956 : if (loop_count > 1)
6583 : {
6584 : /*
6585 : * For repeated bitmap scans, scale up the number of tuples fetched in
6586 : * the Mackert and Lohman formula by the number of scans, so that we
6587 : * estimate the number of pages fetched by all the scans. Then
6588 : * pro-rate for one scan.
6589 : */
6590 147466 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6591 : baserel->pages,
6592 : get_indexpath_pages(bitmapqual),
6593 : root);
6594 147466 : pages_fetched /= loop_count;
6595 : }
6596 :
6597 702956 : if (pages_fetched >= T)
6598 71726 : pages_fetched = T;
6599 : else
6600 631230 : pages_fetched = ceil(pages_fetched);
6601 :
6602 702956 : if (maxentries < heap_pages)
6603 : {
6604 : double exact_pages;
6605 : double lossy_pages;
6606 :
6607 : /*
6608 : * Crude approximation of the number of lossy pages. Because of the
6609 : * way tbm_lossify() is coded, the number of lossy pages increases
6610 : * very sharply as soon as we run short of memory; this formula has
6611 : * that property and seems to perform adequately in testing, but it's
6612 : * possible we could do better somehow.
6613 : */
6614 18 : lossy_pages = Max(0, heap_pages - maxentries / 2);
6615 18 : exact_pages = heap_pages - lossy_pages;
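/*
 * Worked example (editorial, not part of the source): if the bitmap can
 * hold maxentries = 10,000 exact entries but heap_pages = 12,000, this
 * gives Max(0, 12000 - 10000/2) = 7,000 lossy pages and 5,000 exact
 * pages, reflecting how sharply tbm_lossify() degrades the bitmap once
 * memory runs short.
 */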
6616 :
6617 : /*
6618 : * If there are lossy pages then recompute the number of tuples
6619 : * processed by the bitmap heap node. We assume here that the chance
6620 : * of a given tuple coming from an exact page is the same as the
6621 : * chance that a given page is exact. This might not be true, but
6622 : * it's not clear how we can do any better.
6623 : */
6624 18 : if (lossy_pages > 0)
6625 : tuples_fetched =
6626 18 : clamp_row_est(indexSelectivity *
6627 18 : (exact_pages / heap_pages) * baserel->tuples +
6628 18 : (lossy_pages / heap_pages) * baserel->tuples);
6629 : }
6630 :
6631 702956 : if (cost_p)
6632 556780 : *cost_p = indexTotalCost;
6633 702956 : if (tuples_p)
6634 556780 : *tuples_p = tuples_fetched;
6635 :
6636 702956 : return pages_fetched;
6637 : }
6638 :
6639 : /*
6640 : * compute_gather_rows
6641 : * Estimate number of rows for gather (merge) nodes.
6642 : *
6643 : * In a parallel plan, each worker's row estimate is determined by dividing the
6644 : * total number of rows by parallel_divisor, which accounts for the leader's
6645 : * contribution in addition to the number of workers. Accordingly, when
6646 : * estimating the number of rows for gather (merge) nodes, we multiply the rows
6647 : * per worker by the same parallel_divisor to undo the division.
6648 : */
6649 : double
6650 44286 : compute_gather_rows(Path *path)
6651 : {
6652 : Assert(path->parallel_workers > 0);
6653 :
6654 44286 : return clamp_row_est(path->rows * get_parallel_divisor(path));
6655 : }
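/*
 * Worked example (editorial, not part of the source): a partial path
 * planned with 2 workers and rows = 1,000 per worker (its row count having
 * been divided by the parallel divisor of 2.4, assuming leader
 * participation) is estimated to deliver clamp_row_est(1000 * 2.4) = 2,400
 * rows through the Gather or Gather Merge node.
 */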