1 : /*-------------------------------------------------------------------------
2 : *
3 : * costsize.c
4 : * Routines to compute (and set) relation sizes and path costs
5 : *
6 : * Path costs are measured in arbitrary units established by these basic
7 : * parameters:
8 : *
9 : * seq_page_cost Cost of a sequential page fetch
10 : * random_page_cost Cost of a non-sequential page fetch
11 : * cpu_tuple_cost Cost of typical CPU time to process a tuple
12 : * cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
13 : * cpu_operator_cost Cost of CPU time to execute an operator or function
14 : * parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
15 : * parallel_setup_cost Cost of setting up shared memory for parallelism
16 : *
17 : * We expect that the kernel will typically do some amount of read-ahead
18 : * optimization; this in conjunction with seek costs means that seq_page_cost
19 : * is normally considerably less than random_page_cost. (However, if the
20 : * database is fully cached in RAM, it is reasonable to set them equal.)
21 : *
22 : * We also use a rough estimate "effective_cache_size" of the number of
23 : * disk pages in Postgres + OS-level disk cache. (We can't simply use
24 : * NBuffers for this purpose because that would ignore the effects of
25 : * the kernel's disk cache.)
26 : *
27 : * Obviously, taking constants for these values is an oversimplification,
28 : * but it's tough enough to get any useful estimates even at this level of
29 : * detail. Note that all of these parameters are user-settable, in case
30 : * the default values are drastically off for a particular platform.
31 : *
32 : * seq_page_cost and random_page_cost can also be overridden for an individual
33 : * tablespace, in case some data is on a fast disk and other data is on a slow
34 : * disk. Per-tablespace overrides never apply to temporary work files such as
35 : * an external sort or a materialize node that overflows work_mem.
36 : *
37 : * We compute two separate costs for each path:
38 : * total_cost: total estimated cost to fetch all tuples
39 : * startup_cost: cost that is expended before first tuple is fetched
40 : * In some scenarios, such as when there is a LIMIT or we are implementing
41 : * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
42 : * path's result. A caller can estimate the cost of fetching a partial
43 : * result by interpolating between startup_cost and total_cost. In detail:
44 : * actual_cost = startup_cost +
45 : * (total_cost - startup_cost) * tuples_to_fetch / path->rows;
46 : * Note that a base relation's rows count (and, by extension, plan_rows for
47 : * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
48 : * that this equation works properly. (Note: while path->rows is never zero
49 : * for ordinary relations, it is zero for paths for provably-empty relations,
50 : * so beware of division-by-zero.) The LIMIT is applied as a top-level
51 : * plan node.
52 : *
53 : * Each path stores the total number of disabled nodes that exist at or
54 : * below that point in the plan tree. This is regarded as a component of
55 : * the cost, and paths with fewer disabled nodes should be regarded as
56 : * cheaper than those with more. Disabled nodes occur when the user sets
57 : * a GUC like enable_seqscan=false. We can't necessarily respect such a
58 : * setting in every part of the plan tree, but we want to respect it in as many
59 : * parts of the plan tree as possible. Simpler schemes like storing a Boolean
60 : * here rather than a count fail to do that. We used to disable nodes by
61 : * adding a large constant to the startup cost, but that distorted planning
62 : * in other ways.
63 : *
64 : * For largely historical reasons, most of the routines in this module use
65 : * the passed result Path only to store their results (rows, startup_cost and
66 : * total_cost) into. All the input data they need is passed as separate
67 : * parameters, even though much of it could be extracted from the Path.
68 : * An exception is made for the cost_XXXjoin() routines, which expect all
69 : * the other fields of the passed XXXPath to be filled in, and similarly
70 : * cost_index() assumes the passed IndexPath is valid except for its output
71 : * values.
72 : *
73 : *
74 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
75 : * Portions Copyright (c) 1994, Regents of the University of California
76 : *
77 : * IDENTIFICATION
78 : * src/backend/optimizer/path/costsize.c
79 : *
80 : *-------------------------------------------------------------------------
81 : */
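/*
 * Editor's note: a minimal sketch, not part of costsize.c, showing the
 * startup/total interpolation described in the header comment above.  The
 * helper name interpolate_partial_cost and the COSTSIZE_EXAMPLES guard are
 * hypothetical; the guard keeps the sketch out of any real build.
 */
#ifdef COSTSIZE_EXAMPLES
static double
interpolate_partial_cost(double startup_cost, double total_cost,
                         double tuples_to_fetch, double path_rows)
{
    /* paths for provably-empty relations have rows == 0; avoid dividing */
    if (path_rows <= 0.0)
        return startup_cost;
    /* actual_cost = startup_cost + (total - startup) * fraction fetched */
    return startup_cost +
        (total_cost - startup_cost) * tuples_to_fetch / path_rows;
}
#endif                          /* COSTSIZE_EXAMPLES */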
82 :
83 : #include "postgres.h"
84 :
85 : #include <limits.h>
86 : #include <math.h>
87 :
88 : #include "access/amapi.h"
89 : #include "access/htup_details.h"
90 : #include "access/tsmapi.h"
91 : #include "executor/executor.h"
92 : #include "executor/nodeAgg.h"
93 : #include "executor/nodeHash.h"
94 : #include "executor/nodeMemoize.h"
95 : #include "miscadmin.h"
96 : #include "nodes/makefuncs.h"
97 : #include "nodes/nodeFuncs.h"
98 : #include "optimizer/clauses.h"
99 : #include "optimizer/cost.h"
100 : #include "optimizer/optimizer.h"
101 : #include "optimizer/pathnode.h"
102 : #include "optimizer/paths.h"
103 : #include "optimizer/placeholder.h"
104 : #include "optimizer/plancat.h"
105 : #include "optimizer/restrictinfo.h"
106 : #include "parser/parsetree.h"
107 : #include "utils/lsyscache.h"
108 : #include "utils/selfuncs.h"
109 : #include "utils/spccache.h"
110 : #include "utils/tuplesort.h"
111 :
112 :
113 : #define LOG2(x) (log(x) / 0.693147180559945)
114 :
115 : /*
116 : * Append and MergeAppend nodes are less expensive than some other operations
117 : * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
118 : * per-tuple cost as cpu_tuple_cost multiplied by this value.
119 : */
120 : #define APPEND_CPU_COST_MULTIPLIER 0.5
121 :
122 : /*
123 : * Maximum value for row estimates. We cap row estimates to this to help
124 : * ensure that costs based on these estimates remain within the range of what
125 : * double can represent. add_path() wouldn't act sanely given infinite or NaN
126 : * cost values.
127 : */
128 : #define MAXIMUM_ROWCOUNT 1e100
129 :
130 : double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
131 : double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
132 : double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
133 : double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
134 : double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
135 : double parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
136 : double parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
137 : double recursive_worktable_factor = DEFAULT_RECURSIVE_WORKTABLE_FACTOR;
138 :
139 : int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
140 :
141 : Cost disable_cost = 1.0e10;
142 :
143 : int max_parallel_workers_per_gather = 2;
144 :
145 : bool enable_seqscan = true;
146 : bool enable_indexscan = true;
147 : bool enable_indexonlyscan = true;
148 : bool enable_bitmapscan = true;
149 : bool enable_tidscan = true;
150 : bool enable_sort = true;
151 : bool enable_incremental_sort = true;
152 : bool enable_hashagg = true;
153 : bool enable_nestloop = true;
154 : bool enable_material = true;
155 : bool enable_memoize = true;
156 : bool enable_mergejoin = true;
157 : bool enable_hashjoin = true;
158 : bool enable_gathermerge = true;
159 : bool enable_partitionwise_join = false;
160 : bool enable_partitionwise_aggregate = false;
161 : bool enable_parallel_append = true;
162 : bool enable_parallel_hash = true;
163 : bool enable_partition_pruning = true;
164 : bool enable_presorted_aggregate = true;
165 : bool enable_async_append = true;
166 :
167 : typedef struct
168 : {
169 : PlannerInfo *root;
170 : QualCost total;
171 : } cost_qual_eval_context;
172 :
173 : static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
174 : static MergeScanSelCache *cached_scansel(PlannerInfo *root,
175 : RestrictInfo *rinfo,
176 : PathKey *pathkey);
177 : static void cost_rescan(PlannerInfo *root, Path *path,
178 : Cost *rescan_startup_cost, Cost *rescan_total_cost);
179 : static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
180 : static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
181 : ParamPathInfo *param_info,
182 : QualCost *qpqual_cost);
183 : static bool has_indexed_join_quals(NestPath *path);
184 : static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
185 : List *quals);
186 : static double calc_joinrel_size_estimate(PlannerInfo *root,
187 : RelOptInfo *joinrel,
188 : RelOptInfo *outer_rel,
189 : RelOptInfo *inner_rel,
190 : double outer_rows,
191 : double inner_rows,
192 : SpecialJoinInfo *sjinfo,
193 : List *restrictlist);
194 : static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
195 : Relids outer_relids,
196 : Relids inner_relids,
197 : SpecialJoinInfo *sjinfo,
198 : List **restrictlist);
199 : static Cost append_nonpartial_cost(List *subpaths, int numpaths,
200 : int parallel_workers);
201 : static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
202 : static int32 get_expr_width(PlannerInfo *root, const Node *expr);
203 : static double relation_byte_size(double tuples, int width);
204 : static double page_size(double tuples, int width);
205 : static double get_parallel_divisor(Path *path);
206 :
207 :
208 : /*
209 : * clamp_row_est
210 : * Force a row-count estimate to a sane value.
211 : */
212 : double
213 10363278 : clamp_row_est(double nrows)
214 : {
215 : /*
216 : * Avoid infinite and NaN row estimates. Costs derived from such values
217 : * are going to be useless. Also force the estimate to be at least one
218 : * row, to make explain output look better and to avoid possible
219 : * divide-by-zero when interpolating costs. Make it an integer, too.
220 : */
221 10363278 : if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
222 0 : nrows = MAXIMUM_ROWCOUNT;
223 10363278 : else if (nrows <= 1.0)
224 3318762 : nrows = 1.0;
225 : else
226 7044516 : nrows = rint(nrows);
227 :
228 10363278 : return nrows;
229 : }
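/*
 * Editor's worked example (hypothetical inputs): clamp_row_est(-5) and
 * clamp_row_est(0.3) both return 1.0; clamp_row_est(2.6) returns 3.0 via
 * rint(); and clamp_row_est(NAN), like any input above 1e100, returns
 * MAXIMUM_ROWCOUNT.
 */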
230 :
231 : /*
232 : * clamp_width_est
233 : * Force a tuple-width estimate to a sane value.
234 : *
235 : * The planner represents datatype width and tuple width estimates as int32.
236 : * When summing column width estimates to create a tuple width estimate,
237 : * it's possible to reach integer overflow in edge cases. To ensure sane
238 : * behavior, we form such sums in int64 arithmetic and then apply this routine
239 : * to clamp to int32 range.
240 : */
241 : int32
242 1927672 : clamp_width_est(int64 tuple_width)
243 : {
244 : /*
245 : * Anything more than MaxAllocSize is clearly bogus, since we could not
246 : * create a tuple that large.
247 : */
248 1927672 : if (tuple_width > MaxAllocSize)
249 0 : return (int32) MaxAllocSize;
250 :
251 : /*
252 : * Unlike clamp_row_est, we just Assert that the value isn't negative,
253 : * rather than masking such errors.
254 : */
255 : Assert(tuple_width >= 0);
256 :
257 1927672 : return (int32) tuple_width;
258 : }
259 :
260 : /*
261 : * clamp_cardinality_to_long
262 : * Cast a Cardinality value to a sane long value.
263 : */
264 : long
265 46616 : clamp_cardinality_to_long(Cardinality x)
266 : {
267 : /*
268 : * Just for paranoia's sake, ensure we do something sane with negative or
269 : * NaN values.
270 : */
271 46616 : if (isnan(x))
272 0 : return LONG_MAX;
273 46616 : if (x <= 0)
274 590 : return 0;
275 :
276 : /*
277 : * If "long" is 64 bits, then LONG_MAX cannot be represented exactly as a
278 : * double. Casting it to double and back may well result in overflow due
279 : * to rounding, so avoid doing that. We trust that any double value that
280 : * compares strictly less than "(double) LONG_MAX" will cast to a
281 : * representable "long" value.
282 : */
283 46026 : return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
284 : }
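/*
 * Editor's note: a small self-checking sketch, not part of costsize.c, of
 * the rounding hazard described above.  With a 64-bit long, LONG_MAX
 * (2^63 - 1) is not representable as a double and rounds up to 2^63, so a
 * naive "(long) (double) LONG_MAX" would overflow; the strict "<" test in
 * clamp_cardinality_to_long sidesteps that.  The COSTSIZE_EXAMPLES guard
 * is hypothetical.
 */
#ifdef COSTSIZE_EXAMPLES
static void
check_long_max_rounding(void)
{
    double      d = (double) LONG_MAX;  /* rounds up to 2^63 on LP64 */

    /* d is not strictly less than itself, so the clamp kicks in */
    Assert(clamp_cardinality_to_long(d) == LONG_MAX);
    /* the next-smaller double casts to a representable long */
    Assert(clamp_cardinality_to_long(nextafter(d, 0.0)) < LONG_MAX);
}
#endif                          /* COSTSIZE_EXAMPLES */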
285 :
286 :
287 : /*
288 : * cost_seqscan
289 : * Determines and returns the cost of scanning a relation sequentially.
290 : *
291 : * 'baserel' is the relation to be scanned
292 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
293 : */
294 : void
295 432198 : cost_seqscan(Path *path, PlannerInfo *root,
296 : RelOptInfo *baserel, ParamPathInfo *param_info)
297 : {
298 432198 : Cost startup_cost = 0;
299 : Cost cpu_run_cost;
300 : Cost disk_run_cost;
301 : double spc_seq_page_cost;
302 : QualCost qpqual_cost;
303 : Cost cpu_per_tuple;
304 :
305 : /* Should only be applied to base relations */
306 : Assert(baserel->relid > 0);
307 : Assert(baserel->rtekind == RTE_RELATION);
308 :
309 : /* Mark the path with the correct row estimate */
310 432198 : if (param_info)
311 840 : path->rows = param_info->ppi_rows;
312 : else
313 431358 : path->rows = baserel->rows;
314 :
315 : /* fetch estimated page cost for tablespace containing table */
316 432198 : get_tablespace_page_costs(baserel->reltablespace,
317 : NULL,
318 : &spc_seq_page_cost);
319 :
320 : /*
321 : * disk costs
322 : */
323 432198 : disk_run_cost = spc_seq_page_cost * baserel->pages;
324 :
325 : /* CPU costs */
326 432198 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
327 :
328 432198 : startup_cost += qpqual_cost.startup;
329 432198 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
330 432198 : cpu_run_cost = cpu_per_tuple * baserel->tuples;
331 : /* tlist eval costs are paid per output row, not per tuple scanned */
332 432198 : startup_cost += path->pathtarget->cost.startup;
333 432198 : cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
334 :
335 : /* Adjust costing for parallelism, if used. */
336 432198 : if (path->parallel_workers > 0)
337 : {
338 27388 : double parallel_divisor = get_parallel_divisor(path);
339 :
340 : /* The CPU cost is divided among all the workers. */
341 27388 : cpu_run_cost /= parallel_divisor;
342 :
343 : /*
344 : * It may be possible to amortize some of the I/O cost, but probably
345 : * not very much, because most operating systems already do aggressive
346 : * prefetching. For now, we assume that the disk run cost can't be
347 : * amortized at all.
348 : */
349 :
350 : /*
351 : * In the case of a parallel plan, the row count needs to represent
352 : * the number of tuples processed per worker.
353 : */
354 27388 : path->rows = clamp_row_est(path->rows / parallel_divisor);
355 : }
356 :
357 432198 : path->disabled_nodes = enable_seqscan ? 0 : 1;
358 432198 : path->startup_cost = startup_cost;
359 432198 : path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
360 432198 : }
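/*
 * Editor's worked example (hypothetical numbers, default GUC settings):
 * for a 10,000-page, 1,000,000-tuple table with no quals and a trivial
 * tlist, cost_seqscan charges disk_run_cost = seq_page_cost * 10000 =
 * 10000 and cpu_run_cost = cpu_tuple_cost * 1000000 = 10000, giving
 * startup_cost = 0 and total_cost = 20000.
 */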
361 :
362 : /*
363 : * cost_samplescan
364 : * Determines and returns the cost of scanning a relation using sampling.
365 : *
366 : * 'baserel' is the relation to be scanned
367 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
368 : */
369 : void
370 306 : cost_samplescan(Path *path, PlannerInfo *root,
371 : RelOptInfo *baserel, ParamPathInfo *param_info)
372 : {
373 306 : Cost startup_cost = 0;
374 306 : Cost run_cost = 0;
375 : RangeTblEntry *rte;
376 : TableSampleClause *tsc;
377 : TsmRoutine *tsm;
378 : double spc_seq_page_cost,
379 : spc_random_page_cost,
380 : spc_page_cost;
381 : QualCost qpqual_cost;
382 : Cost cpu_per_tuple;
383 :
384 : /* Should only be applied to base relations with tablesample clauses */
385 : Assert(baserel->relid > 0);
386 306 : rte = planner_rt_fetch(baserel->relid, root);
387 : Assert(rte->rtekind == RTE_RELATION);
388 306 : tsc = rte->tablesample;
389 : Assert(tsc != NULL);
390 306 : tsm = GetTsmRoutine(tsc->tsmhandler);
391 :
392 : /* Mark the path with the correct row estimate */
393 306 : if (param_info)
394 72 : path->rows = param_info->ppi_rows;
395 : else
396 234 : path->rows = baserel->rows;
397 :
398 : /* fetch estimated page cost for tablespace containing table */
399 306 : get_tablespace_page_costs(baserel->reltablespace,
400 : &spc_random_page_cost,
401 : &spc_seq_page_cost);
402 :
403 : /* if NextSampleBlock is used, assume random access, else sequential */
404 612 : spc_page_cost = (tsm->NextSampleBlock != NULL) ?
405 306 : spc_random_page_cost : spc_seq_page_cost;
406 :
407 : /*
408 : * disk costs (recall that baserel->pages has already been set to the
409 : * number of pages the sampling method will visit)
410 : */
411 306 : run_cost += spc_page_cost * baserel->pages;
412 :
413 : /*
414 : * CPU costs (recall that baserel->tuples has already been set to the
415 : * number of tuples the sampling method will select). Note that we ignore
416 : * execution cost of the TABLESAMPLE parameter expressions; they will be
417 : * evaluated only once per scan, and in most usages they'll likely be
418 : * simple constants anyway. We also don't charge anything for the
419 : * calculations the sampling method might do internally.
420 : */
421 306 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
422 :
423 306 : startup_cost += qpqual_cost.startup;
424 306 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
425 306 : run_cost += cpu_per_tuple * baserel->tuples;
426 : /* tlist eval costs are paid per output row, not per tuple scanned */
427 306 : startup_cost += path->pathtarget->cost.startup;
428 306 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
429 :
430 306 : path->disabled_nodes = 0;
431 306 : path->startup_cost = startup_cost;
432 306 : path->total_cost = startup_cost + run_cost;
433 306 : }
434 :
435 : /*
436 : * cost_gather
437 : * Determines and returns the cost of gather path.
438 : *
439 : * 'rel' is the relation to be operated upon
440 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
441 : * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
442 : * both 'rel' and 'param_info'. This is useful when the path doesn't exactly
443 : * correspond to any particular RelOptInfo.
444 : */
445 : void
446 24462 : cost_gather(GatherPath *path, PlannerInfo *root,
447 : RelOptInfo *rel, ParamPathInfo *param_info,
448 : double *rows)
449 : {
450 24462 : Cost startup_cost = 0;
451 24462 : Cost run_cost = 0;
452 :
453 : /* Mark the path with the correct row estimate */
454 24462 : if (rows)
455 6084 : path->path.rows = *rows;
456 18378 : else if (param_info)
457 0 : path->path.rows = param_info->ppi_rows;
458 : else
459 18378 : path->path.rows = rel->rows;
460 :
461 24462 : startup_cost = path->subpath->startup_cost;
462 :
463 24462 : run_cost = path->subpath->total_cost - path->subpath->startup_cost;
464 :
465 : /* Parallel setup and communication cost. */
466 24462 : startup_cost += parallel_setup_cost;
467 24462 : run_cost += parallel_tuple_cost * path->path.rows;
468 :
469 24462 : path->path.disabled_nodes = path->subpath->disabled_nodes;
470 24462 : path->path.startup_cost = startup_cost;
471 24462 : path->path.total_cost = (startup_cost + run_cost);
472 24462 : }
473 :
474 : /*
475 : * cost_gather_merge
476 : * Determines and returns the cost of gather merge path.
477 : *
478 : * GatherMerge merges several pre-sorted input streams, using a heap that at
479 : * any given instant holds the next tuple from each stream. If there are N
480 : * streams, we need about N*log2(N) tuple comparisons to construct the heap at
481 : * startup, and then for each output tuple, about log2(N) comparisons to
482 : * replace the top heap entry with the next tuple from the same stream.
483 : */
484 : void
485 17538 : cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
486 : RelOptInfo *rel, ParamPathInfo *param_info,
487 : int input_disabled_nodes,
488 : Cost input_startup_cost, Cost input_total_cost,
489 : double *rows)
490 : {
491 17538 : Cost startup_cost = 0;
492 17538 : Cost run_cost = 0;
493 : Cost comparison_cost;
494 : double N;
495 : double logN;
496 :
497 : /* Mark the path with the correct row estimate */
498 17538 : if (rows)
499 10924 : path->path.rows = *rows;
500 6614 : else if (param_info)
501 0 : path->path.rows = param_info->ppi_rows;
502 : else
503 6614 : path->path.rows = rel->rows;
504 :
505 : /*
506 : * Add one to the number of workers to account for the leader. This might
507 : * be overgenerous since the leader will do less work than other workers
508 : * in typical cases, but we'll go with it for now.
509 : */
510 : Assert(path->num_workers > 0);
511 17538 : N = (double) path->num_workers + 1;
512 17538 : logN = LOG2(N);
513 :
514 : /* Assumed cost per tuple comparison */
515 17538 : comparison_cost = 2.0 * cpu_operator_cost;
516 :
517 : /* Heap creation cost */
518 17538 : startup_cost += comparison_cost * N * logN;
519 :
520 : /* Per-tuple heap maintenance cost */
521 17538 : run_cost += path->path.rows * comparison_cost * logN;
522 :
523 : /* small cost for heap management, like cost_merge_append */
524 17538 : run_cost += cpu_operator_cost * path->path.rows;
525 :
526 : /*
527 : * Parallel setup and communication cost. Since Gather Merge, unlike
528 : * Gather, requires us to block until a tuple is available from every
529 : * worker, we bump the IPC cost up a little bit as compared with Gather.
530 : * For lack of a better idea, charge an extra 5%.
531 : */
532 17538 : startup_cost += parallel_setup_cost;
533 17538 : run_cost += parallel_tuple_cost * path->path.rows * 1.05;
534 :
535 17538 : path->path.disabled_nodes = input_disabled_nodes
536 17538 : + (enable_gathermerge ? 0 : 1);
537 17538 : path->path.startup_cost = startup_cost + input_startup_cost;
538 17538 : path->path.total_cost = (startup_cost + run_cost + input_total_cost);
539 17538 : }
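/*
 * Editor's worked example (hypothetical numbers, default GUC settings):
 * with num_workers = 3 we get N = 4 and logN = 2.  Heap creation adds
 * 2 * cpu_operator_cost * 4 * 2 = 0.04 and parallel_setup_cost = 1000
 * to startup; each output row then costs
 * 2 * cpu_operator_cost * 2 + cpu_operator_cost = 0.0125 of heap
 * maintenance plus parallel_tuple_cost * 1.05 = 0.105 of IPC.
 */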
540 :
541 : /*
542 : * cost_index
543 : * Determines and returns the cost of scanning a relation using an index.
544 : *
545 : * 'path' describes the indexscan under consideration, and is complete
546 : * except for the fields to be set by this routine
547 : * 'loop_count' is the number of repetitions of the indexscan to factor into
548 : * estimates of caching behavior
549 : *
550 : * In addition to rows, startup_cost and total_cost, cost_index() sets the
551 : * path's indextotalcost and indexselectivity fields. These values will be
552 : * needed if the IndexPath is used in a BitmapIndexScan.
553 : *
554 : * NOTE: path->indexquals must contain only clauses usable as index
555 : * restrictions. Any additional quals evaluated as qpquals may reduce the
556 : * number of returned tuples, but they won't reduce the number of tuples
557 : * we have to fetch from the table, so they don't reduce the scan cost.
558 : */
559 : void
560 800538 : cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
561 : bool partial_path)
562 : {
563 800538 : IndexOptInfo *index = path->indexinfo;
564 800538 : RelOptInfo *baserel = index->rel;
565 800538 : bool indexonly = (path->path.pathtype == T_IndexOnlyScan);
566 : amcostestimate_function amcostestimate;
567 : List *qpquals;
568 800538 : Cost startup_cost = 0;
569 800538 : Cost run_cost = 0;
570 800538 : Cost cpu_run_cost = 0;
571 : Cost indexStartupCost;
572 : Cost indexTotalCost;
573 : Selectivity indexSelectivity;
574 : double indexCorrelation,
575 : csquared;
576 : double spc_seq_page_cost,
577 : spc_random_page_cost;
578 : Cost min_IO_cost,
579 : max_IO_cost;
580 : QualCost qpqual_cost;
581 : Cost cpu_per_tuple;
582 : double tuples_fetched;
583 : double pages_fetched;
584 : double rand_heap_pages;
585 : double index_pages;
586 :
587 : /* Should only be applied to base relations */
588 : Assert(IsA(baserel, RelOptInfo) &&
589 : IsA(index, IndexOptInfo));
590 : Assert(baserel->relid > 0);
591 : Assert(baserel->rtekind == RTE_RELATION);
592 :
593 : /*
594 : * Mark the path with the correct row estimate, and identify which quals
595 : * will need to be enforced as qpquals. We need not check any quals that
596 : * are implied by the index's predicate, so we can use indrestrictinfo not
597 : * baserestrictinfo as the list of relevant restriction clauses for the
598 : * rel.
599 : */
600 800538 : if (path->path.param_info)
601 : {
602 153764 : path->path.rows = path->path.param_info->ppi_rows;
603 : /* qpquals come from the rel's restriction clauses and ppi_clauses */
604 153764 : qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
605 : path->indexclauses),
606 153764 : extract_nonindex_conditions(path->path.param_info->ppi_clauses,
607 : path->indexclauses));
608 : }
609 : else
610 : {
611 646774 : path->path.rows = baserel->rows;
612 : /* qpquals come from just the rel's restriction clauses */
613 646774 : qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
614 : path->indexclauses);
615 : }
616 :
617 : /* we don't need to check enable_indexonlyscan; indxpath.c does that */
618 800538 : path->path.disabled_nodes = enable_indexscan ? 0 : 1;
619 :
620 : /*
621 : * Call index-access-method-specific code to estimate the processing cost
622 : * for scanning the index, as well as the selectivity of the index (ie,
623 : * the fraction of main-table tuples we will have to retrieve) and its
624 : * correlation to the main-table tuple order. We need a cast here because
625 : * pathnodes.h uses a weak function type to avoid including amapi.h.
626 : */
627 800538 : amcostestimate = (amcostestimate_function) index->amcostestimate;
628 800538 : amcostestimate(root, path, loop_count,
629 : &indexStartupCost, &indexTotalCost,
630 : &indexSelectivity, &indexCorrelation,
631 : &index_pages);
632 :
633 : /*
634 : * Save amcostestimate's results for possible use in bitmap scan planning.
635 : * We don't bother to save indexStartupCost or indexCorrelation, because a
636 : * bitmap scan doesn't care about either.
637 : */
638 800538 : path->indextotalcost = indexTotalCost;
639 800538 : path->indexselectivity = indexSelectivity;
640 :
641 : /* all costs for touching index itself included here */
642 800538 : startup_cost += indexStartupCost;
643 800538 : run_cost += indexTotalCost - indexStartupCost;
644 :
645 : /* estimate number of main-table tuples fetched */
646 800538 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
647 :
648 : /* fetch estimated page costs for tablespace containing table */
649 800538 : get_tablespace_page_costs(baserel->reltablespace,
650 : &spc_random_page_cost,
651 : &spc_seq_page_cost);
652 :
653 : /*----------
654 : * Estimate number of main-table pages fetched, and compute I/O cost.
655 : *
656 : * When the index ordering is uncorrelated with the table ordering,
657 : * we use an approximation proposed by Mackert and Lohman (see
658 : * index_pages_fetched() for details) to compute the number of pages
659 : * fetched, and then charge spc_random_page_cost per page fetched.
660 : *
661 : * When the index ordering is exactly correlated with the table ordering
662 : * (just after a CLUSTER, for example), the number of pages fetched should
663 : * be exactly selectivity * table_size. What's more, all but the first
664 : * will be sequential fetches, not the random fetches that occur in the
665 : * uncorrelated case. So if the number of pages is more than 1, we
666 : * ought to charge
667 : * spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
668 : * For partially-correlated indexes, we ought to charge somewhere between
669 : * these two estimates. We currently interpolate linearly between the
670 : * estimates based on the correlation squared (XXX is that appropriate?).
671 : *
672 : * If it's an index-only scan, then we will not need to fetch any heap
673 : * pages for which the visibility map shows all tuples are visible.
674 : * Hence, reduce the estimated number of heap fetches accordingly.
675 : * We use the measured fraction of the entire heap that is all-visible,
676 : * which might not be particularly relevant to the subset of the heap
677 : * that this query will fetch; but it's not clear how to do better.
678 : *----------
679 : */
680 800538 : if (loop_count > 1)
681 : {
682 : /*
683 : * For repeated indexscans, the appropriate estimate for the
684 : * uncorrelated case is to scale up the number of tuples fetched in
685 : * the Mackert and Lohman formula by the number of scans, so that we
686 : * estimate the number of pages fetched by all the scans; then
687 : * pro-rate the costs for one scan. In this case we assume all the
688 : * fetches are random accesses.
689 : */
690 88162 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
691 : baserel->pages,
692 88162 : (double) index->pages,
693 : root);
694 :
695 88162 : if (indexonly)
696 11158 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
697 :
698 88162 : rand_heap_pages = pages_fetched;
699 :
700 88162 : max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
701 :
702 : /*
703 : * In the perfectly correlated case, the number of pages touched by
704 : * each scan is selectivity * table_size, and we can use the Mackert
705 : * and Lohman formula at the page level to estimate how much work is
706 : * saved by caching across scans. We still assume all the fetches are
707 : * random, though, which is an overestimate that's hard to correct for
708 : * without double-counting the cache effects. (But in most cases
709 : * where such a plan is actually interesting, only one page would get
710 : * fetched per scan anyway, so it shouldn't matter much.)
711 : */
712 88162 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
713 :
714 88162 : pages_fetched = index_pages_fetched(pages_fetched * loop_count,
715 : baserel->pages,
716 88162 : (double) index->pages,
717 : root);
718 :
719 88162 : if (indexonly)
720 11158 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
721 :
722 88162 : min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
723 : }
724 : else
725 : {
726 : /*
727 : * Normal case: apply the Mackert and Lohman formula, and then
728 : * interpolate between that and the correlation-derived result.
729 : */
730 712376 : pages_fetched = index_pages_fetched(tuples_fetched,
731 : baserel->pages,
732 712376 : (double) index->pages,
733 : root);
734 :
735 712376 : if (indexonly)
736 69562 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
737 :
738 712376 : rand_heap_pages = pages_fetched;
739 :
740 : /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
741 712376 : max_IO_cost = pages_fetched * spc_random_page_cost;
742 :
743 : /* min_IO_cost is for the perfectly correlated case (csquared=1) */
744 712376 : pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
745 :
746 712376 : if (indexonly)
747 69562 : pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
748 :
749 712376 : if (pages_fetched > 0)
750 : {
751 635122 : min_IO_cost = spc_random_page_cost;
752 635122 : if (pages_fetched > 1)
753 185512 : min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
754 : }
755 : else
756 77254 : min_IO_cost = 0;
757 : }
758 :
759 800538 : if (partial_path)
760 : {
761 : /*
762 : * For index only scans compute workers based on number of index pages
763 : * fetched; the number of heap pages we fetch might be so small as to
764 : * effectively rule out parallelism, which we don't want to do.
765 : */
766 276416 : if (indexonly)
767 25554 : rand_heap_pages = -1;
768 :
769 : /*
770 : * Estimate the number of parallel workers required to scan index. Use
771 : * the number of heap pages computed considering heap fetches won't be
772 : * sequential as for parallel scans the pages are accessed in random
773 : * order.
774 : */
775 276416 : path->path.parallel_workers = compute_parallel_worker(baserel,
776 : rand_heap_pages,
777 : index_pages,
778 : max_parallel_workers_per_gather);
779 :
780 : /*
781 : * Fall out if workers can't be assigned for parallel scan, because in
782 : * such a case this path will be rejected. So there is no benefit in
783 : * doing extra computation.
784 : */
785 276416 : if (path->path.parallel_workers <= 0)
786 266292 : return;
787 :
788 10124 : path->path.parallel_aware = true;
789 : }
790 :
791 : /*
792 : * Now interpolate based on estimated index order correlation to get total
793 : * disk I/O cost for main table accesses.
794 : */
795 534246 : csquared = indexCorrelation * indexCorrelation;
796 :
797 534246 : run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
798 :
799 : /*
800 : * Estimate CPU costs per tuple.
801 : *
802 : * What we want here is cpu_tuple_cost plus the evaluation costs of any
803 : * qual clauses that we have to evaluate as qpquals.
804 : */
805 534246 : cost_qual_eval(&qpqual_cost, qpquals, root);
806 :
807 534246 : startup_cost += qpqual_cost.startup;
808 534246 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
809 :
810 534246 : cpu_run_cost += cpu_per_tuple * tuples_fetched;
811 :
812 : /* tlist eval costs are paid per output row, not per tuple scanned */
813 534246 : startup_cost += path->path.pathtarget->cost.startup;
814 534246 : cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
815 :
816 : /* Adjust costing for parallelism, if used. */
817 534246 : if (path->path.parallel_workers > 0)
818 : {
819 10124 : double parallel_divisor = get_parallel_divisor(&path->path);
820 :
821 10124 : path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
822 :
823 : /* The CPU cost is divided among all the workers. */
824 10124 : cpu_run_cost /= parallel_divisor;
825 : }
826 :
827 534246 : run_cost += cpu_run_cost;
828 :
829 534246 : path->path.startup_cost = startup_cost;
830 534246 : path->path.total_cost = startup_cost + run_cost;
831 : }
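/*
 * Editor's note: a minimal sketch, not part of costsize.c, of the
 * correlation-based I/O interpolation performed above.  The helper name
 * interpolate_index_io_cost and the COSTSIZE_EXAMPLES guard are
 * hypothetical.
 */
#ifdef COSTSIZE_EXAMPLES
static double
interpolate_index_io_cost(double max_IO_cost, double min_IO_cost,
                          double indexCorrelation)
{
    double      csquared = indexCorrelation * indexCorrelation;

    /*
     * csquared = 0 (uncorrelated index order) yields max_IO_cost;
     * csquared = 1 (perfectly correlated, e.g. just after CLUSTER)
     * yields min_IO_cost.
     */
    return max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
}
#endif                          /* COSTSIZE_EXAMPLES */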
832 :
833 : /*
834 : * extract_nonindex_conditions
835 : *
836 : * Given a list of quals to be enforced in an indexscan, extract the ones that
837 : * will have to be applied as qpquals (ie, the index machinery won't handle
838 : * them). Here we detect only whether a qual clause is directly redundant
839 : * with some indexclause. If the index path is chosen for use, createplan.c
840 : * will try a bit harder to get rid of redundant qual conditions; specifically
841 : * it will see if quals can be proven to be implied by the indexquals. But
842 : * it does not seem worth the cycles to try to factor that in at this stage,
843 : * since we're only trying to estimate qual eval costs. Otherwise this must
844 : * match the logic in create_indexscan_plan().
845 : *
846 : * qual_clauses, and the result, are lists of RestrictInfos.
847 : * indexclauses is a list of IndexClauses.
848 : */
849 : static List *
850 954302 : extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
851 : {
852 954302 : List *result = NIL;
853 : ListCell *lc;
854 :
855 1981596 : foreach(lc, qual_clauses)
856 : {
857 1027294 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
858 :
859 1027294 : if (rinfo->pseudoconstant)
860 10106 : continue; /* we may drop pseudoconstants here */
861 1017188 : if (is_redundant_with_indexclauses(rinfo, indexclauses))
862 603274 : continue; /* dup or derived from same EquivalenceClass */
863 : /* ... skip the predicate proof attempt createplan.c will try ... */
864 413914 : result = lappend(result, rinfo);
865 : }
866 954302 : return result;
867 : }
868 :
869 : /*
870 : * index_pages_fetched
871 : * Estimate the number of pages actually fetched after accounting for
872 : * cache effects.
873 : *
874 : * We use an approximation proposed by Mackert and Lohman, "Index Scans
875 : * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
876 : * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
877 : * The Mackert and Lohman approximation is that the number of pages
878 : * fetched is
879 : * PF =
880 : * min(2TNs/(2T+Ns), T) when T <= b
881 : * 2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
882 : * b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
883 : * where
884 : * T = # pages in table
885 : * N = # tuples in table
886 : * s = selectivity = fraction of table to be scanned
887 : * b = # buffer pages available (we include kernel space here)
888 : *
889 : * We assume that effective_cache_size is the total number of buffer pages
890 : * available for the whole query, and pro-rate that space across all the
891 : * tables in the query and the index currently under consideration. (This
892 : * ignores space needed for other indexes used by the query, but since we
893 : * don't know which indexes will get used, we can't estimate that very well;
894 : * and in any case counting all the tables may well be an overestimate, since
895 : * depending on the join plan not all the tables may be scanned concurrently.)
896 : *
897 : * The product Ns is the number of tuples fetched; we pass in that
898 : * product rather than calculating it here. "pages" is the number of pages
899 : * in the object under consideration (either an index or a table).
900 : * "index_pages" is the amount to add to the total table space, which was
901 : * computed for us by make_one_rel.
902 : *
903 : * Caller is expected to have ensured that tuples_fetched is greater than zero
904 : * and rounded to integer (see clamp_row_est). The result will likewise be
905 : * greater than zero and integral.
906 : */
907 : double
908 1126672 : index_pages_fetched(double tuples_fetched, BlockNumber pages,
909 : double index_pages, PlannerInfo *root)
910 : {
911 : double pages_fetched;
912 : double total_pages;
913 : double T,
914 : b;
915 :
916 : /* T is # pages in table, but don't allow it to be zero */
917 1126672 : T = (pages > 1) ? (double) pages : 1.0;
918 :
919 : /* Compute number of pages assumed to be competing for cache space */
920 1126672 : total_pages = root->total_table_pages + index_pages;
921 1126672 : total_pages = Max(total_pages, 1.0);
922 : Assert(T <= total_pages);
923 :
924 : /* b is pro-rated share of effective_cache_size */
925 1126672 : b = (double) effective_cache_size * T / total_pages;
926 :
927 : /* force it positive and integral */
928 1126672 : if (b <= 1.0)
929 0 : b = 1.0;
930 : else
931 1126672 : b = ceil(b);
932 :
933 : /* This part is the Mackert and Lohman formula */
934 1126672 : if (T <= b)
935 : {
936 1126672 : pages_fetched =
937 1126672 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
938 1126672 : if (pages_fetched >= T)
939 654448 : pages_fetched = T;
940 : else
941 472224 : pages_fetched = ceil(pages_fetched);
942 : }
943 : else
944 : {
945 : double lim;
946 :
947 0 : lim = (2.0 * T * b) / (2.0 * T - b);
948 0 : if (tuples_fetched <= lim)
949 : {
950 0 : pages_fetched =
951 0 : (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
952 : }
953 : else
954 : {
955 0 : pages_fetched =
956 0 : b + (tuples_fetched - lim) * (T - b) / T;
957 : }
958 0 : pages_fetched = ceil(pages_fetched);
959 : }
960 1126672 : return pages_fetched;
961 : }
962 :
963 : /*
964 : * get_indexpath_pages
965 : * Determine the total size of the indexes used in a bitmap index path.
966 : *
967 : * Note: if the same index is used more than once in a bitmap tree, we will
968 : * count it multiple times, which perhaps is the wrong thing ... but it's
969 : * not completely clear, and detecting duplicates is difficult, so ignore it
970 : * for now.
971 : */
972 : static double
973 190134 : get_indexpath_pages(Path *bitmapqual)
974 : {
975 190134 : double result = 0;
976 : ListCell *l;
977 :
978 190134 : if (IsA(bitmapqual, BitmapAndPath))
979 : {
980 22734 : BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
981 :
982 68202 : foreach(l, apath->bitmapquals)
983 : {
984 45468 : result += get_indexpath_pages((Path *) lfirst(l));
985 : }
986 : }
987 167400 : else if (IsA(bitmapqual, BitmapOrPath))
988 : {
989 70 : BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
990 :
991 222 : foreach(l, opath->bitmapquals)
992 : {
993 152 : result += get_indexpath_pages((Path *) lfirst(l));
994 : }
995 : }
996 167330 : else if (IsA(bitmapqual, IndexPath))
997 : {
998 167330 : IndexPath *ipath = (IndexPath *) bitmapqual;
999 :
1000 167330 : result = (double) ipath->indexinfo->pages;
1001 : }
1002 : else
1003 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
1004 :
1005 190134 : return result;
1006 : }
1007 :
1008 : /*
1009 : * cost_bitmap_heap_scan
1010 : * Determines and returns the cost of scanning a relation using a bitmap
1011 : * index-then-heap plan.
1012 : *
1013 : * 'baserel' is the relation to be scanned
1014 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1015 : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
1016 : * 'loop_count' is the number of repetitions of the indexscan to factor into
1017 : * estimates of caching behavior
1018 : *
1019 : * Note: the component IndexPaths in bitmapqual should have been costed
1020 : * using the same loop_count.
1021 : */
1022 : void
1023 543892 : cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
1024 : ParamPathInfo *param_info,
1025 : Path *bitmapqual, double loop_count)
1026 : {
1027 543892 : Cost startup_cost = 0;
1028 543892 : Cost run_cost = 0;
1029 : Cost indexTotalCost;
1030 : QualCost qpqual_cost;
1031 : Cost cpu_per_tuple;
1032 : Cost cost_per_page;
1033 : Cost cpu_run_cost;
1034 : double tuples_fetched;
1035 : double pages_fetched;
1036 : double spc_seq_page_cost,
1037 : spc_random_page_cost;
1038 : double T;
1039 :
1040 : /* Should only be applied to base relations */
1041 : Assert(IsA(baserel, RelOptInfo));
1042 : Assert(baserel->relid > 0);
1043 : Assert(baserel->rtekind == RTE_RELATION);
1044 :
1045 : /* Mark the path with the correct row estimate */
1046 543892 : if (param_info)
1047 232110 : path->rows = param_info->ppi_rows;
1048 : else
1049 311782 : path->rows = baserel->rows;
1050 :
1051 543892 : pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
1052 : loop_count, &indexTotalCost,
1053 : &tuples_fetched);
1054 :
1055 543892 : startup_cost += indexTotalCost;
1056 543892 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
1057 :
1058 : /* Fetch estimated page costs for tablespace containing table. */
1059 543892 : get_tablespace_page_costs(baserel->reltablespace,
1060 : &spc_random_page_cost,
1061 : &spc_seq_page_cost);
1062 :
1063 : /*
1064 : * For small numbers of pages we should charge spc_random_page_cost
1065 : * apiece, while if nearly all the table's pages are being read, it's more
1066 : * appropriate to charge spc_seq_page_cost apiece. The effect is
1067 : * nonlinear, too. For lack of a better idea, interpolate like this to
1068 : * determine the cost per page.
1069 : */
1070 543892 : if (pages_fetched >= 2.0)
1071 110788 : cost_per_page = spc_random_page_cost -
1072 110788 : (spc_random_page_cost - spc_seq_page_cost)
1073 110788 : * sqrt(pages_fetched / T);
1074 : else
1075 433104 : cost_per_page = spc_random_page_cost;
1076 :
1077 543892 : run_cost += pages_fetched * cost_per_page;
1078 :
1079 : /*
1080 : * Estimate CPU costs per tuple.
1081 : *
1082 : * Often the indexquals don't need to be rechecked at each tuple ... but
1083 : * not always, especially not if there are enough tuples involved that the
1084 : * bitmaps become lossy. For the moment, just assume they will be
1085 : * rechecked always. This means we charge the full freight for all the
1086 : * scan clauses.
1087 : */
1088 543892 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1089 :
1090 543892 : startup_cost += qpqual_cost.startup;
1091 543892 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1092 543892 : cpu_run_cost = cpu_per_tuple * tuples_fetched;
1093 :
1094 : /* Adjust costing for parallelism, if used. */
1095 543892 : if (path->parallel_workers > 0)
1096 : {
1097 4188 : double parallel_divisor = get_parallel_divisor(path);
1098 :
1099 : /* The CPU cost is divided among all the workers. */
1100 4188 : cpu_run_cost /= parallel_divisor;
1101 :
1102 4188 : path->rows = clamp_row_est(path->rows / parallel_divisor);
1103 : }
1104 :
1105 :
1106 543892 : run_cost += cpu_run_cost;
1107 :
1108 : /* tlist eval costs are paid per output row, not per tuple scanned */
1109 543892 : startup_cost += path->pathtarget->cost.startup;
1110 543892 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1111 :
1112 543892 : path->disabled_nodes = enable_bitmapscan ? 0 : 1;
1113 543892 : path->startup_cost = startup_cost;
1114 543892 : path->total_cost = startup_cost + run_cost;
1115 543892 : }
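/*
 * Editor's note: a minimal sketch, not part of costsize.c, of the
 * per-page cost interpolation used above.  The helper name
 * bitmap_cost_per_page and the COSTSIZE_EXAMPLES guard are hypothetical.
 */
#ifdef COSTSIZE_EXAMPLES
static double
bitmap_cost_per_page(double spc_random_page_cost, double spc_seq_page_cost,
                     double pages_fetched, double T)
{
    /* a single page is charged as one full random fetch */
    if (pages_fetched < 2.0)
        return spc_random_page_cost;

    /*
     * Slide from random toward sequential cost as the fraction of the
     * table fetched grows; sqrt() makes the transition nonlinear.
     */
    return spc_random_page_cost -
        (spc_random_page_cost - spc_seq_page_cost) * sqrt(pages_fetched / T);
}
#endif                          /* COSTSIZE_EXAMPLES */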
1116 :
1117 : /*
1118 : * cost_bitmap_tree_node
1119 : * Extract cost and selectivity from a bitmap tree node (index/and/or)
1120 : */
1121 : void
1122 1008814 : cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
1123 : {
1124 1008814 : if (IsA(path, IndexPath))
1125 : {
1126 954952 : *cost = ((IndexPath *) path)->indextotalcost;
1127 954952 : *selec = ((IndexPath *) path)->indexselectivity;
1128 :
1129 : /*
1130 : * Charge a small amount per retrieved tuple to reflect the costs of
1131 : * manipulating the bitmap. This is mostly to make sure that a bitmap
1132 : * scan doesn't look to be the same cost as an indexscan to retrieve a
1133 : * single tuple.
1134 : */
1135 954952 : *cost += 0.1 * cpu_operator_cost * path->rows;
1136 : }
1137 53862 : else if (IsA(path, BitmapAndPath))
1138 : {
1139 50326 : *cost = path->total_cost;
1140 50326 : *selec = ((BitmapAndPath *) path)->bitmapselectivity;
1141 : }
1142 3536 : else if (IsA(path, BitmapOrPath))
1143 : {
1144 3536 : *cost = path->total_cost;
1145 3536 : *selec = ((BitmapOrPath *) path)->bitmapselectivity;
1146 : }
1147 : else
1148 : {
1149 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(path));
1150 : *cost = *selec = 0; /* keep compiler quiet */
1151 : }
1152 1008814 : }
1153 :
1154 : /*
1155 : * cost_bitmap_and_node
1156 : * Estimate the cost of a BitmapAnd node
1157 : *
1158 : * Note that this considers only the costs of index scanning and bitmap
1159 : * creation, not the eventual heap access. In that sense the object isn't
1160 : * truly a Path, but it has enough path-like properties (costs in particular)
1161 : * to warrant treating it as one. We don't bother to set the path rows field,
1162 : * however.
1163 : */
1164 : void
1165 50120 : cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
1166 : {
1167 : Cost totalCost;
1168 : Selectivity selec;
1169 : ListCell *l;
1170 :
1171 : /*
1172 : * We estimate AND selectivity on the assumption that the inputs are
1173 : * independent. This is probably often wrong, but we don't have the info
1174 : * to do better.
1175 : *
1176 : * The runtime cost of the BitmapAnd itself is estimated at 100x
1177 : * cpu_operator_cost for each tbm_intersect needed. Probably too small,
1178 : * definitely too simplistic?
1179 : */
1180 50120 : totalCost = 0.0;
1181 50120 : selec = 1.0;
1182 150360 : foreach(l, path->bitmapquals)
1183 : {
1184 100240 : Path *subpath = (Path *) lfirst(l);
1185 : Cost subCost;
1186 : Selectivity subselec;
1187 :
1188 100240 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1189 :
1190 100240 : selec *= subselec;
1191 :
1192 100240 : totalCost += subCost;
1193 100240 : if (l != list_head(path->bitmapquals))
1194 50120 : totalCost += 100.0 * cpu_operator_cost;
1195 : }
1196 50120 : path->bitmapselectivity = selec;
1197 50120 : path->path.rows = 0; /* per above, not used */
1198 50120 : path->path.disabled_nodes = 0;
1199 50120 : path->path.startup_cost = totalCost;
1200 50120 : path->path.total_cost = totalCost;
1201 50120 : }
1202 :
1203 : /*
1204 : * cost_bitmap_or_node
1205 : * Estimate the cost of a BitmapOr node
1206 : *
1207 : * See comments for cost_bitmap_and_node.
1208 : */
1209 : void
1210 1016 : cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1211 : {
1212 : Cost totalCost;
1213 : Selectivity selec;
1214 : ListCell *l;
1215 :
1216 : /*
1217 : * We estimate OR selectivity on the assumption that the inputs are
1218 : * non-overlapping, since that's often the case in "x IN (list)" type
1219 : * situations. Of course, we clamp to 1.0 at the end.
1220 : *
1221 : * The runtime cost of the BitmapOr itself is estimated at 100x
1222 : * cpu_operator_cost for each tbm_union needed. Probably too small,
1223 : * definitely too simplistic? We are aware that the tbm_unions are
1224 : * optimized out when the inputs are BitmapIndexScans.
1225 : */
1226 1016 : totalCost = 0.0;
1227 1016 : selec = 0.0;
1228 2850 : foreach(l, path->bitmapquals)
1229 : {
1230 1834 : Path *subpath = (Path *) lfirst(l);
1231 : Cost subCost;
1232 : Selectivity subselec;
1233 :
1234 1834 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1235 :
1236 1834 : selec += subselec;
1237 :
1238 1834 : totalCost += subCost;
1239 1834 : if (l != list_head(path->bitmapquals) &&
1240 818 : !IsA(subpath, IndexPath))
1241 0 : totalCost += 100.0 * cpu_operator_cost;
1242 : }
1243 1016 : path->bitmapselectivity = Min(selec, 1.0);
1244 1016 : path->path.rows = 0; /* per above, not used */
1245 1016 : path->path.startup_cost = totalCost;
1246 1016 : path->path.total_cost = totalCost;
1247 1016 : }
1248 :
1249 : /*
1250 : * cost_tidscan
1251 : * Determines and returns the cost of scanning a relation using TIDs.
1252 : *
1253 : * 'baserel' is the relation to be scanned
1254 : * 'tidquals' is the list of TID-checkable quals
1255 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1256 : */
1257 : void
1258 872 : cost_tidscan(Path *path, PlannerInfo *root,
1259 : RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1260 : {
1261 872 : Cost startup_cost = 0;
1262 872 : Cost run_cost = 0;
1263 : QualCost qpqual_cost;
1264 : Cost cpu_per_tuple;
1265 : QualCost tid_qual_cost;
1266 : double ntuples;
1267 : ListCell *l;
1268 : double spc_random_page_cost;
1269 :
1270 : /* Should only be applied to base relations */
1271 : Assert(baserel->relid > 0);
1272 : Assert(baserel->rtekind == RTE_RELATION);
1273 : Assert(tidquals != NIL);
1274 :
1275 : /* Mark the path with the correct row estimate */
1276 872 : if (param_info)
1277 144 : path->rows = param_info->ppi_rows;
1278 : else
1279 728 : path->rows = baserel->rows;
1280 :
1281 : /* Count how many tuples we expect to retrieve */
1282 872 : ntuples = 0;
1283 1770 : foreach(l, tidquals)
1284 : {
1285 898 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
1286 898 : Expr *qual = rinfo->clause;
1287 :
1288 : /*
1289 : * We must use a TID scan for CurrentOfExpr; in any other case, we
1290 : * should be generating a TID scan only if enable_tidscan=true. Also,
1291 : * if CurrentOfExpr is the qual, there should be only one.
1292 : */
1293 : Assert(enable_tidscan || IsA(qual, CurrentOfExpr));
1294 : Assert(list_length(tidquals) == 1 || !IsA(qual, CurrentOfExpr));
1295 :
1296 898 : if (IsA(qual, ScalarArrayOpExpr))
1297 : {
1298 : /* Each element of the array yields 1 tuple */
1299 50 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
1300 50 : Node *arraynode = (Node *) lsecond(saop->args);
1301 :
1302 50 : ntuples += estimate_array_length(root, arraynode);
1303 : }
1304 848 : else if (IsA(qual, CurrentOfExpr))
1305 : {
1306 : /* CURRENT OF yields 1 tuple */
1307 404 : ntuples++;
1308 : }
1309 : else
1310 : {
1311 : /* It's just CTID = something, count 1 tuple */
1312 444 : ntuples++;
1313 : }
1314 : }
1315 :
1316 : /*
1317 : * The TID qual expressions will be computed once, any other baserestrict
1318 : * quals once per retrieved tuple.
1319 : */
1320 872 : cost_qual_eval(&tid_qual_cost, tidquals, root);
1321 :
1322 : /* fetch estimated page cost for tablespace containing table */
1323 872 : get_tablespace_page_costs(baserel->reltablespace,
1324 : &spc_random_page_cost,
1325 : NULL);
1326 :
1327 : /* disk costs --- assume each tuple on a different page */
1328 872 : run_cost += spc_random_page_cost * ntuples;
1329 :
1330 : /* Add scanning CPU costs */
1331 872 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1332 :
1333 : /* XXX currently we assume TID quals are a subset of qpquals */
1334 872 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1335 872 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1336 872 : tid_qual_cost.per_tuple;
1337 872 : run_cost += cpu_per_tuple * ntuples;
1338 :
1339 : /* tlist eval costs are paid per output row, not per tuple scanned */
1340 872 : startup_cost += path->pathtarget->cost.startup;
1341 872 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1342 :
1343 : /*
1344 : * There are assertions above verifying that we only reach this function
1345 : * either when enable_tidscan=true or when the TID scan is the only legal
1346 : * path, so it's safe to set disabled_nodes to zero here.
1347 : */
1348 872 : path->disabled_nodes = 0;
1349 872 : path->startup_cost = startup_cost;
1350 872 : path->total_cost = startup_cost + run_cost;
1351 872 : }
1352 :
1353 : /*
1354 : * cost_tidrangescan
1355 : * Determines and sets the costs of scanning a relation using a range of
1356 : * TIDs for 'path'
1357 : *
1358 : * 'baserel' is the relation to be scanned
1359 : * 'tidrangequals' is the list of TID-checkable range quals
1360 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1361 : */
1362 : void
1363 1944 : cost_tidrangescan(Path *path, PlannerInfo *root,
1364 : RelOptInfo *baserel, List *tidrangequals,
1365 : ParamPathInfo *param_info)
1366 : {
1367 : Selectivity selectivity;
1368 : double pages;
1369 1944 : Cost startup_cost = 0;
1370 1944 : Cost run_cost = 0;
1371 : QualCost qpqual_cost;
1372 : Cost cpu_per_tuple;
1373 : QualCost tid_qual_cost;
1374 : double ntuples;
1375 : double nseqpages;
1376 : double spc_random_page_cost;
1377 : double spc_seq_page_cost;
1378 :
1379 : /* Should only be applied to base relations */
1380 : Assert(baserel->relid > 0);
1381 : Assert(baserel->rtekind == RTE_RELATION);
1382 :
1383 : /* Mark the path with the correct row estimate */
1384 1944 : if (param_info)
1385 0 : path->rows = param_info->ppi_rows;
1386 : else
1387 1944 : path->rows = baserel->rows;
1388 :
1389 : /* Count how many tuples and pages we expect to scan */
1390 1944 : selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
1391 : JOIN_INNER, NULL);
1392 1944 : pages = ceil(selectivity * baserel->pages);
1393 :
1394 1944 : if (pages <= 0.0)
1395 42 : pages = 1.0;
1396 :
1397 : /*
1398 : * The first page in a range requires a random seek, but each subsequent
1399 : * page is just a normal sequential page read. NOTE: it's desirable for
1400 : * TID Range Scans to cost more than the equivalent Sequential Scans,
1401 : * because Seq Scans have some performance advantages such as scan
1402 : * synchronization and parallelizability, and we'd prefer one of them to
1403 : * be picked unless a TID Range Scan really is better.
1404 : */
1405 1944 : ntuples = selectivity * baserel->tuples;
1406 1944 : nseqpages = pages - 1.0;
1407 :
1408 : /*
1409 : * The TID qual expressions will be computed once, any other baserestrict
1410 : * quals once per retrieved tuple.
1411 : */
1412 1944 : cost_qual_eval(&tid_qual_cost, tidrangequals, root);
1413 :
1414 : /* fetch estimated page cost for tablespace containing table */
1415 1944 : get_tablespace_page_costs(baserel->reltablespace,
1416 : &spc_random_page_cost,
1417 : &spc_seq_page_cost);
1418 :
1419 : /* disk costs; 1 random page and the remainder as seq pages */
1420 1944 : run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
1421 :
1422 : /* Add scanning CPU costs */
1423 1944 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1424 :
1425 : /*
1426 : * XXX currently we assume TID quals are a subset of qpquals at this
1427 : * point; they will be removed (if possible) when we create the plan, so
1428 : * we subtract their cost from the total qpqual cost. (If the TID quals
1429 : * can't be removed, this is a mistake and we're going to underestimate
1430 : * the CPU cost a bit.)
1431 : */
1432 1944 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1433 1944 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1434 1944 : tid_qual_cost.per_tuple;
1435 1944 : run_cost += cpu_per_tuple * ntuples;
1436 :
1437 : /* tlist eval costs are paid per output row, not per tuple scanned */
1438 1944 : startup_cost += path->pathtarget->cost.startup;
1439 1944 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1440 :
1441 : /* we should not generate this path type when enable_tidscan=false */
1442 : Assert(enable_tidscan);
1443 1944 : path->disabled_nodes = 0;
1444 1944 : path->startup_cost = startup_cost;
1445 1944 : path->total_cost = startup_cost + run_cost;
1446 1944 : }
1447 :
1448 : /*
1449 : * cost_subqueryscan
1450 : * Determines and returns the cost of scanning a subquery RTE.
1451 : *
1452 : * 'baserel' is the relation to be scanned
1453 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1454 : * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1455 : */
1456 : void
1457 53882 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1458 : RelOptInfo *baserel, ParamPathInfo *param_info,
1459 : bool trivial_pathtarget)
1460 : {
1461 : Cost startup_cost;
1462 : Cost run_cost;
1463 : List *qpquals;
1464 : QualCost qpqual_cost;
1465 : Cost cpu_per_tuple;
1466 :
1467 : /* Should only be applied to base relations that are subqueries */
1468 : Assert(baserel->relid > 0);
1469 : Assert(baserel->rtekind == RTE_SUBQUERY);
1470 :
1471 : /*
1472 : * We compute the rowcount estimate as the subplan's estimate times the
1473 : * selectivity of relevant restriction clauses. In simple cases this will
1474 : * come out the same as baserel->rows; but when dealing with parallelized
1475 : * paths we must do it like this to get the right answer.
1476 : */
1477 53882 : if (param_info)
1478 606 : qpquals = list_concat_copy(param_info->ppi_clauses,
1479 606 : baserel->baserestrictinfo);
1480 : else
1481 53276 : qpquals = baserel->baserestrictinfo;
1482 :
1483 53882 : path->path.rows = clamp_row_est(path->subpath->rows *
1484 53882 : clauselist_selectivity(root,
1485 : qpquals,
1486 : 0,
1487 : JOIN_INNER,
1488 : NULL));
1489 :
1490 : /*
1491 : * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1492 : * any restriction clauses and tlist that will be attached to the
1493 : * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1494 : * projection overhead.
1495 : */
1496 53882 : path->path.disabled_nodes = path->subpath->disabled_nodes;
1497 53882 : path->path.startup_cost = path->subpath->startup_cost;
1498 53882 : path->path.total_cost = path->subpath->total_cost;
1499 :
1500 : /*
1501 : * However, if there are no relevant restriction clauses and the
1502 : * pathtarget is trivial, then we expect that setrefs.c will optimize away
1503 : * the SubqueryScan plan node altogether, so we should just make its cost
1504 : * and rowcount equal to the input path's.
1505 : *
1506 : * Note: there are some edge cases where createplan.c will apply a
1507 : * different targetlist to the SubqueryScan node, thus falsifying our
1508 : * current estimate of whether the target is trivial, and making the cost
1509 : * estimate (though not the rowcount) wrong. It does not seem worth the
1510 : * extra complication to try to account for that exactly, especially since
1511 : * that behavior falsifies other cost estimates as well.
1512 : */
1513 53882 : if (qpquals == NIL && trivial_pathtarget)
1514 25220 : return;
1515 :
1516 28662 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1517 :
1518 28662 : startup_cost = qpqual_cost.startup;
1519 28662 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1520 28662 : run_cost = cpu_per_tuple * path->subpath->rows;
1521 :
1522 : /* tlist eval costs are paid per output row, not per tuple scanned */
1523 28662 : startup_cost += path->path.pathtarget->cost.startup;
1524 28662 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1525 :
1526 28662 : path->path.startup_cost += startup_cost;
1527 28662 : path->path.total_cost += startup_cost + run_cost;
1528 : }
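 :
 : /*
 :  * To illustrate the two cases above (a sketch, assuming the default
 :  * cpu_tuple_cost = 0.01): with no quals and a trivial pathtarget the
 :  * SubqueryScan's cost is exactly its subpath's cost, since setrefs.c is
 :  * expected to remove the node.  Otherwise, a subpath returning 1000 rows
 :  * adds at least 1000 * 0.01 = 10.0 units of run cost for the scan's
 :  * selection/projection overhead, plus any qual and tlist eval costs.
 :  */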
1529 :
1530 : /*
1531 : * cost_functionscan
1532 : * Determines and returns the cost of scanning a function RTE.
1533 : *
1534 : * 'baserel' is the relation to be scanned
1535 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1536 : */
1537 : void
1538 51268 : cost_functionscan(Path *path, PlannerInfo *root,
1539 : RelOptInfo *baserel, ParamPathInfo *param_info)
1540 : {
1541 51268 : Cost startup_cost = 0;
1542 51268 : Cost run_cost = 0;
1543 : QualCost qpqual_cost;
1544 : Cost cpu_per_tuple;
1545 : RangeTblEntry *rte;
1546 : QualCost exprcost;
1547 :
1548 : /* Should only be applied to base relations that are functions */
1549 : Assert(baserel->relid > 0);
1550 51268 : rte = planner_rt_fetch(baserel->relid, root);
1551 : Assert(rte->rtekind == RTE_FUNCTION);
1552 :
1553 : /* Mark the path with the correct row estimate */
1554 51268 : if (param_info)
1555 8210 : path->rows = param_info->ppi_rows;
1556 : else
1557 43058 : path->rows = baserel->rows;
1558 :
1559 : /*
1560 : * Estimate costs of executing the function expression(s).
1561 : *
1562 : * Currently, nodeFunctionscan.c always executes the functions to
1563 : * completion before returning any rows, and caches the results in a
1564 : * tuplestore. So the function eval cost is all startup cost, and per-row
1565 : * costs are minimal.
1566 : *
1567 : * XXX in principle we ought to charge tuplestore spill costs if the
1568 : * number of rows is large. However, given how phony our rowcount
1569 : * estimates for functions tend to be, there's not a lot of point in that
1570 : * refinement right now.
1571 : */
1572 51268 : cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1573 :
1574 51268 : startup_cost += exprcost.startup + exprcost.per_tuple;
1575 :
1576 : /* Add scanning CPU costs */
1577 51268 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1578 :
1579 51268 : startup_cost += qpqual_cost.startup;
1580 51268 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1581 51268 : run_cost += cpu_per_tuple * baserel->tuples;
1582 :
1583 : /* tlist eval costs are paid per output row, not per tuple scanned */
1584 51268 : startup_cost += path->pathtarget->cost.startup;
1585 51268 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1586 :
1587 51268 : path->disabled_nodes = 0;
1588 51268 : path->startup_cost = startup_cost;
1589 51268 : path->total_cost = startup_cost + run_cost;
1590 51268 : }
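 :
 : /*
 :  * Example of the startup-heavy shape this produces (a sketch with an
 :  * assumed function cost): if evaluating the function expression(s) costs,
 :  * say, 25.0 units in total, all 25.0 lands in startup_cost because the
 :  * tuplestore is filled before the first row is returned.  With
 :  * baserel->tuples = 1000 and no restriction quals, run_cost is then about
 :  * 1000 * cpu_tuple_cost = 10.0 at the default cpu_tuple_cost of 0.01.
 :  */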
1591 :
1592 : /*
1593 : * cost_tablefuncscan
1594 : * Determines and returns the cost of scanning a table function.
1595 : *
1596 : * 'baserel' is the relation to be scanned
1597 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1598 : */
1599 : void
1600 626 : cost_tablefuncscan(Path *path, PlannerInfo *root,
1601 : RelOptInfo *baserel, ParamPathInfo *param_info)
1602 : {
1603 626 : Cost startup_cost = 0;
1604 626 : Cost run_cost = 0;
1605 : QualCost qpqual_cost;
1606 : Cost cpu_per_tuple;
1607 : RangeTblEntry *rte;
1608 : QualCost exprcost;
1609 :
1610 : /* Should only be applied to base relations that are table functions */
1611 : Assert(baserel->relid > 0);
1612 626 : rte = planner_rt_fetch(baserel->relid, root);
1613 : Assert(rte->rtekind == RTE_TABLEFUNC);
1614 :
1615 : /* Mark the path with the correct row estimate */
1616 626 : if (param_info)
1617 234 : path->rows = param_info->ppi_rows;
1618 : else
1619 392 : path->rows = baserel->rows;
1620 :
1621 : /*
1622 : * Estimate costs of executing the table func expression(s).
1623 : *
1624 : * XXX in principle we ought to charge tuplestore spill costs if the
1625 : * number of rows is large. However, given how phony our rowcount
1626 : * estimates for tablefuncs tend to be, there's not a lot of point in that
1627 : * refinement right now.
1628 : */
1629 626 : cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1630 :
1631 626 : startup_cost += exprcost.startup + exprcost.per_tuple;
1632 :
1633 : /* Add scanning CPU costs */
1634 626 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1635 :
1636 626 : startup_cost += qpqual_cost.startup;
1637 626 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1638 626 : run_cost += cpu_per_tuple * baserel->tuples;
1639 :
1640 : /* tlist eval costs are paid per output row, not per tuple scanned */
1641 626 : startup_cost += path->pathtarget->cost.startup;
1642 626 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1643 :
1644 626 : path->disabled_nodes = 0;
1645 626 : path->startup_cost = startup_cost;
1646 626 : path->total_cost = startup_cost + run_cost;
1647 626 : }
1648 :
1649 : /*
1650 : * cost_valuesscan
1651 : * Determines and returns the cost of scanning a VALUES RTE.
1652 : *
1653 : * 'baserel' is the relation to be scanned
1654 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1655 : */
1656 : void
1657 8264 : cost_valuesscan(Path *path, PlannerInfo *root,
1658 : RelOptInfo *baserel, ParamPathInfo *param_info)
1659 : {
1660 8264 : Cost startup_cost = 0;
1661 8264 : Cost run_cost = 0;
1662 : QualCost qpqual_cost;
1663 : Cost cpu_per_tuple;
1664 :
1665 : /* Should only be applied to base relations that are values lists */
1666 : Assert(baserel->relid > 0);
1667 : Assert(baserel->rtekind == RTE_VALUES);
1668 :
1669 : /* Mark the path with the correct row estimate */
1670 8264 : if (param_info)
1671 66 : path->rows = param_info->ppi_rows;
1672 : else
1673 8198 : path->rows = baserel->rows;
1674 :
1675 : /*
1676 : * For now, estimate list evaluation cost at one operator eval per list
1677 : * (probably pretty bogus, but is it worth being smarter?)
1678 : */
1679 8264 : cpu_per_tuple = cpu_operator_cost;
1680 :
1681 : /* Add scanning CPU costs */
1682 8264 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1683 :
1684 8264 : startup_cost += qpqual_cost.startup;
1685 8264 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1686 8264 : run_cost += cpu_per_tuple * baserel->tuples;
1687 :
1688 : /* tlist eval costs are paid per output row, not per tuple scanned */
1689 8264 : startup_cost += path->pathtarget->cost.startup;
1690 8264 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1691 :
1692 8264 : path->disabled_nodes = 0;
1693 8264 : path->startup_cost = startup_cost;
1694 8264 : path->total_cost = startup_cost + run_cost;
1695 8264 : }
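 :
 : /*
 :  * Worked example (a sketch, assuming default cost parameters): a 3-row
 :  * VALUES list with no quals is charged (cpu_operator_cost +
 :  * cpu_tuple_cost) per row, i.e. 3 * (0.0025 + 0.01) = 0.0375 units of
 :  * run cost, plus any tlist evaluation costs.
 :  */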
1696 :
1697 : /*
1698 : * cost_ctescan
1699 : * Determines and returns the cost of scanning a CTE RTE.
1700 : *
1701 : * Note: this is used for both self-reference and regular CTEs; the
1702 : * possible cost differences are below the threshold of what we could
1703 : * estimate accurately anyway. Note that the costs of evaluating the
1704 : * referenced CTE query are added into the final plan as initplan costs,
1705 : * and should NOT be counted here.
1706 : */
1707 : void
1708 5190 : cost_ctescan(Path *path, PlannerInfo *root,
1709 : RelOptInfo *baserel, ParamPathInfo *param_info)
1710 : {
1711 5190 : Cost startup_cost = 0;
1712 5190 : Cost run_cost = 0;
1713 : QualCost qpqual_cost;
1714 : Cost cpu_per_tuple;
1715 :
1716 : /* Should only be applied to base relations that are CTEs */
1717 : Assert(baserel->relid > 0);
1718 : Assert(baserel->rtekind == RTE_CTE);
1719 :
1720 : /* Mark the path with the correct row estimate */
1721 5190 : if (param_info)
1722 0 : path->rows = param_info->ppi_rows;
1723 : else
1724 5190 : path->rows = baserel->rows;
1725 :
1726 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1727 5190 : cpu_per_tuple = cpu_tuple_cost;
1728 :
1729 : /* Add scanning CPU costs */
1730 5190 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1731 :
1732 5190 : startup_cost += qpqual_cost.startup;
1733 5190 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1734 5190 : run_cost += cpu_per_tuple * baserel->tuples;
1735 :
1736 : /* tlist eval costs are paid per output row, not per tuple scanned */
1737 5190 : startup_cost += path->pathtarget->cost.startup;
1738 5190 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1739 :
1740 5190 : path->disabled_nodes = 0;
1741 5190 : path->startup_cost = startup_cost;
1742 5190 : path->total_cost = startup_cost + run_cost;
1743 5190 : }
1744 :
1745 : /*
1746 : * cost_namedtuplestorescan
1747 : * Determines and returns the cost of scanning a named tuplestore.
1748 : */
1749 : void
1750 478 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
1751 : RelOptInfo *baserel, ParamPathInfo *param_info)
1752 : {
1753 478 : Cost startup_cost = 0;
1754 478 : Cost run_cost = 0;
1755 : QualCost qpqual_cost;
1756 : Cost cpu_per_tuple;
1757 :
1758 : /* Should only be applied to base relations that are Tuplestores */
1759 : Assert(baserel->relid > 0);
1760 : Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1761 :
1762 : /* Mark the path with the correct row estimate */
1763 478 : if (param_info)
1764 0 : path->rows = param_info->ppi_rows;
1765 : else
1766 478 : path->rows = baserel->rows;
1767 :
1768 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1769 478 : cpu_per_tuple = cpu_tuple_cost;
1770 :
1771 : /* Add scanning CPU costs */
1772 478 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1773 :
1774 478 : startup_cost += qpqual_cost.startup;
1775 478 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1776 478 : run_cost += cpu_per_tuple * baserel->tuples;
1777 :
1778 478 : path->disabled_nodes = 0;
1779 478 : path->startup_cost = startup_cost;
1780 478 : path->total_cost = startup_cost + run_cost;
1781 478 : }
1782 :
1783 : /*
1784 : * cost_resultscan
1785 : * Determines and returns the cost of scanning an RTE_RESULT relation.
1786 : */
1787 : void
1788 4268 : cost_resultscan(Path *path, PlannerInfo *root,
1789 : RelOptInfo *baserel, ParamPathInfo *param_info)
1790 : {
1791 4268 : Cost startup_cost = 0;
1792 4268 : Cost run_cost = 0;
1793 : QualCost qpqual_cost;
1794 : Cost cpu_per_tuple;
1795 :
1796 : /* Should only be applied to RTE_RESULT base relations */
1797 : Assert(baserel->relid > 0);
1798 : Assert(baserel->rtekind == RTE_RESULT);
1799 :
1800 : /* Mark the path with the correct row estimate */
1801 4268 : if (param_info)
1802 156 : path->rows = param_info->ppi_rows;
1803 : else
1804 4112 : path->rows = baserel->rows;
1805 :
1806 : /* We charge qual cost plus cpu_tuple_cost */
1807 4268 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1808 :
1809 4268 : startup_cost += qpqual_cost.startup;
1810 4268 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1811 4268 : run_cost += cpu_per_tuple * baserel->tuples;
1812 :
1813 4268 : path->disabled_nodes = 0;
1814 4268 : path->startup_cost = startup_cost;
1815 4268 : path->total_cost = startup_cost + run_cost;
1816 4268 : }
1817 :
1818 : /*
1819 : * cost_recursive_union
1820 : * Determines and returns the cost of performing a recursive union,
1821 : * and also the estimated output size.
1822 : *
1823 : * We are given Paths for the nonrecursive and recursive terms.
1824 : */
1825 : void
1826 926 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1827 : {
1828 : Cost startup_cost;
1829 : Cost total_cost;
1830 : double total_rows;
1831 :
1832 : /* We probably have decent estimates for the non-recursive term */
1833 926 : startup_cost = nrterm->startup_cost;
1834 926 : total_cost = nrterm->total_cost;
1835 926 : total_rows = nrterm->rows;
1836 :
1837 : /*
1838 : * We arbitrarily assume that about 10 recursive iterations will be
1839 : * needed, and that we've managed to get a good fix on the cost and output
1840 : * size of each one of them. These are mighty shaky assumptions but it's
1841 : * hard to see how to do better.
1842 : */
1843 926 : total_cost += 10 * rterm->total_cost;
1844 926 : total_rows += 10 * rterm->rows;
1845 :
1846 : /*
1847 : * Also charge cpu_tuple_cost per row to account for the costs of
1848 : * manipulating the tuplestores. (We don't worry about possible
1849 : * spill-to-disk costs.)
1850 : */
1851 926 : total_cost += cpu_tuple_cost * total_rows;
1852 :
1853 926 : runion->disabled_nodes = nrterm->disabled_nodes + rterm->disabled_nodes;
1854 926 : runion->startup_cost = startup_cost;
1855 926 : runion->total_cost = total_cost;
1856 926 : runion->rows = total_rows;
1857 926 : runion->pathtarget->width = Max(nrterm->pathtarget->width,
1858 : rterm->pathtarget->width);
1859 926 : }
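 :
 : /*
 :  * Worked example of the ten-iteration assumption (a sketch): with the
 :  * non-recursive term estimated at 1 row and the recursive term estimated
 :  * at 10 rows per iteration, total_rows = 1 + 10 * 10 = 101, and the
 :  * tuplestore overhead adds cpu_tuple_cost * 101 ~= 1.01 units at the
 :  * default cpu_tuple_cost of 0.01.
 :  */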
1860 :
1861 : /*
1862 : * cost_tuplesort
1863 : * Determines and returns the cost of sorting a relation using tuplesort,
1864 : * not including the cost of reading the input data.
1865 : *
1866 : * If the total volume of data to sort is less than sort_mem, we will do
1867 : * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1868 : * comparisons for t tuples.
1869 : *
1870 : * If the total volume exceeds sort_mem, we switch to a tape-style merge
1871 : * algorithm. There will still be about t*log2(t) tuple comparisons in
1872 : * total, but we will also need to write and read each tuple once per
1873 : * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1874 : * number of initial runs formed and M is the merge order used by tuplesort.c.
1875 : * Since the average initial run should be about sort_mem, we have
1876 : * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1877 : * cpu = comparison_cost * t * log2(t)
1878 : *
1879 : * If the sort is bounded (i.e., only the first k result tuples are needed)
1880 : * and k tuples can fit into sort_mem, we use a heap method that keeps only
1881 : * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1882 : *
1883 : * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1884 : * accesses (XXX can't we refine that guess?)
1885 : *
1886 : * By default, we charge two operator evals per tuple comparison, which should
1887 : * be in the right ballpark in most cases. The caller can tweak this by
1888 : * specifying nonzero comparison_cost; typically that's used for any extra
1889 : * work that has to be done to prepare the inputs to the comparison operators.
1890 : *
1891 : * 'tuples' is the number of tuples in the relation
1892 : * 'width' is the average tuple width in bytes
1893 : * 'comparison_cost' is the extra cost per comparison, if any
1894 : * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1895 : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1896 : */
1897 : static void
1898 2065884 : cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1899 : double tuples, int width,
1900 : Cost comparison_cost, int sort_mem,
1901 : double limit_tuples)
1902 : {
1903 2065884 : double input_bytes = relation_byte_size(tuples, width);
1904 : double output_bytes;
1905 : double output_tuples;
1906 2065884 : int64 sort_mem_bytes = sort_mem * (int64) 1024;
1907 :
1908 : /*
1909 : * We want to be sure the cost of a sort is never estimated as zero, even
1910 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1911 : */
1912 2065884 : if (tuples < 2.0)
1913 548158 : tuples = 2.0;
1914 :
1915 : /* Include the default cost-per-comparison */
1916 2065884 : comparison_cost += 2.0 * cpu_operator_cost;
1917 :
1918 : /* Do we have a useful LIMIT? */
1919 2065884 : if (limit_tuples > 0 && limit_tuples < tuples)
1920 : {
1921 1848 : output_tuples = limit_tuples;
1922 1848 : output_bytes = relation_byte_size(output_tuples, width);
1923 : }
1924 : else
1925 : {
1926 2064036 : output_tuples = tuples;
1927 2064036 : output_bytes = input_bytes;
1928 : }
1929 :
1930 2065884 : if (output_bytes > sort_mem_bytes)
1931 : {
1932 : /*
1933 : * We'll have to use a disk-based sort of all the tuples
1934 : */
1935 18448 : double npages = ceil(input_bytes / BLCKSZ);
1936 18448 : double nruns = input_bytes / sort_mem_bytes;
1937 18448 : double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1938 : double log_runs;
1939 : double npageaccesses;
1940 :
1941 : /*
1942 : * CPU costs
1943 : *
1944 : * Assume about N log2 N comparisons
1945 : */
1946 18448 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1947 :
1948 : /* Disk costs */
1949 :
1950 : /* Compute logM(r) as log(r) / log(M) */
1951 18448 : if (nruns > mergeorder)
1952 4848 : log_runs = ceil(log(nruns) / log(mergeorder));
1953 : else
1954 13600 : log_runs = 1.0;
1955 18448 : npageaccesses = 2.0 * npages * log_runs;
1956 : /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1957 18448 : *startup_cost += npageaccesses *
1958 18448 : (seq_page_cost * 0.75 + random_page_cost * 0.25);
1959 : }
1960 2047436 : else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1961 : {
1962 : /*
1963 : * We'll use a bounded heap-sort keeping just K tuples in memory, for
1964 : * a total number of tuple comparisons of N log2 K; but the constant
1965 : * factor is a bit higher than for quicksort. Tweak it so that the
1966 : * cost curve is continuous at the crossover point.
1967 : */
1968 1370 : *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
1969 : }
1970 : else
1971 : {
1972 : /* We'll use plain quicksort on all the input tuples */
1973 2046066 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1974 : }
1975 :
1976 : /*
1977 : * Also charge a small amount (arbitrarily set equal to operator cost) per
1978 : * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1979 : * doesn't do qual-checking or projection, so it has less overhead than
1980 : * most plan nodes. Note it's correct to use tuples not output_tuples
1981 : * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1982 : * counting the LIMIT otherwise.
1983 : */
1984 2065884 : *run_cost = cpu_operator_cost * tuples;
1985 2065884 : }
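 :
 : /*
 :  * Worked example for the in-memory case (a sketch, assuming the default
 :  * cpu_operator_cost = 0.0025 and no caller-supplied comparison_cost):
 :  * quicksorting t = 1000 tuples that fit in sort_mem charges
 :  *		startup = (2 * 0.0025) * 1000 * log2(1000) ~= 49.8
 :  * plus a run cost of 0.0025 * 1000 = 2.5 for extracting the tuples.
 :  */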
1986 :
1987 : /*
1988 : * cost_incremental_sort
1989 : * Determines and returns the cost of sorting a relation incrementally, when
1990 : * the input path is presorted by a prefix of the pathkeys.
1991 : *
1992 : * 'presorted_keys' is the number of leading pathkeys by which the input path
1993 : * is sorted.
1994 : *
1995 : * We estimate the number of groups into which the relation is divided by the
1996 : * leading pathkeys, and then calculate the cost of sorting a single group
1997 : * with tuplesort using cost_tuplesort().
1998 : */
1999 : void
2000 11372 : cost_incremental_sort(Path *path,
2001 : PlannerInfo *root, List *pathkeys, int presorted_keys,
2002 : int input_disabled_nodes,
2003 : Cost input_startup_cost, Cost input_total_cost,
2004 : double input_tuples, int width, Cost comparison_cost, int sort_mem,
2005 : double limit_tuples)
2006 : {
2007 : Cost startup_cost,
2008 : run_cost,
2009 11372 : input_run_cost = input_total_cost - input_startup_cost;
2010 : double group_tuples,
2011 : input_groups;
2012 : Cost group_startup_cost,
2013 : group_run_cost,
2014 : group_input_run_cost;
2015 11372 : List *presortedExprs = NIL;
2016 : ListCell *l;
2017 11372 : bool unknown_varno = false;
2018 :
2019 : Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
2020 :
2021 : /*
2022 : * We want to be sure the cost of a sort is never estimated as zero, even
2023 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
2024 : */
2025 11372 : if (input_tuples < 2.0)
2026 6704 : input_tuples = 2.0;
2027 :
2028 : /* Default estimate of number of groups, capped to one group per row. */
2029 11372 : input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
2030 :
2031 : /*
2032 : * Extract presorted keys as list of expressions.
2033 : *
2034 : * We need to be careful about Vars containing "varno 0" which might have
2035 : * been introduced by generate_append_tlist, which would confuse
2036 : * estimate_num_groups (in fact it'd fail for such expressions). See
2037 : * recurse_set_operations which has to deal with the same issue.
2038 : *
2039 : * Unlike recurse_set_operations we can't access the original target list
2040 : * here, and even if we could, it's not very clear how useful that would be
2041 : * for a set operation combining multiple tables. So we simply detect if
2042 : * there are any expressions with "varno 0" and use the default
2043 : * DEFAULT_NUM_DISTINCT in that case.
2044 : *
2045 : * We might also use either 1.0 (a single group) or input_tuples (each row
2046 : * being a separate group), pretty much the worst and best case for
2047 : * incremental sort. But those are extreme cases and using something in
2048 : * between seems reasonable. Furthermore, generate_append_tlist is used
2049 : * for set operations, which are likely to produce mostly unique output
2050 : * anyway - from that standpoint DEFAULT_NUM_DISTINCT is a defensive
2051 : * choice that still keeps the startup cost low.
2052 : */
2053 11468 : foreach(l, pathkeys)
2054 : {
2055 11468 : PathKey *key = (PathKey *) lfirst(l);
2056 11468 : EquivalenceMember *member = (EquivalenceMember *)
2057 11468 : linitial(key->pk_eclass->ec_members);
2058 :
2059 : /*
2060 : * Check if the expression contains Var with "varno 0" so that we
2061 : * don't call estimate_num_groups in that case.
2062 : */
2063 11468 : if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2064 : {
2065 10 : unknown_varno = true;
2066 10 : break;
2067 : }
2068 :
2069 : /* expression not containing any Vars with "varno 0" */
2070 11458 : presortedExprs = lappend(presortedExprs, member->em_expr);
2071 :
2072 11458 : if (foreach_current_index(l) + 1 >= presorted_keys)
2073 11362 : break;
2074 : }
2075 :
2076 : /* Estimate the number of groups with equal presorted keys. */
2077 11372 : if (!unknown_varno)
2078 11362 : input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
2079 : NULL, NULL);
2080 :
2081 11372 : group_tuples = input_tuples / input_groups;
2082 11372 : group_input_run_cost = input_run_cost / input_groups;
2083 :
2084 : /*
2085 : * Estimate the average cost of sorting of one group where presorted keys
2086 : * are equal.
2087 : */
2088 11372 : cost_tuplesort(&group_startup_cost, &group_run_cost,
2089 : group_tuples, width, comparison_cost, sort_mem,
2090 : limit_tuples);
2091 :
2092 : /*
2093 : * Startup cost of incremental sort is the startup cost of its first group
2094 : * plus the cost of its input.
2095 : */
2096 11372 : startup_cost = group_startup_cost + input_startup_cost +
2097 : group_input_run_cost;
2098 :
2099 : /*
2100 : * After we started producing tuples from the first group, the cost of
2101 : * producing all the tuples is given by the cost to finish processing this
2102 : * group, plus the total cost to process the remaining groups, plus the
2103 : * remaining cost of input.
2104 : */
2105 11372 : run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
2106 11372 : (input_groups - 1) + group_input_run_cost * (input_groups - 1);
2107 :
2108 : /*
2109 : * Incremental sort adds some overhead by itself. Firstly, it has to
2110 : * detect the sort groups. This is roughly equal to one extra copy and
2111 : * comparison per tuple.
2112 : */
2113 11372 : run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2114 :
2115 : /*
2116 : * Additionally, we charge double cpu_tuple_cost for each input group to
2117 : * account for the tuplesort_reset that's performed after each group.
2118 : */
2119 11372 : run_cost += 2.0 * cpu_tuple_cost * input_groups;
2120 :
2121 11372 : path->rows = input_tuples;
2122 :
2123 : /* should not generate these paths when enable_incremental_sort=false */
2124 : Assert(enable_incremental_sort);
2125 11372 : path->disabled_nodes = input_disabled_nodes;
2126 :
2127 11372 : path->startup_cost = startup_cost;
2128 11372 : path->total_cost = startup_cost + run_cost;
2129 11372 : }
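 :
 : /*
 :  * Worked example of the group-wise costing above (a sketch): with
 :  * input_tuples = 10000 and an estimated input_groups = 100, each group
 :  * sorts group_tuples = 100 rows and consumes 1/100th of the input run
 :  * cost.  Startup pays only for the first group, which is what lets an
 :  * incremental sort beat a full sort under a small LIMIT; the other 99
 :  * groups are charged to run_cost.
 :  */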
2130 :
2131 : /*
2132 : * cost_sort
2133 : * Determines and returns the cost of sorting a relation, including
2134 : * the cost of reading the input data.
2135 : *
2136 : * NOTE: some callers currently pass NIL for pathkeys because they
2137 : * can't conveniently supply the sort keys. Since this routine doesn't
2138 : * currently do anything with pathkeys anyway, that doesn't matter...
2139 : * but if it ever does, it should react gracefully to lack of key data.
2140 : * (Actually, the thing we'd most likely be interested in is just the number
2141 : * of sort keys, which all callers *could* supply.)
2142 : */
2143 : void
2144 2054512 : cost_sort(Path *path, PlannerInfo *root,
2145 : List *pathkeys, int input_disabled_nodes,
2146 : Cost input_cost, double tuples, int width,
2147 : Cost comparison_cost, int sort_mem,
2148 : double limit_tuples)
2150 : {
2151 : Cost startup_cost;
2152 : Cost run_cost;
2153 :
2154 2054512 : cost_tuplesort(&startup_cost, &run_cost,
2155 : tuples, width,
2156 : comparison_cost, sort_mem,
2157 : limit_tuples);
2158 :
2159 2054512 : startup_cost += input_cost;
2160 :
2161 2054512 : path->rows = tuples;
2162 2054512 : path->disabled_nodes = input_disabled_nodes + (enable_sort ? 0 : 1);
2163 2054512 : path->startup_cost = startup_cost;
2164 2054512 : path->total_cost = startup_cost + run_cost;
2165 2054512 : }
2166 :
2167 : /*
2168 : * append_nonpartial_cost
2169 : * Estimate the cost of the non-partial paths in a Parallel Append.
2170 : * The non-partial paths are assumed to be the first "numpaths" paths
2171 : * from the subpaths list, and to be in order of decreasing cost.
2172 : */
2173 : static Cost
2174 25258 : append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
2175 : {
2176 : Cost *costarr;
2177 : int arrlen;
2178 : ListCell *l;
2179 : ListCell *cell;
2180 : int path_index;
2181 : int min_index;
2182 : int max_index;
2183 :
2184 25258 : if (numpaths == 0)
2185 20594 : return 0;
2186 :
2187 : /*
2188 : * Array length is number of workers or number of relevant paths,
2189 : * whichever is less.
2190 : */
2191 4664 : arrlen = Min(parallel_workers, numpaths);
2192 4664 : costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
2193 :
2194 : /* The first few paths will each be claimed by a different worker. */
2195 4664 : path_index = 0;
2196 13518 : foreach(cell, subpaths)
2197 : {
2198 10150 : Path *subpath = (Path *) lfirst(cell);
2199 :
2200 10150 : if (path_index == arrlen)
2201 1296 : break;
2202 8854 : costarr[path_index++] = subpath->total_cost;
2203 : }
2204 :
2205 : /*
2206 : * Since subpaths are sorted by decreasing cost, the last one will have
2207 : * the minimum cost.
2208 : */
2209 4664 : min_index = arrlen - 1;
2210 :
2211 : /*
2212 : * For each of the remaining subpaths, add its cost to the array element
2213 : * with minimum cost.
2214 : */
2215 9350 : for_each_cell(l, subpaths, cell)
2216 : {
2217 5232 : Path *subpath = (Path *) lfirst(l);
2218 :
2219 : /* Consider only the non-partial paths */
2220 5232 : if (path_index++ == numpaths)
2221 546 : break;
2222 :
2223 4686 : costarr[min_index] += subpath->total_cost;
2224 :
2225 : /* Update the new min cost array index */
2226 4686 : min_index = 0;
2227 14094 : for (int i = 0; i < arrlen; i++)
2228 : {
2229 9408 : if (costarr[i] < costarr[min_index])
2230 1526 : min_index = i;
2231 : }
2232 : }
2233 :
2234 : /* Return the highest cost from the array */
2235 4664 : max_index = 0;
2236 13518 : for (int i = 0; i < arrlen; i++)
2237 : {
2238 8854 : if (costarr[i] > costarr[max_index])
2239 406 : max_index = i;
2240 : }
2241 :
2242 4664 : return costarr[max_index];
2243 : }
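 :
 : /*
 :  * Worked example of the algorithm above (a sketch): with 3 workers and 4
 :  * non-partial paths of costs {8, 5, 3, 2} (decreasing order), the first
 :  * three paths fill costarr = {8, 5, 3}.  The remaining path is added to
 :  * the current minimum, giving {8, 5, 5}, and the function returns the
 :  * maximum, 8 --- the estimated completion time of the slowest worker.
 :  */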
2244 :
2245 : /*
2246 : * cost_append
2247 : * Determines and returns the cost of an Append node.
2248 : */
2249 : void
2250 68902 : cost_append(AppendPath *apath, PlannerInfo *root)
2251 : {
2252 : ListCell *l;
2253 :
2254 68902 : apath->path.disabled_nodes = 0;
2255 68902 : apath->path.startup_cost = 0;
2256 68902 : apath->path.total_cost = 0;
2257 68902 : apath->path.rows = 0;
2258 :
2259 68902 : if (apath->subpaths == NIL)
2260 1964 : return;
2261 :
2262 66938 : if (!apath->path.parallel_aware)
2263 : {
2264 41680 : List *pathkeys = apath->path.pathkeys;
2265 :
2266 41680 : if (pathkeys == NIL)
2267 : {
2268 39488 : Path *firstsubpath = (Path *) linitial(apath->subpaths);
2269 :
2270 : /*
2271 : * For an unordered, non-parallel-aware Append we take the startup
2272 : * cost as the startup cost of the first subpath.
2273 : */
2274 39488 : apath->path.startup_cost = firstsubpath->startup_cost;
2275 :
2276 : /*
2277 : * Compute rows, number of disabled nodes, and total cost as sums
2278 : * of underlying subplan values.
2279 : */
2280 154480 : foreach(l, apath->subpaths)
2281 : {
2282 114992 : Path *subpath = (Path *) lfirst(l);
2283 :
2284 114992 : apath->path.rows += subpath->rows;
2285 114992 : apath->path.disabled_nodes += subpath->disabled_nodes;
2286 114992 : apath->path.total_cost += subpath->total_cost;
2287 : }
2288 : }
2289 : else
2290 : {
2291 : /*
2292 : * For an ordered, non-parallel-aware Append we take the startup
2293 : * cost as the sum of the subpath startup costs. This ensures
2294 : * that we don't underestimate the startup cost when a query's
2295 : * LIMIT is such that several of the children have to be run to
2296 : * satisfy it. This might be overkill --- another plausible hack
2297 : * would be to take the Append's startup cost as the maximum of
2298 : * the child startup costs. But we don't want to risk believing
2299 : * that an ORDER BY LIMIT query can be satisfied at small cost
2300 : * when the first child has small startup cost but later ones
2301 : * don't. (If we had the ability to deal with nonlinear cost
2302 : * interpolation for partial retrievals, we would not need to be
2303 : * so conservative about this.)
2304 : *
2305 : * This case is also different from the above in that we have to
2306 : * account for possibly injecting sorts into subpaths that aren't
2307 : * natively ordered.
2308 : */
2309 8508 : foreach(l, apath->subpaths)
2310 : {
2311 6316 : Path *subpath = (Path *) lfirst(l);
2312 : int presorted_keys;
2313 : Path sort_path; /* dummy for result of
2314 : * cost_sort/cost_incremental_sort */
2315 :
2316 6316 : if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
2317 : &presorted_keys))
2318 : {
2319 : /*
2320 : * We'll need to insert a Sort node, so include costs for
2321 : * that. We choose to use incremental sort if it is
2322 : * enabled and there are presorted keys; otherwise we use
2323 : * full sort.
2324 : *
2325 : * We can use the parent's LIMIT if any, since we
2326 : * certainly won't pull more than that many tuples from
2327 : * any child.
2328 : */
2329 56 : if (enable_incremental_sort && presorted_keys > 0)
2330 : {
2331 12 : cost_incremental_sort(&sort_path,
2332 : root,
2333 : pathkeys,
2334 : presorted_keys,
2335 : subpath->disabled_nodes,
2336 : subpath->startup_cost,
2337 : subpath->total_cost,
2338 : subpath->rows,
2339 12 : subpath->pathtarget->width,
2340 : 0.0,
2341 : work_mem,
2342 : apath->limit_tuples);
2343 : }
2344 : else
2345 : {
2346 44 : cost_sort(&sort_path,
2347 : root,
2348 : pathkeys,
2349 : subpath->disabled_nodes,
2350 : subpath->total_cost,
2351 : subpath->rows,
2352 44 : subpath->pathtarget->width,
2353 : 0.0,
2354 : work_mem,
2355 : apath->limit_tuples);
2356 : }
2357 :
2358 56 : subpath = &sort_path;
2359 : }
2360 :
2361 6316 : apath->path.rows += subpath->rows;
2362 6316 : apath->path.disabled_nodes += subpath->disabled_nodes;
2363 6316 : apath->path.startup_cost += subpath->startup_cost;
2364 6316 : apath->path.total_cost += subpath->total_cost;
2365 : }
2366 : }
2367 : }
2368 : else /* parallel-aware */
2369 : {
2370 25258 : int i = 0;
2371 25258 : double parallel_divisor = get_parallel_divisor(&apath->path);
2372 :
2373 : /* Parallel-aware Append never produces ordered output. */
2374 : Assert(apath->path.pathkeys == NIL);
2375 :
2376 : /* Calculate startup cost. */
2377 100154 : foreach(l, apath->subpaths)
2378 : {
2379 74896 : Path *subpath = (Path *) lfirst(l);
2380 :
2381 : /*
2382 : * Append will start returning tuples when the child node having
2383 : * lowest startup cost is done setting up. We consider only the
2384 : * first few subplans that immediately get a worker assigned.
2385 : */
2386 74896 : if (i == 0)
2387 25258 : apath->path.startup_cost = subpath->startup_cost;
2388 49638 : else if (i < apath->path.parallel_workers)
2389 24700 : apath->path.startup_cost = Min(apath->path.startup_cost,
2390 : subpath->startup_cost);
2391 :
2392 : /*
2393 : * Apply parallel divisor to subpaths. Scale the number of rows
2394 : * for each partial subpath based on the ratio of the parallel
2395 : * divisor originally used for the subpath to the one we adopted.
2396 : * Also add the cost of partial paths to the total cost, but
2397 : * ignore non-partial paths for now.
2398 : */
2399 74896 : if (i < apath->first_partial_path)
2400 13540 : apath->path.rows += subpath->rows / parallel_divisor;
2401 : else
2402 : {
2403 : double subpath_parallel_divisor;
2404 :
2405 61356 : subpath_parallel_divisor = get_parallel_divisor(subpath);
2406 61356 : apath->path.rows += subpath->rows * (subpath_parallel_divisor /
2407 : parallel_divisor);
2408 61356 : apath->path.total_cost += subpath->total_cost;
2409 : }
2410 :
2411 74896 : apath->path.disabled_nodes += subpath->disabled_nodes;
2412 74896 : apath->path.rows = clamp_row_est(apath->path.rows);
2413 :
2414 74896 : i++;
2415 : }
2416 :
2417 : /* Add cost for non-partial subpaths. */
2418 25258 : apath->path.total_cost +=
2419 25258 : append_nonpartial_cost(apath->subpaths,
2420 : apath->first_partial_path,
2421 : apath->path.parallel_workers);
2422 : }
2423 :
2424 : /*
2425 : * Although Append does not do any selection or projection, it's not free;
2426 : * add a small per-tuple overhead.
2427 : */
2428 66938 : apath->path.total_cost +=
2429 66938 : cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
2430 : }
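 :
 : /*
 :  * To illustrate the parallel-aware row scaling above (a sketch, assuming
 :  * parallel_leader_participation and the leader discount applied by
 :  * get_parallel_divisor): with 2 workers the divisor is 2 + (1 - 2 * 0.3)
 :  * = 2.4, so a non-partial child estimated at 1200 rows contributes
 :  * 1200 / 2.4 = 500 rows to the Append's per-participant row count.
 :  */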
2431 :
2432 : /*
2433 : * cost_merge_append
2434 : * Determines and returns the cost of a MergeAppend node.
2435 : *
2436 : * MergeAppend merges several pre-sorted input streams, using a heap that
2437 : * at any given instant holds the next tuple from each stream. If there
2438 : * are N streams, we need about N*log2(N) tuple comparisons to construct
2439 : * the heap at startup, and then for each output tuple, about log2(N)
2440 : * comparisons to replace the top entry.
2441 : *
2442 : * (The effective value of N will drop once some of the input streams are
2443 : * exhausted, but it seems unlikely to be worth trying to account for that.)
2444 : *
2445 : * The heap is never spilled to disk, since we assume N is not very large.
2446 : * So this is much simpler than cost_sort.
2447 : *
2448 : * As in cost_sort, we charge two operator evals per tuple comparison.
2449 : *
2450 : * 'pathkeys' is a list of sort keys
2451 : * 'n_streams' is the number of input streams
2452 : * 'input_disabled_nodes' is the sum of the input streams' disabled node counts
2453 : * 'input_startup_cost' is the sum of the input streams' startup costs
2454 : * 'input_total_cost' is the sum of the input streams' total costs
2455 : * 'tuples' is the number of tuples in all the streams
2456 : */
2457 : void
2458 10128 : cost_merge_append(Path *path, PlannerInfo *root,
2459 : List *pathkeys, int n_streams,
2460 : int input_disabled_nodes,
2461 : Cost input_startup_cost, Cost input_total_cost,
2462 : double tuples)
2463 : {
2464 10128 : Cost startup_cost = 0;
2465 10128 : Cost run_cost = 0;
2466 : Cost comparison_cost;
2467 : double N;
2468 : double logN;
2469 :
2470 : /*
2471 : * Avoid log(0)...
2472 : */
2473 10128 : N = (n_streams < 2) ? 2.0 : (double) n_streams;
2474 10128 : logN = LOG2(N);
2475 :
2476 : /* Assumed cost per tuple comparison */
2477 10128 : comparison_cost = 2.0 * cpu_operator_cost;
2478 :
2479 : /* Heap creation cost */
2480 10128 : startup_cost += comparison_cost * N * logN;
2481 :
2482 : /* Per-tuple heap maintenance cost */
2483 10128 : run_cost += tuples * comparison_cost * logN;
2484 :
2485 : /*
2486 : * Although MergeAppend does not do any selection or projection, it's not
2487 : * free; add a small per-tuple overhead.
2488 : */
2489 10128 : run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2490 :
2491 10128 : path->disabled_nodes = input_disabled_nodes;
2492 10128 : path->startup_cost = startup_cost + input_startup_cost;
2493 10128 : path->total_cost = startup_cost + run_cost + input_total_cost;
2494 10128 : }
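 :
 : /*
 :  * Worked example (a sketch, with the default cpu_operator_cost = 0.0025,
 :  * so comparison_cost = 0.005): merging N = 4 streams gives logN = 2, a
 :  * heap-creation cost of 0.005 * 4 * 2 = 0.04, and a per-tuple heap
 :  * maintenance cost of 0.005 * 2 = 0.01, so 10000 input tuples add about
 :  * 100 units of run cost before the per-tuple Append overhead.
 :  */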
2495 :
2496 : /*
2497 : * cost_material
2498 : * Determines and returns the cost of materializing a relation, including
2499 : * the cost of reading the input data.
2500 : *
2501 : * If the total volume of data to materialize exceeds work_mem, we will need
2502 : * to write it to disk, so the cost is much higher in that case.
2503 : *
2504 : * Note that here we are estimating the costs for the first scan of the
2505 : * relation, so the materialization is all overhead --- any savings will
2506 : * occur only on rescan, which is estimated in cost_rescan.
2507 : */
2508 : void
2509 668058 : cost_material(Path *path,
2510 : int input_disabled_nodes,
2511 : Cost input_startup_cost, Cost input_total_cost,
2512 : double tuples, int width)
2513 : {
2514 668058 : Cost startup_cost = input_startup_cost;
2515 668058 : Cost run_cost = input_total_cost - input_startup_cost;
2516 668058 : double nbytes = relation_byte_size(tuples, width);
2517 668058 : double work_mem_bytes = work_mem * (Size) 1024;
2518 :
2519 668058 : path->rows = tuples;
2520 :
2521 : /*
2522 : * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2523 : * reflect bookkeeping overhead. (This rate must be more than what
2524 : * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2525 : * if it is exactly the same then there will be a cost tie between
2526 : * nestloop with A outer, materialized B inner and nestloop with B outer,
2527 : * materialized A inner. The extra cost ensures we'll prefer
2528 : * materializing the smaller rel.) Note that this is normally a good deal
2529 : * less than cpu_tuple_cost, which is OK because a Material plan node
2530 : * doesn't do qual-checking or projection, so it's got less overhead than
2531 : * most plan nodes.
2532 : */
2533 668058 : run_cost += 2 * cpu_operator_cost * tuples;
2534 :
2535 : /*
2536 : * If we will spill to disk, charge at the rate of seq_page_cost per page.
2537 : * This cost is assumed to be evenly spread through the plan run phase,
2538 : * which isn't exactly accurate but our cost model doesn't allow for
2539 : * nonuniform costs within the run phase.
2540 : */
2541 668058 : if (nbytes > work_mem_bytes)
2542 : {
2543 4978 : double npages = ceil(nbytes / BLCKSZ);
2544 :
2545 4978 : run_cost += seq_page_cost * npages;
2546 : }
2547 :
2548 668058 : path->disabled_nodes = input_disabled_nodes + (enable_material ? 0 : 1);
2549 668058 : path->startup_cost = startup_cost;
2550 668058 : path->total_cost = startup_cost + run_cost;
2551 668058 : }
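 :
 : /*
 :  * Worked example of the spill case (a sketch, assuming the default 4MB
 :  * work_mem and seq_page_cost = 1.0): a million tuples of width 100 come
 :  * to well over 10^8 bytes once per-tuple overhead is included, far more
 :  * than work_mem, so each of the ceil(nbytes / BLCKSZ) pages --- over ten
 :  * thousand here --- is charged seq_page_cost, on top of the
 :  * 2 * cpu_operator_cost * 1e6 = 5000 bookkeeping charge.
 :  */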
2552 :
2553 : /*
2554 : * cost_memoize_rescan
2555 : * Determines the estimated cost of rescanning a Memoize node.
2556 : *
2557 : * In order to estimate this, we must gain knowledge of how often we expect to
2558 : * be called and how many distinct sets of parameters we are likely to be
2559 : * called with. If we expect a good cache hit ratio, then we can set our
2560 : * costs to account for that hit ratio, plus a little bit of cost for the
2561 : * caching itself. Caching will not work out well if we expect to be called
2562 : * with too many distinct parameter values. The worst case here is that we
2563 : * never see any parameter value twice, in which case we'd never get a cache
2564 : * hit and caching would be a complete waste of effort.
2565 : */
2566 : static void
2567 285082 : cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
2568 : Cost *rescan_startup_cost, Cost *rescan_total_cost)
2569 : {
2570 : EstimationInfo estinfo;
2571 : ListCell *lc;
2572 285082 : Cost input_startup_cost = mpath->subpath->startup_cost;
2573 285082 : Cost input_total_cost = mpath->subpath->total_cost;
2574 285082 : double tuples = mpath->subpath->rows;
2575 285082 : Cardinality est_calls = mpath->est_calls;
2576 285082 : int width = mpath->subpath->pathtarget->width;
2577 :
2578 : double hash_mem_bytes;
2579 : double est_entry_bytes;
2580 : Cardinality est_cache_entries;
2581 : Cardinality ndistinct;
2582 : double evict_ratio;
2583 : double hit_ratio;
2584 : Cost startup_cost;
2585 : Cost total_cost;
2586 :
2587 : /* available cache space */
2588 285082 : hash_mem_bytes = get_hash_memory_limit();
2589 :
2590 : /*
2591 : * Set the number of bytes each cache entry should consume in the cache.
2592 : * To provide us with better estimations on how many cache entries we can
2593 : * store at once, we make a call to the executor here to ask it what
2594 : * memory overheads there are for a single cache entry.
2595 : */
2596 285082 : est_entry_bytes = relation_byte_size(tuples, width) +
2597 285082 : ExecEstimateCacheEntryOverheadBytes(tuples);
2598 :
2599 : /* include the estimated width for the cache keys */
2600 608390 : foreach(lc, mpath->param_exprs)
2601 323308 : est_entry_bytes += get_expr_width(root, (Node *) lfirst(lc));
2602 :
2603 : /* estimate on the upper limit of cache entries we can hold at once */
2604 285082 : est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
2605 :
2606 : /* estimate on the distinct number of parameter values */
2607 285082 : ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL,
2608 : &estinfo);
2609 :
2610 : /*
2611 : * When the estimation fell back on using a default value, it's a bit too
2612 : * risky to assume that it's ok to use a Memoize node. The use of a
2613 : * default could cause us to use a Memoize node when it's really
2614 : * inappropriate to do so. If we see that this has been done, then we'll
2615 : * assume that every call will have unique parameters, which will almost
2616 : * certainly mean a MemoizePath will never survive add_path().
2617 : */
2618 285082 : if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
2619 16262 : ndistinct = est_calls;
2620 :
2621 : /* Remember the ndistinct estimate for EXPLAIN */
2622 285082 : mpath->est_unique_keys = ndistinct;
2623 :
2624 : /*
2625 : * Since we've already estimated the maximum number of entries we can
2626 : * store at once and know the estimated number of distinct values we'll be
2627 : * called with, we'll take this opportunity to set the path's est_entries.
2628 : * This will ultimately determine the hash table size that the executor
2629 : * will use. If we leave this at zero, the executor will just choose the
2630 : * size itself. Really this is not the right place to do this, but it's
2631 : * convenient since everything is already calculated.
2632 : */
2633 285082 : mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
2634 : PG_UINT32_MAX);
2635 :
2636 : /*
2637 : * When the number of distinct parameter values is above the amount we can
2638 : * store in the cache, then we'll have to evict some entries from the
2639 : * cache. This is not free. Here we estimate how often we'll incur the
2640 : * cost of that eviction.
2641 : */
2642 285082 : evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
2643 :
2644 : /*
2645 : * In order to estimate how costly a single scan will be, we need to
2646 : * attempt to estimate what the cache hit ratio will be. To do that we
2647 : * must look at how many scans are estimated in total for this node and
2648 : * how many of those scans we expect to get a cache hit.
2649 : */
2650 570164 : hit_ratio = ((est_calls - ndistinct) / est_calls) *
2651 285082 : (est_cache_entries / Max(ndistinct, est_cache_entries));
2652 :
2653 : /* Remember the hit ratio estimate for EXPLAIN */
2654 285082 : mpath->est_hit_ratio = hit_ratio;
2655 :
2656 : Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2657 :
2658 : /*
2659 : * Set the total_cost accounting for the expected cache hit ratio. We
2660 : * also add on a cpu_operator_cost to account for a cache lookup. This
2661 : * will happen regardless of whether it's a cache hit or not.
2662 : */
2663 285082 : total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2664 :
2665 : /* Now adjust the total cost to account for cache evictions */
2666 :
2667 : /* Charge a cpu_tuple_cost for evicting the actual cache entry */
2668 285082 : total_cost += cpu_tuple_cost * evict_ratio;
2669 :
2670 : /*
2671 : * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2672 : * The per-tuple eviction is really just a pfree, so charging a whole
2673 : * cpu_operator_cost seems a little excessive.
2674 : */
2675 285082 : total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
2676 :
2677 : /*
2678 : * Now adjust for storing things in the cache, since that's not free
2679 : * either. Everything must go in the cache. We don't proportion this
2680 : * over any ratio, just apply it once for the scan. We charge a
2681 : * cpu_tuple_cost for the creation of the cache entry and also a
2682 : * cpu_operator_cost for each tuple we expect to cache.
2683 : */
2684 285082 : total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2685 :
2686 : /*
2687 : * Getting the first row must also be proportioned according to the
2688 : * expected cache hit ratio.
2689 : */
2690 285082 : startup_cost = input_startup_cost * (1.0 - hit_ratio);
2691 :
2692 : /*
2693 : * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2694 : * which we'll do regardless of whether it was a cache hit or not.
2695 : */
2696 285082 : startup_cost += cpu_tuple_cost;
2697 :
2698 285082 : *rescan_startup_cost = startup_cost;
2699 285082 : *rescan_total_cost = total_cost;
2700 285082 : }
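 :
 : /*
 :  * Worked example of the hit-ratio estimate (a sketch): with est_calls =
 :  * 1000, ndistinct = 10, and est_cache_entries >= 10, hit_ratio =
 :  * ((1000 - 10) / 1000) * 1.0 = 0.99, so rescans are costed at about 1%
 :  * of the subpath's cost plus the cache-maintenance charges above.  Were
 :  * ndistinct to exceed est_cache_entries, both factors would shrink and
 :  * eviction charges would kick in as well.
 :  */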
2701 :
2702 : /*
2703 : * cost_agg
2704 : * Determines and returns the cost of performing an Agg plan node,
2705 : * including the cost of its input.
2706 : *
2707 : * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2708 : * we are using a hashed Agg node just to do grouping).
2709 : *
2710 : * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2711 : * are for appropriately-sorted input.
2712 : */
2713 : void
2714 85214 : cost_agg(Path *path, PlannerInfo *root,
2715 : AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
2716 : int numGroupCols, double numGroups,
2717 : List *quals,
2718 : int disabled_nodes,
2719 : Cost input_startup_cost, Cost input_total_cost,
2720 : double input_tuples, double input_width)
2721 : {
2722 : double output_tuples;
2723 : Cost startup_cost;
2724 : Cost total_cost;
2725 85214 : const AggClauseCosts dummy_aggcosts = {0};
2726 :
2727 : /* Use all-zero per-aggregate costs if NULL is passed */
2728 85214 : if (aggcosts == NULL)
2729 : {
2730 : Assert(aggstrategy == AGG_HASHED);
2731 18454 : aggcosts = &dummy_aggcosts;
2732 : }
2733 :
2734 : /*
2735 : * The transCost.per_tuple component of aggcosts should be charged once
2736 : * per input tuple, corresponding to the costs of evaluating the aggregate
2737 : * transfns and their input expressions. The finalCost.per_tuple component
2738 : * is charged once per output tuple, corresponding to the costs of
2739 : * evaluating the finalfns. Startup costs are of course charged but once.
2740 : *
2741 : * If we are grouping, we charge an additional cpu_operator_cost per
2742 : * grouping column per input tuple for grouping comparisons.
2743 : *
2744 : * We will produce a single output tuple if not grouping, and a tuple per
2745 : * group otherwise. We charge cpu_tuple_cost for each output tuple.
2746 : *
2747 : * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2748 : * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2749 : * input path is already sorted appropriately, AGG_SORTED should be
2750 : * preferred (since it has no risk of memory overflow). This will happen
2751 : * as long as the computed total costs are indeed exactly equal --- but if
2752 : * there's roundoff error we might do the wrong thing. So be sure that
2753 : * the computations below form the same intermediate values in the same
2754 : * order.
2755 : */
2756 85214 : if (aggstrategy == AGG_PLAIN)
2757 : {
2758 37038 : startup_cost = input_total_cost;
2759 37038 : startup_cost += aggcosts->transCost.startup;
2760 37038 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2761 37038 : startup_cost += aggcosts->finalCost.startup;
2762 37038 : startup_cost += aggcosts->finalCost.per_tuple;
2763 : /* we aren't grouping */
2764 37038 : total_cost = startup_cost + cpu_tuple_cost;
2765 37038 : output_tuples = 1;
2766 : }
2767 48176 : else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2768 : {
2769 : /* Here we are able to deliver output on-the-fly */
2770 17402 : startup_cost = input_startup_cost;
2771 17402 : total_cost = input_total_cost;
2772 17402 : if (aggstrategy == AGG_MIXED && !enable_hashagg)
2773 456 : ++disabled_nodes;
2774 : /* calcs phrased this way to match HASHED case, see note above */
2775 17402 : total_cost += aggcosts->transCost.startup;
2776 17402 : total_cost += aggcosts->transCost.per_tuple * input_tuples;
2777 17402 : total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2778 17402 : total_cost += aggcosts->finalCost.startup;
2779 17402 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2780 17402 : total_cost += cpu_tuple_cost * numGroups;
2781 17402 : output_tuples = numGroups;
2782 : }
2783 : else
2784 : {
2785 : /* must be AGG_HASHED */
2786 30774 : startup_cost = input_total_cost;
2787 30774 : if (!enable_hashagg)
2788 1770 : ++disabled_nodes;
2789 30774 : startup_cost += aggcosts->transCost.startup;
2790 30774 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2791 : /* cost of computing hash value */
2792 30774 : startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2793 30774 : startup_cost += aggcosts->finalCost.startup;
2794 :
2795 30774 : total_cost = startup_cost;
2796 30774 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2797 : /* cost of retrieving from hash table */
2798 30774 : total_cost += cpu_tuple_cost * numGroups;
2799 30774 : output_tuples = numGroups;
2800 : }
2801 :
2802 : /*
2803 : * Add the disk costs of hash aggregation that spills to disk.
2804 : *
2805 : * Groups that go into the hash table stay in memory until finalized, so
2806 : * spilling and reprocessing tuples doesn't incur additional invocations
2807 : * of transCost or finalCost. Furthermore, the computed hash value is
2808 : * stored with the spilled tuples, so we don't incur extra invocations of
2809 : * the hash function.
2810 : *
2811 : * Hash Agg begins returning tuples after the first batch is complete.
2812 : * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2813 : * accrue reads only to total_cost.
2814 : */
2815 85214 : if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
2816 : {
2817 : double pages;
2818 31690 : double pages_written = 0.0;
2819 31690 : double pages_read = 0.0;
2820 : double spill_cost;
2821 : double hashentrysize;
2822 : double nbatches;
2823 : Size mem_limit;
2824 : uint64 ngroups_limit;
2825 : int num_partitions;
2826 : int depth;
2827 :
2828 : /*
2829 : * Estimate number of batches based on the computed limits. If less
2830 : * than or equal to one, all groups are expected to fit in memory;
2831 : * otherwise we expect to spill.
2832 : */
2833 31690 : hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2834 : input_width,
2835 31690 : aggcosts->transitionSpace);
2836 31690 : hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2837 : &ngroups_limit, &num_partitions);
2838 :
2839 31690 : nbatches = Max((numGroups * hashentrysize) / mem_limit,
2840 : numGroups / ngroups_limit);
2841 :
2842 31690 : nbatches = Max(ceil(nbatches), 1.0);
2843 31690 : num_partitions = Max(num_partitions, 2);
2844 :
2845 : /*
2846 : * The number of partitions can change at different levels of
2847 : * recursion; but for the purposes of this calculation assume it stays
2848 : * constant.
2849 : */
2850 31690 : depth = ceil(log(nbatches) / log(num_partitions));
2851 :
2852 : /*
2853 : * Estimate number of pages read and written. For each level of
2854 : * recursion, a tuple must be written and then later read.
2855 : */
2856 31690 : pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
2857 31690 : pages_written = pages_read = pages * depth;
2858 :
2859 : /*
2860 : * HashAgg has somewhat worse IO behavior than Sort on typical
2861 : * hardware/OS combinations. Account for this with a generic penalty.
2862 : */
2863 31690 : pages_read *= 2.0;
2864 31690 : pages_written *= 2.0;
2865 :
2866 31690 : startup_cost += pages_written * random_page_cost;
2867 31690 : total_cost += pages_written * random_page_cost;
2868 31690 : total_cost += pages_read * seq_page_cost;
2869 :
2870 : /* account for CPU cost of spilling a tuple and reading it back */
2871 31690 : spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2872 31690 : startup_cost += spill_cost;
2873 31690 : total_cost += spill_cost;
2874 : }
2875 :
2876 : /*
2877 : * If there are quals (HAVING quals), account for their cost and
2878 : * selectivity.
2879 : */
2880 85214 : if (quals)
2881 : {
2882 : QualCost qual_cost;
2883 :
2884 4560 : cost_qual_eval(&qual_cost, quals, root);
2885 4560 : startup_cost += qual_cost.startup;
2886 4560 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2887 :
2888 4560 : output_tuples = clamp_row_est(output_tuples *
2889 4560 : clauselist_selectivity(root,
2890 : quals,
2891 : 0,
2892 : JOIN_INNER,
2893 : NULL));
2894 : }
2895 :
2896 85214 : path->rows = output_tuples;
2897 85214 : path->disabled_nodes = disabled_nodes;
2898 85214 : path->startup_cost = startup_cost;
2899 85214 : path->total_cost = total_cost;
2900 85214 : }
2901 :
2902 : /*
2903 : * get_windowclause_startup_tuples
2904 : * Estimate how many tuples we'll need to fetch from a WindowAgg's
2905 : * subnode before we can output the first WindowAgg tuple.
2906 : *
2907 : * How many tuples need to be read depends on the WindowClause. For example,
2908 : * a WindowClause with no PARTITION BY and no ORDER BY requires that all
2909 : * subnode tuples are read and aggregated before the WindowAgg can output
2910 : * anything. If there's a PARTITION BY, then we only need to look at tuples
2911 : * in the first partition. Here we attempt to estimate just how many
2912 : * 'input_tuples' the WindowAgg will need to read for the given WindowClause
2913 : * before the first tuple can be output.
2914 : */
2915 : static double
2916 2964 : get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc,
2917 : double input_tuples)
2918 : {
2919 2964 : int frameOptions = wc->frameOptions;
2920 : double partition_tuples;
2921 : double return_tuples;
2922 : double peer_tuples;
2923 :
2924 : /*
2925 : * First, figure out how many partitions there are likely to be and set
2926 : * partition_tuples according to that estimate.
2927 : */
2928 2964 : if (wc->partitionClause != NIL)
2929 : {
2930 : double num_partitions;
2931 734 : List *partexprs = get_sortgrouplist_exprs(wc->partitionClause,
2932 734 : root->parse->targetList);
2933 :
2934 734 : num_partitions = estimate_num_groups(root, partexprs, input_tuples,
2935 : NULL, NULL);
2936 734 : list_free(partexprs);
2937 :
2938 734 : partition_tuples = input_tuples / num_partitions;
2939 : }
2940 : else
2941 : {
2942 : /* all tuples belong to the same partition */
2943 2230 : partition_tuples = input_tuples;
2944 : }
2945 :
2946 : /* estimate the number of tuples in each peer group */
2947 2964 : if (wc->orderClause != NIL)
2948 : {
2949 : double num_groups;
2950 : List *orderexprs;
2951 :
2952 2358 : orderexprs = get_sortgrouplist_exprs(wc->orderClause,
2953 2358 : root->parse->targetList);
2954 :
2947 2964 : /* estimate how many peer groups there are in the partition */
2956 2358 : num_groups = estimate_num_groups(root, orderexprs,
2957 : partition_tuples, NULL,
2958 : NULL);
2959 2358 : list_free(orderexprs);
2960 2358 : peer_tuples = partition_tuples / num_groups;
2961 : }
2962 : else
2963 : {
2964 : /* no ORDER BY so only 1 tuple belongs in each peer group */
2965 606 : peer_tuples = 1.0;
2966 : }
2967 :
2968 2964 : if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
2969 : {
2970 : /* include all partition rows */
2971 364 : return_tuples = partition_tuples;
2972 : }
2973 2600 : else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW)
2974 : {
2975 1562 : if (frameOptions & FRAMEOPTION_ROWS)
2976 : {
2977 : /* just count the current row */
2978 722 : return_tuples = 1.0;
2979 : }
2980 840 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
2981 : {
2982 : /*
2983 : * When in RANGE/GROUPS mode, it's more complex. If there's no
2984 : * ORDER BY, then all rows in the partition are peers, otherwise
2985 : * we'll need to read the first group of peers.
2986 : */
2987 840 : if (wc->orderClause == NIL)
2988 326 : return_tuples = partition_tuples;
2989 : else
2990 514 : return_tuples = peer_tuples;
2991 : }
2992 : else
2993 : {
2994 : /*
2995 : * Something new we don't support yet? This needs attention.
2996 : * We'll just return 1.0 in the meantime.
2997 : */
2998 : Assert(false);
2999 0 : return_tuples = 1.0;
3000 : }
3001 : }
3002 1038 : else if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
3003 : {
3004 : /*
3005 : * BETWEEN ... AND N PRECEDING will only need to read the WindowAgg's
3006 : * subnode after N ROWS/RANGES/GROUPS. N can be 0, but not negative,
3007 : * so we'll just assume only the current row needs to be read to fetch
3008 : * the first WindowAgg row.
3009 : */
3010 108 : return_tuples = 1.0;
3011 : }
3012 930 : else if (frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
3013 : {
3014 930 : Const *endOffset = (Const *) wc->endOffset;
3015 : double end_offset_value;
3016 :
3017 : /* try to figure out the value specified in the endOffset. */
3018 930 : if (IsA(endOffset, Const))
3019 : {
3020 930 : if (endOffset->constisnull)
3021 : {
3022 : /*
3023 : * NULLs are not allowed, but currently, there's no code to
3024 : * error out if there's a NULL Const. We'll only discover
3025 : * this during execution. For now, just pretend everything is
3026 : * fine and assume that just the first row/range/group will be
3027 : * needed.
3028 : */
3029 0 : end_offset_value = 1.0;
3030 : }
3031 : else
3032 : {
3033 930 : switch (endOffset->consttype)
3034 : {
3035 24 : case INT2OID:
3036 24 : end_offset_value =
3037 24 : (double) DatumGetInt16(endOffset->constvalue);
3038 24 : break;
3039 132 : case INT4OID:
3040 132 : end_offset_value =
3041 132 : (double) DatumGetInt32(endOffset->constvalue);
3042 132 : break;
3043 432 : case INT8OID:
3044 432 : end_offset_value =
3045 432 : (double) DatumGetInt64(endOffset->constvalue);
3046 432 : break;
3047 342 : default:
3048 342 : end_offset_value =
3049 342 : partition_tuples / peer_tuples *
3050 : DEFAULT_INEQ_SEL;
3051 342 : break;
3052 : }
3053 : }
3054 : }
3055 : else
3056 : {
3057 : /*
3058 : * When the end bound is not a Const, we can only guess, so we
3059 : * fall back on DEFAULT_INEQ_SEL.
3060 : */
3061 0 : end_offset_value =
3062 0 : partition_tuples / peer_tuples * DEFAULT_INEQ_SEL;
3063 : }
3064 :
3065 930 : if (frameOptions & FRAMEOPTION_ROWS)
3066 : {
3067 : /* include the N FOLLOWING and the current row */
3068 270 : return_tuples = end_offset_value + 1.0;
3069 : }
3070 660 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3071 : {
3072 : /* include the N FOLLOWING ranges/groups and the initial range/group */
3073 660 : return_tuples = peer_tuples * (end_offset_value + 1.0);
3074 : }
3075 : else
3076 : {
3077 : /*
3078 : * Something new we don't support yet? This needs attention.
3079 : * We'll just return 1.0 in the meantime.
3080 : */
3081 : Assert(false);
3082 0 : return_tuples = 1.0;
3083 : }
3084 : }
3085 : else
3086 : {
3087 : /*
3088 : * Something new we don't support yet? This needs attention. We'll
3089 : * just return 1.0 in the meantime.
3090 : */
3091 : Assert(false);
3092 0 : return_tuples = 1.0;
3093 : }
3094 :
3095 2964 : if (wc->partitionClause != NIL || wc->orderClause != NIL)
3096 : {
3097 : /*
3098 : * Cap the return value to the estimated partition tuples and account
3099 : * for the extra tuple WindowAgg will need to read to confirm the next
3100 : * tuple does not belong to the same partition or peer group.
3101 : */
3102 2570 : return_tuples = Min(return_tuples + 1.0, partition_tuples);
3103 : }
3104 : else
3105 : {
3106 : /*
3107 : * Cap the return value so it's never higher than the expected tuples
3108 : * in the partition.
3109 : */
3110 394 : return_tuples = Min(return_tuples, partition_tuples);
3111 : }
3112 :
3113 : /*
3114 : * We needn't worry about any EXCLUDE options as those only exclude rows
3115 : * from being aggregated, not from being read from the WindowAgg's
3116 : * subnode.
3117 : */
3118 :
3119 2964 : return clamp_row_est(return_tuples);
3120 : }
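
/*
 * Worked example with assumed estimates: for a window definition like
 *	OVER (PARTITION BY p ORDER BY o
 *		  RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
 * with input_tuples = 10000, 10 estimated partitions, and 100 peer
 * groups per partition, we get partition_tuples = 1000 and peer_tuples
 * = 10.  The frame end is CURRENT ROW in RANGE mode with an ORDER BY,
 * so return_tuples = peer_tuples = 10; since a PARTITION BY is present
 * we add one look-ahead tuple and cap at the partition size, giving
 * Min(10 + 1, 1000) = 11 tuples read before the first output row.
 */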
3121 :
3122 : /*
3123 : * cost_windowagg
3124 : * Determines and returns the cost of performing a WindowAgg plan node,
3125 : * including the cost of its input.
3126 : *
3127 : * Input is assumed already properly sorted.
3128 : */
3129 : void
3130 2964 : cost_windowagg(Path *path, PlannerInfo *root,
3131 : List *windowFuncs, WindowClause *winclause,
3132 : int input_disabled_nodes,
3133 : Cost input_startup_cost, Cost input_total_cost,
3134 : double input_tuples)
3135 : {
3136 : Cost startup_cost;
3137 : Cost total_cost;
3138 : double startup_tuples;
3139 : int numPartCols;
3140 : int numOrderCols;
3141 : ListCell *lc;
3142 :
3143 2964 : numPartCols = list_length(winclause->partitionClause);
3144 2964 : numOrderCols = list_length(winclause->orderClause);
3145 :
3146 2964 : startup_cost = input_startup_cost;
3147 2964 : total_cost = input_total_cost;
3148 :
3149 : /*
3150 : * Window functions are assumed to cost their stated execution cost, plus
3151 : * the cost of evaluating their input expressions, per tuple. Since they
3152 : * may in fact evaluate their inputs at multiple rows during each cycle,
3153 : * this could be a drastic underestimate; but without a way to know how
3154 : * many rows the window function will fetch, it's hard to do better. In
3155 : * any case, it's a good estimate for all the built-in window functions,
3156 : * so we'll just do this for now.
3157 : */
3158 6798 : foreach(lc, windowFuncs)
3159 : {
3160 3834 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
3161 : Cost wfunccost;
3162 : QualCost argcosts;
3163 :
3164 3834 : argcosts.startup = argcosts.per_tuple = 0;
3165 3834 : add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
3166 : &argcosts);
3167 3834 : startup_cost += argcosts.startup;
3168 3834 : wfunccost = argcosts.per_tuple;
3169 :
3170 : /* also add the input expressions' cost to per-input-row costs */
3171 3834 : cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
3172 3834 : startup_cost += argcosts.startup;
3173 3834 : wfunccost += argcosts.per_tuple;
3174 :
3175 : /*
3176 : * Add the filter's cost to per-input-row costs. XXX We should reduce
3177 : * input expression costs according to filter selectivity.
3178 : */
3179 3834 : cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
3180 3834 : startup_cost += argcosts.startup;
3181 3834 : wfunccost += argcosts.per_tuple;
3182 :
3183 3834 : total_cost += wfunccost * input_tuples;
3184 : }
3185 :
3186 : /*
3187 : * We also charge cpu_operator_cost per grouping column per tuple for
3188 : * grouping comparisons, plus cpu_tuple_cost per tuple for general
3189 : * overhead.
3190 : *
3191 : * XXX this neglects costs of spooling the data to disk when it overflows
3192 : * work_mem. Sooner or later that should get accounted for.
3193 : */
3194 2964 : total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
3195 2964 : total_cost += cpu_tuple_cost * input_tuples;
3196 :
3197 2964 : path->rows = input_tuples;
3198 2964 : path->disabled_nodes = input_disabled_nodes;
3199 2964 : path->startup_cost = startup_cost;
3200 2964 : path->total_cost = total_cost;
3201 :
3202 : /*
3203 : * Also, take into account how many tuples we need to read from the
3204 : * subnode in order to produce the first tuple from the WindowAgg. To do
3205 : * this we proportion the run cost (total cost not including startup cost)
3206 : * over the estimated startup tuples. We already included the startup
3207 : * cost of the subnode, so we only need to do this when the estimated
3208 : * startup tuples is above 1.0.
3209 : */
3210 2964 : startup_tuples = get_windowclause_startup_tuples(root, winclause,
3211 : input_tuples);
3212 :
3213 2964 : if (startup_tuples > 1.0)
3214 2556 : path->startup_cost += (total_cost - startup_cost) / input_tuples *
3215 2556 : (startup_tuples - 1.0);
3216 2964 : }
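
/*
 * Worked example with assumed costs: if startup_cost is 0, total_cost
 * is 1000 after adding the WindowAgg overhead, input_tuples = 10000,
 * and get_windowclause_startup_tuples() returns 11, then the startup
 * cost is increased by (1000 - 0) / 10000 * (11 - 1.0) = 1.0; that is,
 * we expect to expend about 0.1% of the run cost before the first row
 * can be emitted.
 */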
3217 :
3218 : /*
3219 : * cost_group
3220 : * Determines and returns the cost of performing a Group plan node,
3221 : * including the cost of its input.
3222 : *
3223 : * Note: caller must ensure that input costs are for appropriately-sorted
3224 : * input.
3225 : */
3226 : void
3227 1226 : cost_group(Path *path, PlannerInfo *root,
3228 : int numGroupCols, double numGroups,
3229 : List *quals,
3230 : int input_disabled_nodes,
3231 : Cost input_startup_cost, Cost input_total_cost,
3232 : double input_tuples)
3233 : {
3234 : double output_tuples;
3235 : Cost startup_cost;
3236 : Cost total_cost;
3237 :
3238 1226 : output_tuples = numGroups;
3239 1226 : startup_cost = input_startup_cost;
3240 1226 : total_cost = input_total_cost;
3241 :
3242 : /*
3243 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3244 : * all columns get compared for most of the tuples.
3245 : */
3246 1226 : total_cost += cpu_operator_cost * input_tuples * numGroupCols;
3247 :
3248 : /*
3249 : * If there are quals (HAVING quals), account for their cost and
3250 : * selectivity.
3251 : */
3252 1226 : if (quals)
3253 : {
3254 : QualCost qual_cost;
3255 :
3256 0 : cost_qual_eval(&qual_cost, quals, root);
3257 0 : startup_cost += qual_cost.startup;
3258 0 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
3259 :
3260 0 : output_tuples = clamp_row_est(output_tuples *
3261 0 : clauselist_selectivity(root,
3262 : quals,
3263 : 0,
3264 : JOIN_INNER,
3265 : NULL));
3266 : }
3267 :
3268 1226 : path->rows = output_tuples;
3269 1226 : path->disabled_nodes = input_disabled_nodes;
3270 1226 : path->startup_cost = startup_cost;
3271 1226 : path->total_cost = total_cost;
3272 1226 : }
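
/*
 * Worked example: grouping 10000 presorted input tuples on two columns
 * adds cpu_operator_cost * 10000 * 2 to total_cost; at the default
 * cpu_operator_cost of 0.0025 that is 50 cost units on top of the
 * input path's cost, before any HAVING quals are considered.
 */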
3273 :
3274 : /*
3275 : * initial_cost_nestloop
3276 : * Preliminary estimate of the cost of a nestloop join path.
3277 : *
3278 : * This must quickly produce lower-bound estimates of the path's startup and
3279 : * total costs. If we are unable to eliminate the proposed path from
3280 : * consideration using the lower bounds, final_cost_nestloop will be called
3281 : * to obtain the final estimates.
3282 : *
3283 : * The exact division of labor between this function and final_cost_nestloop
3284 : * is private to them, and represents a tradeoff between speed of the initial
3285 : * estimate and getting a tight lower bound. We choose to not examine the
3286 : * join quals here, since that's by far the most expensive part of the
3287 : * calculations. The end result is that CPU-cost considerations must be
3288 : * left for the second phase; and for SEMI/ANTI joins, we must also postpone
3289 : * incorporation of the inner path's run cost.
3290 : *
3291 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3292 : * other data to be used by final_cost_nestloop
3293 : * 'jointype' is the type of join to be performed
3294 : * 'outer_path' is the outer input to the join
3295 : * 'inner_path' is the inner input to the join
3296 : * 'extra' contains miscellaneous information about the join
3297 : */
3298 : void
3299 3222444 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
3300 : JoinType jointype,
3301 : Path *outer_path, Path *inner_path,
3302 : JoinPathExtraData *extra)
3303 : {
3304 : int disabled_nodes;
3305 3222444 : Cost startup_cost = 0;
3306 3222444 : Cost run_cost = 0;
3307 3222444 : double outer_path_rows = outer_path->rows;
3308 : Cost inner_rescan_start_cost;
3309 : Cost inner_rescan_total_cost;
3310 : Cost inner_run_cost;
3311 : Cost inner_rescan_run_cost;
3312 :
3313 : /* Count up disabled nodes. */
3314 3222444 : disabled_nodes = enable_nestloop ? 0 : 1;
3315 3222444 : disabled_nodes += inner_path->disabled_nodes;
3316 3222444 : disabled_nodes += outer_path->disabled_nodes;
3317 :
3318 : /* estimate costs to rescan the inner relation */
3319 3222444 : cost_rescan(root, inner_path,
3320 : &inner_rescan_start_cost,
3321 : &inner_rescan_total_cost);
3322 :
3323 : /* cost of source data */
3324 :
3325 : /*
3326 : * NOTE: clearly, we must pay both outer and inner paths' startup_cost
3327 : * before we can start returning tuples, so the join's startup cost is
3328 : * their sum. We'll also pay the inner path's rescan startup cost
3329 : * multiple times.
3330 : */
3331 3222444 : startup_cost += outer_path->startup_cost + inner_path->startup_cost;
3332 3222444 : run_cost += outer_path->total_cost - outer_path->startup_cost;
3333 3222444 : if (outer_path_rows > 1)
3334 2348376 : run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
3335 :
3336 3222444 : inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
3337 3222444 : inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
3338 :
3339 3222444 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
3340 3161202 : extra->inner_unique)
3341 : {
3342 : /*
3343 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3344 : * executor will stop after the first match.
3345 : *
3346 : * Getting decent estimates requires inspection of the join quals,
3347 : * which we choose to postpone to final_cost_nestloop.
3348 : */
3349 :
3350 : /* Save private data for final_cost_nestloop */
3351 1312866 : workspace->inner_run_cost = inner_run_cost;
3352 1312866 : workspace->inner_rescan_run_cost = inner_rescan_run_cost;
3353 : }
3354 : else
3355 : {
3356 : /* Normal case; we'll scan whole input rel for each outer row */
3357 1909578 : run_cost += inner_run_cost;
3358 1909578 : if (outer_path_rows > 1)
3359 1477034 : run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
3360 : }
3361 :
3362 : /* CPU costs left for later */
3363 :
3364 : /* Public result fields */
3365 3222444 : workspace->disabled_nodes = disabled_nodes;
3366 3222444 : workspace->startup_cost = startup_cost;
3367 3222444 : workspace->total_cost = startup_cost + run_cost;
3368 : /* Save private data for final_cost_nestloop */
3369 3222444 : workspace->run_cost = run_cost;
3370 3222444 : }
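
/*
 * Worked example with assumed path costs: outer (startup 0, total 100,
 * 50 rows), inner (startup 5, total 25), inner rescan (startup 0,
 * total 10).  For a plain inner join the lower bound is
 *	startup_cost = 0 + 5 = 5
 *	run_cost = (100 - 0)			outer run cost
 *			 + (50 - 1) * 0			inner rescan startup
 *			 + (25 - 5)				first inner scan
 *			 + (50 - 1) * (10 - 0)	49 inner rescans
 *			 = 610
 * so workspace->total_cost = 615 before any CPU costs are added in
 * final_cost_nestloop.
 */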
3371 :
3372 : /*
3373 : * final_cost_nestloop
3374 : * Final estimate of the cost and result size of a nestloop join path.
3375 : *
3376 : * 'path' is already filled in except for the rows and cost fields
3377 : * 'workspace' is the result from initial_cost_nestloop
3378 : * 'extra' contains miscellaneous information about the join
3379 : */
3380 : void
3381 1446594 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
3382 : JoinCostWorkspace *workspace,
3383 : JoinPathExtraData *extra)
3384 : {
3385 1446594 : Path *outer_path = path->jpath.outerjoinpath;
3386 1446594 : Path *inner_path = path->jpath.innerjoinpath;
3387 1446594 : double outer_path_rows = outer_path->rows;
3388 1446594 : double inner_path_rows = inner_path->rows;
3389 1446594 : Cost startup_cost = workspace->startup_cost;
3390 1446594 : Cost run_cost = workspace->run_cost;
3391 : Cost cpu_per_tuple;
3392 : QualCost restrict_qual_cost;
3393 : double ntuples;
3394 :
3395 : /* Set the number of disabled nodes. */
3396 1446594 : path->jpath.path.disabled_nodes = workspace->disabled_nodes;
3397 :
3398 : /* Protect some assumptions below that rowcounts aren't zero */
3399 1446594 : if (outer_path_rows <= 0)
3400 0 : outer_path_rows = 1;
3401 1446594 : if (inner_path_rows <= 0)
3402 726 : inner_path_rows = 1;
3403 : /* Mark the path with the correct row estimate */
3404 1446594 : if (path->jpath.path.param_info)
3405 30594 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3406 : else
3407 1416000 : path->jpath.path.rows = path->jpath.path.parent->rows;
3408 :
3409 : /* For partial paths, scale row estimate. */
3410 1446594 : if (path->jpath.path.parallel_workers > 0)
3411 : {
3412 43698 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3413 :
3414 43698 : path->jpath.path.rows =
3415 43698 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3416 : }
3417 :
3418 : /* cost of inner-relation source data (we already dealt with outer rel) */
3419 :
3420 1446594 : if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
3421 1403934 : extra->inner_unique)
3422 905000 : {
3423 : /*
3424 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3425 : * executor will stop after the first match.
3426 : */
3427 905000 : Cost inner_run_cost = workspace->inner_run_cost;
3428 905000 : Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3429 : double outer_matched_rows;
3430 : double outer_unmatched_rows;
3431 : Selectivity inner_scan_frac;
3432 :
3433 : /*
3434 : * For an outer-rel row that has at least one match, we can expect the
3435 : * inner scan to stop after a fraction 1/(match_count+1) of the inner
3436 : * rows, if the matches are evenly distributed. Since they probably
3437 : * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3438 : * that fraction. (If we used a larger fuzz factor, we'd have to
3439 : * clamp inner_scan_frac to at most 1.0; but since match_count is at
3440 : * least 1, no such clamp is needed now.)
3441 : */
3442 905000 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3443 905000 : outer_unmatched_rows = outer_path_rows - outer_matched_rows;
3444 905000 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3445 :
3446 : /*
3447 : * Compute number of tuples processed (not number emitted!). First,
3448 : * account for successfully-matched outer rows.
3449 : */
3450 905000 : ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
3451 :
3452 : /*
3453 : * Now we need to estimate the actual costs of scanning the inner
3454 : * relation, which may be quite a bit less than N times inner_run_cost
3455 : * due to early scan stops. We consider two cases. If the inner path
3456 : * is an indexscan using all the joinquals as indexquals, then an
3457 : * unmatched outer row results in an indexscan returning no rows,
3458 : * which is probably quite cheap. Otherwise, the executor will have
3459 : * to scan the whole inner rel for an unmatched row; not so cheap.
3460 : */
3461 905000 : if (has_indexed_join_quals(path))
3462 : {
3463 : /*
3464 : * Successfully-matched outer rows will only require scanning
3465 : * inner_scan_frac of the inner relation. In this case, we don't
3466 : * need to charge the full inner_run_cost even when that's more
3467 : * than inner_rescan_run_cost, because we can assume that none of
3468 : * the inner scans ever scan the whole inner relation. So it's
3469 : * okay to assume that all the inner scan executions can be
3470 : * fractions of the full cost, even if materialization is reducing
3471 : * the rescan cost. At this writing, it's impossible to get here
3472 : * for a materialized inner scan, so inner_run_cost and
3473 : * inner_rescan_run_cost will be the same anyway; but just in
3474 : * case, use inner_run_cost for the first matched tuple and
3475 : * inner_rescan_run_cost for additional ones.
3476 : */
3477 149350 : run_cost += inner_run_cost * inner_scan_frac;
3478 149350 : if (outer_matched_rows > 1)
3479 22278 : run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3480 :
3481 : /*
3482 : * Add the cost of inner-scan executions for unmatched outer rows.
3483 : * We estimate this as the same cost as returning the first tuple
3484 : * of a nonempty scan. We consider that these are all rescans,
3485 : * since we used inner_run_cost once already.
3486 : */
3487 149350 : run_cost += outer_unmatched_rows *
3488 149350 : inner_rescan_run_cost / inner_path_rows;
3489 :
3490 : /*
3491 : * We won't be evaluating any quals at all for unmatched rows, so
3492 : * don't add them to ntuples.
3493 : */
3494 : }
3495 : else
3496 : {
3497 : /*
3498 : * Here, a complicating factor is that rescans may be cheaper than
3499 : * first scans. If we never scan all the way to the end of the
3500 : * inner rel, it might be (depending on the plan type) that we'd
3501 : * never pay the whole inner first-scan run cost. However it is
3502 : * difficult to estimate whether that will happen (and it could
3503 : * not happen if there are any unmatched outer rows!), so be
3504 : * conservative and always charge the whole first-scan cost once.
3505 : * We consider this charge to correspond to the first unmatched
3506 : * outer row, unless there isn't one in our estimate, in which
3507 : * case blame it on the first matched row.
3508 : */
3509 :
3510 : /* First, count all unmatched join tuples as being processed */
3511 755650 : ntuples += outer_unmatched_rows * inner_path_rows;
3512 :
3513 : /* Now add the forced full scan, and decrement appropriate count */
3514 755650 : run_cost += inner_run_cost;
3515 755650 : if (outer_unmatched_rows >= 1)
3516 719220 : outer_unmatched_rows -= 1;
3517 : else
3518 36430 : outer_matched_rows -= 1;
3519 :
3520 : /* Add inner run cost for additional outer tuples having matches */
3521 755650 : if (outer_matched_rows > 0)
3522 269120 : run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3523 :
3524 : /* Add inner run cost for additional unmatched outer tuples */
3525 755650 : if (outer_unmatched_rows > 0)
3526 475510 : run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3527 : }
3528 : }
3529 : else
3530 : {
3531 : /* Normal-case source costs were included in preliminary estimate */
3532 :
3533 : /* Compute number of tuples processed (not number emitted!) */
3534 541594 : ntuples = outer_path_rows * inner_path_rows;
3535 : }
3536 :
3537 : /* CPU costs */
3538 1446594 : cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
3539 1446594 : startup_cost += restrict_qual_cost.startup;
3540 1446594 : cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
3541 1446594 : run_cost += cpu_per_tuple * ntuples;
3542 :
3543 : /* tlist eval costs are paid per output row, not per tuple scanned */
3544 1446594 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3545 1446594 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3546 :
3547 1446594 : path->jpath.path.startup_cost = startup_cost;
3548 1446594 : path->jpath.path.total_cost = startup_cost + run_cost;
3549 1446594 : }
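
/*
 * Worked example for the SEMI/ANTI branch, with assumed semifactors:
 * outer_match_frac = 0.5 and match_count = 3 over 100 outer rows and
 * 1000 inner rows give outer_matched_rows = 50 and inner_scan_frac =
 * 2.0 / (3 + 1) = 0.5.  The matched outer rows are thus expected to
 * process 50 * 1000 * 0.5 = 25000 inner tuples before stopping at the
 * first match, while the 50 unmatched outer rows must each pay for a
 * scan of the inner rel unless has_indexed_join_quals() lets us treat
 * their probes as cheap empty indexscans.
 */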
3550 :
3551 : /*
3552 : * initial_cost_mergejoin
3553 : * Preliminary estimate of the cost of a mergejoin path.
3554 : *
3555 : * This must quickly produce lower-bound estimates of the path's startup and
3556 : * total costs. If we are unable to eliminate the proposed path from
3557 : * consideration using the lower bounds, final_cost_mergejoin will be called
3558 : * to obtain the final estimates.
3559 : *
3560 : * The exact division of labor between this function and final_cost_mergejoin
3561 : * is private to them, and represents a tradeoff between speed of the initial
3562 : * estimate and getting a tight lower bound. We choose to not examine the
3563 : * join quals here, except for obtaining the scan selectivity estimate which
3564 : * is really essential (but fortunately, use of caching keeps the cost of
3565 : * getting that down to something reasonable).
3566 : * We also assume that cost_sort/cost_incremental_sort is cheap enough to use
3567 : * here.
3568 : *
3569 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3570 : * other data to be used by final_cost_mergejoin
3571 : * 'jointype' is the type of join to be performed
3572 : * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3573 : * 'outer_path' is the outer input to the join
3574 : * 'inner_path' is the inner input to the join
3575 : * 'outersortkeys' is the list of sort keys for the outer path
3576 : * 'innersortkeys' is the list of sort keys for the inner path
3577 : * 'outer_presorted_keys' is the number of presorted keys of the outer path
3578 : * 'extra' contains miscellaneous information about the join
3579 : *
3580 : * Note: outersortkeys and innersortkeys should be NIL if no explicit
3581 : * sort is needed because the respective source path is already ordered.
3582 : */
3583 : void
3584 1466278 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3585 : JoinType jointype,
3586 : List *mergeclauses,
3587 : Path *outer_path, Path *inner_path,
3588 : List *outersortkeys, List *innersortkeys,
3589 : int outer_presorted_keys,
3590 : JoinPathExtraData *extra)
3591 : {
3592 : int disabled_nodes;
3593 1466278 : Cost startup_cost = 0;
3594 1466278 : Cost run_cost = 0;
3595 1466278 : double outer_path_rows = outer_path->rows;
3596 1466278 : double inner_path_rows = inner_path->rows;
3597 : Cost inner_run_cost;
3598 : double outer_rows,
3599 : inner_rows,
3600 : outer_skip_rows,
3601 : inner_skip_rows;
3602 : Selectivity outerstartsel,
3603 : outerendsel,
3604 : innerstartsel,
3605 : innerendsel;
3606 : Path sort_path; /* dummy for result of
3607 : * cost_sort/cost_incremental_sort */
3608 :
3609 : /* Protect some assumptions below that rowcounts aren't zero */
3610 1466278 : if (outer_path_rows <= 0)
3611 96 : outer_path_rows = 1;
3612 1466278 : if (inner_path_rows <= 0)
3613 126 : inner_path_rows = 1;
3614 :
3615 : /*
3616 : * A merge join will stop as soon as it exhausts either input stream
3617 : * (unless it's an outer join, in which case the outer side has to be
3618 : * scanned all the way anyway). Estimate fraction of the left and right
3619 : * inputs that will actually need to be scanned. Likewise, we can
3620 : * estimate the number of rows that will be skipped before the first join
3621 : * pair is found, which should be factored into startup cost. We use only
3622 : * the first (most significant) merge clause for this purpose. Since
3623 : * mergejoinscansel() is a fairly expensive computation, we cache the
3624 : * results in the merge clause RestrictInfo.
3625 : */
3626 1466278 : if (mergeclauses && jointype != JOIN_FULL)
3627 1460138 : {
3628 1460138 : RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3629 : List *opathkeys;
3630 : List *ipathkeys;
3631 : PathKey *opathkey;
3632 : PathKey *ipathkey;
3633 : MergeScanSelCache *cache;
3634 :
3635 : /* Get the input pathkeys to determine the sort-order details */
3636 1460138 : opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3637 1460138 : ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3638 : Assert(opathkeys);
3639 : Assert(ipathkeys);
3640 1460138 : opathkey = (PathKey *) linitial(opathkeys);
3641 1460138 : ipathkey = (PathKey *) linitial(ipathkeys);
3642 : /* debugging check */
3643 1460138 : if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
3644 1460138 : opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
3645 1460138 : opathkey->pk_cmptype != ipathkey->pk_cmptype ||
3646 1460138 : opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
3647 0 : elog(ERROR, "left and right pathkeys do not match in mergejoin");
3648 :
3649 : /* Get the selectivity with caching */
3650 1460138 : cache = cached_scansel(root, firstclause, opathkey);
3651 :
3652 1460138 : if (bms_is_subset(firstclause->left_relids,
3653 1460138 : outer_path->parent->relids))
3654 : {
3655 : /* left side of clause is outer */
3656 761710 : outerstartsel = cache->leftstartsel;
3657 761710 : outerendsel = cache->leftendsel;
3658 761710 : innerstartsel = cache->rightstartsel;
3659 761710 : innerendsel = cache->rightendsel;
3660 : }
3661 : else
3662 : {
3663 : /* left side of clause is inner */
3664 698428 : outerstartsel = cache->rightstartsel;
3665 698428 : outerendsel = cache->rightendsel;
3666 698428 : innerstartsel = cache->leftstartsel;
3667 698428 : innerendsel = cache->leftendsel;
3668 : }
3669 1460138 : if (jointype == JOIN_LEFT ||
3670 : jointype == JOIN_ANTI)
3671 : {
3672 196876 : outerstartsel = 0.0;
3673 196876 : outerendsel = 1.0;
3674 : }
3675 1263262 : else if (jointype == JOIN_RIGHT ||
3676 : jointype == JOIN_RIGHT_ANTI)
3677 : {
3678 197212 : innerstartsel = 0.0;
3679 197212 : innerendsel = 1.0;
3680 : }
3681 : }
3682 : else
3683 : {
3684 : /* cope with clauseless or full mergejoin */
3685 6140 : outerstartsel = innerstartsel = 0.0;
3686 6140 : outerendsel = innerendsel = 1.0;
3687 : }
3688 :
3689 : /*
3690 : * Convert selectivities to row counts. We force outer_rows and
3691 : * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3692 : */
3693 1466278 : outer_skip_rows = rint(outer_path_rows * outerstartsel);
3694 1466278 : inner_skip_rows = rint(inner_path_rows * innerstartsel);
3695 1466278 : outer_rows = clamp_row_est(outer_path_rows * outerendsel);
3696 1466278 : inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3697 :
3698 : Assert(outer_skip_rows <= outer_rows);
3699 : Assert(inner_skip_rows <= inner_rows);
3700 :
3701 : /*
3702 : * Readjust scan selectivities to account for above rounding. This is
3703 : * normally an insignificant effect, but when there are only a few rows in
3704 : * the inputs, failing to do this makes for a large percentage error.
3705 : */
3706 1466278 : outerstartsel = outer_skip_rows / outer_path_rows;
3707 1466278 : innerstartsel = inner_skip_rows / inner_path_rows;
3708 1466278 : outerendsel = outer_rows / outer_path_rows;
3709 1466278 : innerendsel = inner_rows / inner_path_rows;
3710 :
3711 : Assert(outerstartsel <= outerendsel);
3712 : Assert(innerstartsel <= innerendsel);
3713 :
3714 1466278 : disabled_nodes = enable_mergejoin ? 0 : 1;
3715 :
3716 : /* cost of source data */
3717 :
3718 1466278 : if (outersortkeys) /* do we need to sort outer? */
3719 : {
3720 : /*
3721 : * We can assert that the outer path is not already ordered
3722 : * appropriately for the mergejoin; otherwise, outersortkeys would
3723 : * have been set to NIL.
3724 : */
3725 : Assert(!pathkeys_contained_in(outersortkeys, outer_path->pathkeys));
3726 :
3727 : /*
3728 : * We choose to use incremental sort if it is enabled and there are
3729 : * presorted keys; otherwise we use full sort.
3730 : */
3731 748926 : if (enable_incremental_sort && outer_presorted_keys > 0)
3732 : {
3733 1638 : cost_incremental_sort(&sort_path,
3734 : root,
3735 : outersortkeys,
3736 : outer_presorted_keys,
3737 : outer_path->disabled_nodes,
3738 : outer_path->startup_cost,
3739 : outer_path->total_cost,
3740 : outer_path_rows,
3741 1638 : outer_path->pathtarget->width,
3742 : 0.0,
3743 : work_mem,
3744 : -1.0);
3745 : }
3746 : else
3747 : {
3748 747288 : cost_sort(&sort_path,
3749 : root,
3750 : outersortkeys,
3751 : outer_path->disabled_nodes,
3752 : outer_path->total_cost,
3753 : outer_path_rows,
3754 747288 : outer_path->pathtarget->width,
3755 : 0.0,
3756 : work_mem,
3757 : -1.0);
3758 : }
3759 :
3760 748926 : disabled_nodes += sort_path.disabled_nodes;
3761 748926 : startup_cost += sort_path.startup_cost;
3762 748926 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3763 748926 : * outerstartsel;
3764 748926 : run_cost += (sort_path.total_cost - sort_path.startup_cost)
3765 748926 : * (outerendsel - outerstartsel);
3766 : }
3767 : else
3768 : {
3769 717352 : disabled_nodes += outer_path->disabled_nodes;
3770 717352 : startup_cost += outer_path->startup_cost;
3771 717352 : startup_cost += (outer_path->total_cost - outer_path->startup_cost)
3772 717352 : * outerstartsel;
3773 717352 : run_cost += (outer_path->total_cost - outer_path->startup_cost)
3774 717352 : * (outerendsel - outerstartsel);
3775 : }
3776 :
3777 1466278 : if (innersortkeys) /* do we need to sort inner? */
3778 : {
3779 : /*
3780 : * We can assert that the inner path is not already ordered
3781 : * appropriately for the mergejoin; otherwise, innersortkeys would
3782 : * have been set to NIL.
3783 : */
3784 : Assert(!pathkeys_contained_in(innersortkeys, inner_path->pathkeys));
3785 :
3786 : /*
3787 : * We do not consider incremental sort for inner path, because
3788 : * incremental sort does not support mark/restore.
3789 : */
3790 :
3791 1179646 : cost_sort(&sort_path,
3792 : root,
3793 : innersortkeys,
3794 : inner_path->disabled_nodes,
3795 : inner_path->total_cost,
3796 : inner_path_rows,
3797 1179646 : inner_path->pathtarget->width,
3798 : 0.0,
3799 : work_mem,
3800 : -1.0);
3801 1179646 : disabled_nodes += sort_path.disabled_nodes;
3802 1179646 : startup_cost += sort_path.startup_cost;
3803 1179646 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3804 1179646 : * innerstartsel;
3805 1179646 : inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3806 1179646 : * (innerendsel - innerstartsel);
3807 : }
3808 : else
3809 : {
3810 286632 : disabled_nodes += inner_path->disabled_nodes;
3811 286632 : startup_cost += inner_path->startup_cost;
3812 286632 : startup_cost += (inner_path->total_cost - inner_path->startup_cost)
3813 286632 : * innerstartsel;
3814 286632 : inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
3815 286632 : * (innerendsel - innerstartsel);
3816 : }
3817 :
3818 : /*
3819 : * We can't yet determine whether rescanning occurs, or whether
3820 : * materialization of the inner input should be done. The minimum
3821 : * possible inner input cost, regardless of rescan and materialization
3822 : * considerations, is inner_run_cost. We include that in
3823 : * workspace->total_cost, but not yet in run_cost.
3824 : */
3825 :
3826 : /* CPU costs left for later */
3827 :
3828 : /* Public result fields */
3829 1466278 : workspace->disabled_nodes = disabled_nodes;
3830 1466278 : workspace->startup_cost = startup_cost;
3831 1466278 : workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3832 : /* Save private data for final_cost_mergejoin */
3833 1466278 : workspace->run_cost = run_cost;
3834 1466278 : workspace->inner_run_cost = inner_run_cost;
3835 1466278 : workspace->outer_rows = outer_rows;
3836 1466278 : workspace->inner_rows = inner_rows;
3837 1466278 : workspace->outer_skip_rows = outer_skip_rows;
3838 1466278 : workspace->inner_skip_rows = inner_skip_rows;
3839 1466278 : }
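
/*
 * Worked example of the rounding readjustment: with outer_path_rows =
 * 5 and outerstartsel = 0.25, outer_skip_rows = rint(5 * 0.25) = 1,
 * and outerstartsel is then recomputed as 1 / 5 = 0.2.  With so few
 * rows, keeping the original 0.25 would overstate the skipped fraction
 * of the sort (or source path) cost by 25%, which is why the
 * selectivities are recomputed from the rounded row counts.
 */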
3840 :
3841 : /*
3842 : * final_cost_mergejoin
3843 : * Final estimate of the cost and result size of a mergejoin path.
3844 : *
3845 : * Unlike other costsize functions, this routine makes two actual decisions:
3846 : * whether the executor will need to do mark/restore, and whether we should
3847 : * materialize the inner path. It would be logically cleaner to build
3848 : * separate paths testing these alternatives, but that would require repeating
3849 : * most of the cost calculations, which are not all that cheap. Since the
3850 : * choice will not affect output pathkeys or startup cost, only total cost,
3851 : * there is no possibility of wanting to keep more than one path. So it seems
3852 : * best to make the decisions here and record them in the path's
3853 : * skip_mark_restore and materialize_inner fields.
3854 : *
3855 : * Mark/restore overhead is usually required, but can be skipped if we know
3856 : * that the executor need find only one match per outer tuple, and that the
3857 : * mergeclauses are sufficient to identify a match.
3858 : *
3859 : * We materialize the inner path if we need mark/restore and either the inner
3860 : * path can't support mark/restore, or it's cheaper to use an interposed
3861 : * Material node to handle mark/restore.
3862 : *
3863 : * 'path' is already filled in except for the rows and cost fields and
3864 : * skip_mark_restore and materialize_inner
3865 : * 'workspace' is the result from initial_cost_mergejoin
3866 : * 'extra' contains miscellaneous information about the join
3867 : */
3868 : void
3869 458468 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3870 : JoinCostWorkspace *workspace,
3871 : JoinPathExtraData *extra)
3872 : {
3873 458468 : Path *outer_path = path->jpath.outerjoinpath;
3874 458468 : Path *inner_path = path->jpath.innerjoinpath;
3875 458468 : double inner_path_rows = inner_path->rows;
3876 458468 : List *mergeclauses = path->path_mergeclauses;
3877 458468 : List *innersortkeys = path->innersortkeys;
3878 458468 : Cost startup_cost = workspace->startup_cost;
3879 458468 : Cost run_cost = workspace->run_cost;
3880 458468 : Cost inner_run_cost = workspace->inner_run_cost;
3881 458468 : double outer_rows = workspace->outer_rows;
3882 458468 : double inner_rows = workspace->inner_rows;
3883 458468 : double outer_skip_rows = workspace->outer_skip_rows;
3884 458468 : double inner_skip_rows = workspace->inner_skip_rows;
3885 : Cost cpu_per_tuple,
3886 : bare_inner_cost,
3887 : mat_inner_cost;
3888 : QualCost merge_qual_cost;
3889 : QualCost qp_qual_cost;
3890 : double mergejointuples,
3891 : rescannedtuples;
3892 : double rescanratio;
3893 :
3894 : /* Set the number of disabled nodes. */
3895 458468 : path->jpath.path.disabled_nodes = workspace->disabled_nodes;
3896 :
3897 : /* Protect some assumptions below that rowcounts aren't zero */
3898 458468 : if (inner_path_rows <= 0)
3899 90 : inner_path_rows = 1;
3900 :
3901 : /* Mark the path with the correct row estimate */
3902 458468 : if (path->jpath.path.param_info)
3903 920 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3904 : else
3905 457548 : path->jpath.path.rows = path->jpath.path.parent->rows;
3906 :
3907 : /* For partial paths, scale row estimate. */
3908 458468 : if (path->jpath.path.parallel_workers > 0)
3909 : {
3910 65860 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3911 :
3912 65860 : path->jpath.path.rows =
3913 65860 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3914 : }
3915 :
3916 : /*
3917 : * Compute cost of the mergequals and qpquals (other restriction clauses)
3918 : * separately.
3919 : */
3920 458468 : cost_qual_eval(&merge_qual_cost, mergeclauses, root);
3921 458468 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3922 458468 : qp_qual_cost.startup -= merge_qual_cost.startup;
3923 458468 : qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
3924 :
3925 : /*
3926 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3927 : * executor will stop scanning for matches after the first match. When
3928 : * all the joinclauses are merge clauses, this means we don't ever need to
3929 : * back up the merge, and so we can skip mark/restore overhead.
3930 : */
3931 458468 : if ((path->jpath.jointype == JOIN_SEMI ||
3932 451390 : path->jpath.jointype == JOIN_ANTI ||
3933 602410 : extra->inner_unique) &&
3934 158006 : (list_length(path->jpath.joinrestrictinfo) ==
3935 158006 : list_length(path->path_mergeclauses)))
3936 136044 : path->skip_mark_restore = true;
3937 : else
3938 322424 : path->skip_mark_restore = false;
3939 :
3940 : /*
3941 : * Get approx # tuples passing the mergequals. We use approx_tuple_count
3942 : * here because we need an estimate done with JOIN_INNER semantics.
3943 : */
3944 458468 : mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
3945 :
3946 : /*
3947 : * When there are equal merge keys in the outer relation, the mergejoin
3948 : * must rescan any matching tuples in the inner relation. This means
3949 : * re-fetching inner tuples; we have to estimate how often that happens.
3950 : *
3951 : * For regular inner and outer joins, the number of re-fetches can be
3952 : * estimated approximately as size of merge join output minus size of
3953 : * inner relation. Assume that the distinct key values are 1, 2, ..., and
3954 : * denote the number of values of each key in the outer relation as m1,
3955 : * m2, ...; in the inner relation, n1, n2, ... Then we have
3956 : *
3957 : * size of join = m1 * n1 + m2 * n2 + ...
3958 : *
3959 : * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
3960 : * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
3961 : * relation
3962 : *
3963 : * This equation works correctly for outer tuples having no inner match
3964 : * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
3965 : * are effectively subtracting those from the number of rescanned tuples,
3966 : * when we should not. Can we do better without expensive selectivity
3967 : * computations?
3968 : *
3969 : * The whole issue is moot if we know we don't need to mark/restore at
3970 : * all, or if we are working from a unique-ified outer input.
3971 : */
3972 458468 : if (path->skip_mark_restore ||
3973 322424 : RELATION_WAS_MADE_UNIQUE(outer_path->parent, extra->sjinfo,
3974 : path->jpath.jointype))
3975 140272 : rescannedtuples = 0;
3976 : else
3977 : {
3978 318196 : rescannedtuples = mergejointuples - inner_path_rows;
3979 : /* Must clamp because of possible underestimate */
3980 318196 : if (rescannedtuples < 0)
3981 77604 : rescannedtuples = 0;
3982 : }
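	/*
	 * Worked example of this estimate: if the outer rel's merge key
	 * values are {1, 1, 2} (m1 = 2, m2 = 1) and the inner rel's are
	 * {1, 1, 1, 2, 2} (n1 = 3, n2 = 2), the join emits 2*3 + 1*2 = 8
	 * tuples while the inner rel holds 5, so rescannedtuples =
	 * 8 - 5 = 3: the three inner tuples with key 1 are re-fetched once
	 * for the duplicate outer key.
	 */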
3983 :
3984 : /*
3985 : * We'll inflate various costs this much to account for rescanning. Note
3986 : * that this is to be multiplied by something involving inner_rows, or
3987 : * another number related to the portion of the inner rel we'll scan.
3988 : */
3989 458468 : rescanratio = 1.0 + (rescannedtuples / inner_rows);
3990 :
3991 : /*
3992 : * Decide whether we want to materialize the inner input to shield it from
3993 : * mark/restore and performing re-fetches. Our cost model for regular
3994 : * re-fetches is that a re-fetch costs the same as an original fetch,
3995 : * which is probably an overestimate; but on the other hand we ignore the
3996 : * bookkeeping costs of mark/restore. Not clear if it's worth developing
3997 : * a more refined model. So we just need to inflate the inner run cost by
3998 : * rescanratio.
3999 : */
4000 458468 : bare_inner_cost = inner_run_cost * rescanratio;
4001 :
4002 : /*
4003 : * When we interpose a Material node the re-fetch cost is assumed to be
4004 : * just cpu_operator_cost per tuple, independently of the underlying
4005 : * plan's cost; and we charge an extra cpu_operator_cost per original
4006 : * fetch as well. Note that we're assuming the materialize node will
4007 : * never spill to disk, since it only has to remember tuples back to the
4008 : * last mark. (If there are a huge number of duplicates, our other cost
4009 : * factors will make the path so expensive that it probably won't get
4010 : * chosen anyway.) So we don't use cost_rescan here.
4011 : *
4012 : * Note: keep this estimate in sync with create_mergejoin_plan's labeling
4013 : * of the generated Material node.
4014 : */
4015 458468 : mat_inner_cost = inner_run_cost +
4016 458468 : cpu_operator_cost * inner_rows * rescanratio;
4017 :
4018 : /*
4019 : * If we don't need mark/restore at all, we don't need materialization.
4020 : */
4021 458468 : if (path->skip_mark_restore)
4022 136044 : path->materialize_inner = false;
4023 :
4024 : /*
4025 : * Prefer materializing if it looks cheaper, unless the user has asked to
4026 : * suppress materialization.
4027 : */
4028 322424 : else if (enable_material && mat_inner_cost < bare_inner_cost)
4029 3548 : path->materialize_inner = true;
4030 :
4031 : /*
4032 : * Even if materializing doesn't look cheaper, we *must* do it if the
4033 : * inner path is to be used directly (without sorting) and it doesn't
4034 : * support mark/restore.
4035 : *
4036 : * Since the inner side must be ordered, and only Sorts and IndexScans can
4037 : * create order to begin with, and they both support mark/restore, you
4038 : * might think there's no problem --- but you'd be wrong. Nestloop and
4039 : * merge joins can *preserve* the order of their inputs, so they can be
4040 : * selected as the input of a mergejoin, and they don't support
4041 : * mark/restore at present.
4042 : *
4043 : * We don't test the value of enable_material here, because
4044 : * materialization is required for correctness in this case, and turning
4045 : * it off does not entitle us to deliver an invalid plan.
4046 : */
4047 318876 : else if (innersortkeys == NIL &&
4048 8788 : !ExecSupportsMarkRestore(inner_path))
4049 1848 : path->materialize_inner = true;
4050 :
4051 : /*
4052 : * Also, force materializing if the inner path is to be sorted and the
4053 : * sort is expected to spill to disk. This is because the final merge
4054 : * pass can be done on-the-fly if it doesn't have to support mark/restore.
4055 : * We don't try to adjust the cost estimates for this consideration,
4056 : * though.
4057 : *
4058 : * Since materialization is a performance optimization in this case,
4059 : * rather than necessary for correctness, we skip it if enable_material is
4060 : * off.
4061 : */
4062 317028 : else if (enable_material && innersortkeys != NIL &&
4063 310040 : relation_byte_size(inner_path_rows,
4064 310040 : inner_path->pathtarget->width) >
4065 310040 : work_mem * (Size) 1024)
4066 284 : path->materialize_inner = true;
4067 : else
4068 316744 : path->materialize_inner = false;
4069 :
4070 : /* Charge the right incremental cost for the chosen case */
4071 458468 : if (path->materialize_inner)
4072 5680 : run_cost += mat_inner_cost;
4073 : else
4074 452788 : run_cost += bare_inner_cost;
4075 :
4076 : /* CPU costs */
4077 :
4078 : /*
4079 : * The number of tuple comparisons needed is approximately number of outer
4080 : * rows plus number of inner rows plus number of rescanned tuples (can we
4081 : * refine this?). At each one, we need to evaluate the mergejoin quals.
4082 : */
4083 458468 : startup_cost += merge_qual_cost.startup;
4084 458468 : startup_cost += merge_qual_cost.per_tuple *
4085 458468 : (outer_skip_rows + inner_skip_rows * rescanratio);
4086 458468 : run_cost += merge_qual_cost.per_tuple *
4087 458468 : ((outer_rows - outer_skip_rows) +
4088 458468 : (inner_rows - inner_skip_rows) * rescanratio);
4089 :
4090 : /*
4091 : * For each tuple that gets through the mergejoin proper, we charge
4092 : * cpu_tuple_cost plus the cost of evaluating additional restriction
4093 : * clauses that are to be applied at the join. (This is pessimistic since
4094 : * not all of the quals may get evaluated at each tuple.)
4095 : *
4096 : * Note: we could adjust for SEMI/ANTI joins skipping some qual
4097 : * evaluations here, but it's probably not worth the trouble.
4098 : */
4099 458468 : startup_cost += qp_qual_cost.startup;
4100 458468 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4101 458468 : run_cost += cpu_per_tuple * mergejointuples;
4102 :
4103 : /* tlist eval costs are paid per output row, not per tuple scanned */
4104 458468 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4105 458468 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4106 :
4107 458468 : path->jpath.path.startup_cost = startup_cost;
4108 458468 : path->jpath.path.total_cost = startup_cost + run_cost;
4109 458468 : }
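
/*
 * Worked example of the materialize decision, with assumed numbers:
 * inner_run_cost = 1000, inner_rows = 10000, rescannedtuples = 10000
 * (so rescanratio = 2.0), and the default cpu_operator_cost of 0.0025
 * give
 *	bare_inner_cost = 1000 * 2.0 = 2000
 *	mat_inner_cost = 1000 + 0.0025 * 10000 * 2.0 = 1050
 * so with enable_material on and mark/restore required, interposing a
 * Material node looks cheaper and materialize_inner is set to true.
 */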
4110 :
4111 : /*
4112 : * run mergejoinscansel() with caching
4113 : */
4114 : static MergeScanSelCache *
4115 1460138 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
4116 : {
4117 : MergeScanSelCache *cache;
4118 : ListCell *lc;
4119 : Selectivity leftstartsel,
4120 : leftendsel,
4121 : rightstartsel,
4122 : rightendsel;
4123 : MemoryContext oldcontext;
4124 :
4125 : /* Do we have this result already? */
4126 1460216 : foreach(lc, rinfo->scansel_cache)
4127 : {
4128 1319158 : cache = (MergeScanSelCache *) lfirst(lc);
4129 1319158 : if (cache->opfamily == pathkey->pk_opfamily &&
4130 1319158 : cache->collation == pathkey->pk_eclass->ec_collation &&
4131 1319158 : cache->cmptype == pathkey->pk_cmptype &&
4132 1319080 : cache->nulls_first == pathkey->pk_nulls_first)
4133 1319080 : return cache;
4134 : }
4135 :
4136 : /* Nope, do the computation */
4137 141058 : mergejoinscansel(root,
4138 141058 : (Node *) rinfo->clause,
4139 : pathkey->pk_opfamily,
4140 : pathkey->pk_cmptype,
4141 141058 : pathkey->pk_nulls_first,
4142 : &leftstartsel,
4143 : &leftendsel,
4144 : &rightstartsel,
4145 : &rightendsel);
4146 :
4147 : /* Cache the result in suitably long-lived workspace */
4148 141058 : oldcontext = MemoryContextSwitchTo(root->planner_cxt);
4149 :
4150 141058 : cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
4151 141058 : cache->opfamily = pathkey->pk_opfamily;
4152 141058 : cache->collation = pathkey->pk_eclass->ec_collation;
4153 141058 : cache->cmptype = pathkey->pk_cmptype;
4154 141058 : cache->nulls_first = pathkey->pk_nulls_first;
4155 141058 : cache->leftstartsel = leftstartsel;
4156 141058 : cache->leftendsel = leftendsel;
4157 141058 : cache->rightstartsel = rightstartsel;
4158 141058 : cache->rightendsel = rightendsel;
4159 :
4160 141058 : rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
4161 :
4162 141058 : MemoryContextSwitchTo(oldcontext);
4163 :
4164 141058 : return cache;
4165 : }
4166 :
4167 : /*
4168 : * initial_cost_hashjoin
4169 : * Preliminary estimate of the cost of a hashjoin path.
4170 : *
4171 : * This must quickly produce lower-bound estimates of the path's startup and
4172 : * total costs. If we are unable to eliminate the proposed path from
4173 : * consideration using the lower bounds, final_cost_hashjoin will be called
4174 : * to obtain the final estimates.
4175 : *
4176 : * The exact division of labor between this function and final_cost_hashjoin
4177 : * is private to them, and represents a tradeoff between speed of the initial
4178 : * estimate and getting a tight lower bound. We choose to not examine the
4179 : * join quals here (other than by counting the number of hash clauses),
4180 : * so we can't do much with CPU costs. We do assume that
4181 : * ExecChooseHashTableSize is cheap enough to use here.
4182 : *
4183 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
4184 : * other data to be used by final_cost_hashjoin
4185 : * 'jointype' is the type of join to be performed
4186 : * 'hashclauses' is the list of joinclauses to be used as hash clauses
4187 : * 'outer_path' is the outer input to the join
4188 : * 'inner_path' is the inner input to the join
4189 : * 'extra' contains miscellaneous information about the join
4190 : * 'parallel_hash' indicates that inner_path is partial and that a shared
4191 : * hash table will be built in parallel
4192 : */
4193 : void
4194 869020 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
4195 : JoinType jointype,
4196 : List *hashclauses,
4197 : Path *outer_path, Path *inner_path,
4198 : JoinPathExtraData *extra,
4199 : bool parallel_hash)
4200 : {
4201 : int disabled_nodes;
4202 869020 : Cost startup_cost = 0;
4203 869020 : Cost run_cost = 0;
4204 869020 : double outer_path_rows = outer_path->rows;
4205 869020 : double inner_path_rows = inner_path->rows;
4206 869020 : double inner_path_rows_total = inner_path_rows;
4207 869020 : int num_hashclauses = list_length(hashclauses);
4208 : int numbuckets;
4209 : int numbatches;
4210 : int num_skew_mcvs;
4211 : size_t space_allowed; /* unused */
4212 :
4213 : /* Count up disabled nodes. */
4214 869020 : disabled_nodes = enable_hashjoin ? 0 : 1;
4215 869020 : disabled_nodes += inner_path->disabled_nodes;
4216 869020 : disabled_nodes += outer_path->disabled_nodes;
4217 :
4218 : /* cost of source data */
4219 869020 : startup_cost += outer_path->startup_cost;
4220 869020 : run_cost += outer_path->total_cost - outer_path->startup_cost;
4221 869020 : startup_cost += inner_path->total_cost;
4222 :
4223 : /*
4224 : * Cost of computing hash function: must do it once per input tuple. We
4225 : * charge one cpu_operator_cost for each column's hash function. Also,
4226 : * tack on one cpu_tuple_cost per inner row, to model the costs of
4227 : * inserting the row into the hashtable.
4228 : *
4229 : * XXX when a hashclause is more complex than a single operator, we really
4230 : * should charge the extra eval costs of the left or right side, as
4231 : * appropriate, here. This seems more work than it's worth at the moment.
4232 : */
4233 869020 : startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
4234 869020 : * inner_path_rows;
4235 869020 : run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
4236 :
4237 : /*
4238 : * If this is a parallel hash build, then the value we have for
4239 : * inner_rows_total currently refers only to the rows returned by each
4240 : * participant. For shared hash table size estimation, we need the total
4241 : * number, so we need to undo the division.
4242 : */
4243 869020 : if (parallel_hash)
4244 75672 : inner_path_rows_total *= get_parallel_divisor(inner_path);
4245 :
4246 : /*
4247 : * Get hash table size that executor would use for inner relation.
4248 : *
4249 : * XXX for the moment, always assume that skew optimization will be
4250 : * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
4251 : * trying to determine that for sure.
4252 : *
4253 : * XXX at some point it might be interesting to try to account for skew
4254 : * optimization in the cost estimate, but for now, we don't.
4255 : */
4256 869020 : ExecChooseHashTableSize(inner_path_rows_total,
4257 869020 : inner_path->pathtarget->width,
4258 : true, /* useskew */
4259 : parallel_hash, /* try_combined_hash_mem */
4260 : outer_path->parallel_workers,
4261 : &space_allowed,
4262 : &numbuckets,
4263 : &numbatches,
4264 : &num_skew_mcvs);
4265 :
4266 : /*
4267 : * If inner relation is too big then we will need to "batch" the join,
4268 : * which implies writing and reading most of the tuples to disk an extra
4269 : * time. Charge seq_page_cost per page, since the I/O should be nice and
4270 : * sequential. Writing the inner rel counts as startup cost, all the rest
4271 : * as run cost.
4272 : */
4273 869020 : if (numbatches > 1)
4274 : {
4275 4642 : double outerpages = page_size(outer_path_rows,
4276 4642 : outer_path->pathtarget->width);
4277 4642 : double innerpages = page_size(inner_path_rows,
4278 4642 : inner_path->pathtarget->width);
4279 :
4280 4642 : startup_cost += seq_page_cost * innerpages;
4281 4642 : run_cost += seq_page_cost * (innerpages + 2 * outerpages);
4282 : }
4283 :
4284 : /* CPU costs left for later */
4285 :
4286 : /* Public result fields */
4287 869020 : workspace->disabled_nodes = disabled_nodes;
4288 869020 : workspace->startup_cost = startup_cost;
4289 869020 : workspace->total_cost = startup_cost + run_cost;
4290 : /* Save private data for final_cost_hashjoin */
4291 869020 : workspace->run_cost = run_cost;
4292 869020 : workspace->numbuckets = numbuckets;
4293 869020 : workspace->numbatches = numbatches;
4294 869020 : workspace->inner_rows_total = inner_path_rows_total;
4295 869020 : }
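
/*
 * Worked example of the batching charge above (illustrative only; the
 * numbers are invented).  With seq_page_cost = 1.0, innerpages = 1000 and
 * outerpages = 5000, a multi-batch join adds 1000 cost units at startup
 * (writing out the inner rel) plus 1000 + 2 * 5000 = 11000 units of run
 * cost (re-reading the inner rel, and writing then re-reading the outer
 * rel).  A single-batch join pays neither charge.
 */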
4296 :
4297 : /*
4298 : * final_cost_hashjoin
4299 : * Final estimate of the cost and result size of a hashjoin path.
4300 : *
4301 : * Note: the numbatches estimate is also saved into 'path' for use later
4302 : *
4303 : * 'path' is already filled in except for the rows and cost fields and
4304 : * num_batches
4305 : * 'workspace' is the result from initial_cost_hashjoin
4306 : * 'extra' contains miscellaneous information about the join
4307 : */
4308 : void
4309 453254 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4310 : JoinCostWorkspace *workspace,
4311 : JoinPathExtraData *extra)
4312 : {
4313 453254 : Path *outer_path = path->jpath.outerjoinpath;
4314 453254 : Path *inner_path = path->jpath.innerjoinpath;
4315 453254 : double outer_path_rows = outer_path->rows;
4316 453254 : double inner_path_rows = inner_path->rows;
4317 453254 : double inner_path_rows_total = workspace->inner_rows_total;
4318 453254 : List *hashclauses = path->path_hashclauses;
4319 453254 : Cost startup_cost = workspace->startup_cost;
4320 453254 : Cost run_cost = workspace->run_cost;
4321 453254 : int numbuckets = workspace->numbuckets;
4322 453254 : int numbatches = workspace->numbatches;
4323 : Cost cpu_per_tuple;
4324 : QualCost hash_qual_cost;
4325 : QualCost qp_qual_cost;
4326 : double hashjointuples;
4327 : double virtualbuckets;
4328 : Selectivity innerbucketsize;
4329 : Selectivity innermcvfreq;
4330 : ListCell *hcl;
4331 :
4332 : /* Set the number of disabled nodes. */
4333 453254 : path->jpath.path.disabled_nodes = workspace->disabled_nodes;
4334 :
4335 : /* Mark the path with the correct row estimate */
4336 453254 : if (path->jpath.path.param_info)
4337 2118 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4338 : else
4339 451136 : path->jpath.path.rows = path->jpath.path.parent->rows;
4340 :
4341 : /* For partial paths, scale row estimate. */
4342 453254 : if (path->jpath.path.parallel_workers > 0)
4343 : {
4344 107536 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
4345 :
4346 107536 : path->jpath.path.rows =
4347 107536 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
4348 : }
4349 :
4350 : /* mark the path with estimated # of batches */
4351 453254 : path->num_batches = numbatches;
4352 :
4353 : /* store the total number of tuples (sum of partial row estimates) */
4354 453254 : path->inner_rows_total = inner_path_rows_total;
4355 :
4356 : /* and compute the number of "virtual" buckets in the whole join */
4357 453254 : virtualbuckets = (double) numbuckets * (double) numbatches;
4358 :
4359 : /*
4360 : * Determine bucketsize fraction and MCV frequency for the inner relation.
4361 : * We use the smallest bucketsize or MCV frequency estimated for any
4362 : * individual hashclause; this is undoubtedly conservative.
4363 : *
4364 : * BUT: if inner relation has been unique-ified, we can assume it's good
4365 : * for hashing. This is important both because it's the right answer, and
4366 : * because we avoid contaminating the cache with a value that's wrong for
4367 : * non-unique-ified paths.
4368 : */
4369 453254 : if (RELATION_WAS_MADE_UNIQUE(inner_path->parent, extra->sjinfo,
4370 : path->jpath.jointype))
4371 : {
4372 4216 : innerbucketsize = 1.0 / virtualbuckets;
4373 4216 : innermcvfreq = 0.0;
4374 : }
4375 : else
4376 : {
4377 : List *otherclauses;
4378 :
4379 449038 : innerbucketsize = 1.0;
4380 449038 : innermcvfreq = 1.0;
4381 :
4382 : /* At first, try to estimate bucket size using extended statistics. */
4383 449038 : otherclauses = estimate_multivariate_bucketsize(root,
4384 : inner_path->parent,
4385 : hashclauses,
4386 : &innerbucketsize);
4387 :
4388 : /* Pass through the remaining clauses */
4389 935346 : foreach(hcl, otherclauses)
4390 : {
4391 486308 : RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
4392 : Selectivity thisbucketsize;
4393 : Selectivity thismcvfreq;
4394 :
4395 : /*
4396 : * First we have to figure out which side of the hashjoin clause
4397 : * is the inner side.
4398 : *
4399 : * Since we tend to visit the same clauses over and over when
4400 : * planning a large query, we cache the bucket stats estimates in
4401 : * the RestrictInfo node to avoid repeated lookups of statistics.
4402 : */
4403 486308 : if (bms_is_subset(restrictinfo->right_relids,
4404 486308 : inner_path->parent->relids))
4405 : {
4406 : /* righthand side is inner */
4407 251658 : thisbucketsize = restrictinfo->right_bucketsize;
4408 251658 : if (thisbucketsize < 0)
4409 : {
4410 : /* not cached yet */
4411 108944 : estimate_hash_bucket_stats(root,
4412 108944 : get_rightop(restrictinfo->clause),
4413 : virtualbuckets,
4414 : &restrictinfo->right_mcvfreq,
4415 : &restrictinfo->right_bucketsize);
4416 108944 : thisbucketsize = restrictinfo->right_bucketsize;
4417 : }
4418 251658 : thismcvfreq = restrictinfo->right_mcvfreq;
4419 : }
4420 : else
4421 : {
4422 : Assert(bms_is_subset(restrictinfo->left_relids,
4423 : inner_path->parent->relids));
4424 : /* lefthand side is inner */
4425 234650 : thisbucketsize = restrictinfo->left_bucketsize;
4426 234650 : if (thisbucketsize < 0)
4427 : {
4428 : /* not cached yet */
4429 95558 : estimate_hash_bucket_stats(root,
4430 95558 : get_leftop(restrictinfo->clause),
4431 : virtualbuckets,
4432 : &restrictinfo->left_mcvfreq,
4433 : &restrictinfo->left_bucketsize);
4434 95558 : thisbucketsize = restrictinfo->left_bucketsize;
4435 : }
4436 234650 : thismcvfreq = restrictinfo->left_mcvfreq;
4437 : }
4438 :
4439 486308 : if (innerbucketsize > thisbucketsize)
4440 367074 : innerbucketsize = thisbucketsize;
4441 486308 : if (innermcvfreq > thismcvfreq)
4442 452638 : innermcvfreq = thismcvfreq;
4443 : }
4444 : }
4445 :
4446 : /*
4447 : * If the bucket holding the inner MCV would exceed hash_mem, we don't
4448 : * want to hash unless there is really no other alternative, so apply
4449 : * disable_cost. (The executor normally copes with excessive memory usage
4450 : * by splitting batches, but obviously it cannot separate equal values
4451 : * that way, so it will be unable to drive the batch size below hash_mem
4452 : * when this is true.)
4453 : */
4454 453254 : if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
4455 906508 : inner_path->pathtarget->width) > get_hash_memory_limit())
4456 8 : startup_cost += disable_cost;
4457 :
4458 : /*
4459 : * Compute cost of the hashquals and qpquals (other restriction clauses)
4460 : * separately.
4461 : */
4462 453254 : cost_qual_eval(&hash_qual_cost, hashclauses, root);
4463 453254 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4464 453254 : qp_qual_cost.startup -= hash_qual_cost.startup;
4465 453254 : qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4466 :
4467 : /* CPU costs */
4468 :
4469 453254 : if (path->jpath.jointype == JOIN_SEMI ||
4470 447070 : path->jpath.jointype == JOIN_ANTI ||
4471 442532 : extra->inner_unique)
4472 122388 : {
4473 : double outer_matched_rows;
4474 : Selectivity inner_scan_frac;
4475 :
4476 : /*
4477 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
4478 : * executor will stop after the first match.
4479 : *
4480 : * For an outer-rel row that has at least one match, we can expect the
4481 : * bucket scan to stop after a fraction 1/(match_count+1) of the
4482 : * bucket's rows, if the matches are evenly distributed. Since they
4483 : * probably aren't quite evenly distributed, we apply a fuzz factor of
4484 : * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4485 : * to clamp inner_scan_frac to at most 1.0; but since match_count is
4486 : * at least 1, no such clamp is needed now.)
4487 : */
4488 122388 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4489 122388 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
4490 :
4491 122388 : startup_cost += hash_qual_cost.startup;
4492 244776 : run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4493 122388 : clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
4494 :
4495 : /*
4496 : * For unmatched outer-rel rows, the picture is quite a lot different.
4497 : * In the first place, there is no reason to assume that these rows
4498 : * preferentially hit heavily-populated buckets; instead assume they
4499 : * are uncorrelated with the inner distribution and so they see an
4500 : * average bucket size of inner_path_rows / virtualbuckets. In the
4501 : * second place, it seems likely that they will have few if any exact
4502 : * hash-code matches and so very few of the tuples in the bucket will
4503 : * actually require eval of the hash quals. We don't have any good
4504 : * way to estimate how many will, but for the moment assume that the
4505 : * effective cost per bucket entry is one-tenth what it is for
4506 : * matchable tuples.
4507 : */
4508 244776 : run_cost += hash_qual_cost.per_tuple *
4509 244776 : (outer_path_rows - outer_matched_rows) *
4510 122388 : clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
4511 :
4512 : /* Get # of tuples that will pass the basic join */
4513 122388 : if (path->jpath.jointype == JOIN_ANTI)
4514 4538 : hashjointuples = outer_path_rows - outer_matched_rows;
4515 : else
4516 117850 : hashjointuples = outer_matched_rows;
4517 : }
4518 : else
4519 : {
4520 : /*
4521 : * The number of tuple comparisons needed is the number of outer
4522 : * tuples times the typical number of tuples in a hash bucket, which
4523 : * is the inner relation size times its bucketsize fraction. At each
4524 : * one, we need to evaluate the hashjoin quals. But actually,
4525 : * charging the full qual eval cost at each tuple is pessimistic,
4526 : * since we don't evaluate the quals unless the hash values match
4527 : * exactly. For lack of a better idea, halve the cost estimate to
4528 : * allow for that.
4529 : */
4530 330866 : startup_cost += hash_qual_cost.startup;
4531 661732 : run_cost += hash_qual_cost.per_tuple * outer_path_rows *
4532 330866 : clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
4533 :
4534 : /*
4535 : * Get approx # tuples passing the hashquals. We use
4536 : * approx_tuple_count here because we need an estimate done with
4537 : * JOIN_INNER semantics.
4538 : */
4539 330866 : hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4540 : }
4541 :
4542 : /*
4543 : * For each tuple that gets through the hashjoin proper, we charge
4544 : * cpu_tuple_cost plus the cost of evaluating additional restriction
4545 : * clauses that are to be applied at the join. (This is pessimistic since
4546 : * not all of the quals may get evaluated at each tuple.)
4547 : */
4548 453254 : startup_cost += qp_qual_cost.startup;
4549 453254 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4550 453254 : run_cost += cpu_per_tuple * hashjointuples;
4551 :
4552 : /* tlist eval costs are paid per output row, not per tuple scanned */
4553 453254 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4554 453254 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4555 :
4556 453254 : path->jpath.path.startup_cost = startup_cost;
4557 453254 : path->jpath.path.total_cost = startup_cost + run_cost;
4558 453254 : }
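
/*
 * Worked example of the SEMI/ANTI branch above (illustrative only).
 * Suppose outer_path_rows = 1000, outer_match_frac = 0.2 and
 * match_count = 4.  Then outer_matched_rows = 200 and inner_scan_frac =
 * 2.0 / (4 + 1) = 0.4, so each matched outer row is charged for scanning
 * 40% of its expected bucket (further halved by the 0.5 factor), while
 * the 800 unmatched rows are charged for an average-sized bucket
 * (inner_path_rows / virtualbuckets) at the reduced 0.05 rate.  For
 * JOIN_SEMI, hashjointuples = 200; for JOIN_ANTI it is the other 800.
 */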
4559 :
4560 :
4561 : /*
4562 : * cost_subplan
4563 : * Figure the costs for a SubPlan (or initplan).
4564 : *
4565 : * Note: we could dig the subplan's Plan out of the root list, but in practice
4566 : * all callers have it handy already, so we make them pass it.
4567 : */
4568 : void
4569 43662 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
4570 : {
4571 : QualCost sp_cost;
4572 :
4573 : /*
4574 : * Figure any cost for evaluating the testexpr.
4575 : *
4576 : * Usually, SubPlan nodes are built very early, before we have constructed
4577 : * any RelOptInfos for the parent query level, which means the parent root
4578 : * does not yet contain enough information to safely consult statistics.
4579 : * Therefore, we pass root as NULL here. cost_qual_eval() is already
4580 : * well-equipped to handle a NULL root.
4581 : *
4582 : * One exception is SubPlan nodes built for the initplans of MIN/MAX
4583 : * aggregates from indexes (cf. SS_make_initplan_from_plan). In this
4584 : * case, having a NULL root is safe because testexpr will be NULL.
4585 : * Besides, an initplan will by definition not consult anything from the
4586 : * parent plan.
4587 : */
4588 43662 : cost_qual_eval(&sp_cost,
4589 43662 : make_ands_implicit((Expr *) subplan->testexpr),
4590 : NULL);
4591 :
4592 43662 : if (subplan->useHashTable)
4593 : {
4594 : /*
4595 : * If we are using a hash table for the subquery outputs, then the
4596 : * cost of evaluating the query is a one-time cost. We charge one
4597 : * cpu_operator_cost per tuple for the work of loading the hashtable,
4598 : * too.
4599 : */
4600 2108 : sp_cost.startup += plan->total_cost +
4601 2108 : cpu_operator_cost * plan->plan_rows;
4602 :
4603 : /*
4604 : * The per-tuple costs include the cost of evaluating the lefthand
4605 : * expressions, plus the cost of probing the hashtable. We already
4606 : * accounted for the lefthand expressions as part of the testexpr, and
4607 : * will also have counted one cpu_operator_cost for each comparison
4608 : * operator. That is probably too low for the probing cost, but it's
4609 : * hard to make a better estimate, so live with it for now.
4610 : */
4611 : }
4612 : else
4613 : {
4614 : /*
4615 : * Otherwise we will be rescanning the subplan output on each
4616 : * evaluation. We need to estimate how much of the output we will
4617 : * actually need to scan. NOTE: this logic should agree with the
4618 : * tuple_fraction estimates used by make_subplan() in
4619 : * plan/subselect.c.
4620 : */
4621 41554 : Cost plan_run_cost = plan->total_cost - plan->startup_cost;
4622 :
4623 41554 : if (subplan->subLinkType == EXISTS_SUBLINK)
4624 : {
4625 : /* we only need to fetch 1 tuple; clamp to avoid zero divide */
4626 2496 : sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4627 : }
4628 39058 : else if (subplan->subLinkType == ALL_SUBLINK ||
4629 39040 : subplan->subLinkType == ANY_SUBLINK)
4630 : {
4631 : /* assume we need 50% of the tuples */
4632 146 : sp_cost.per_tuple += 0.50 * plan_run_cost;
4633 : /* also charge a cpu_operator_cost per row examined */
4634 146 : sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4635 : }
4636 : else
4637 : {
4638 : /* assume we need all tuples */
4639 38912 : sp_cost.per_tuple += plan_run_cost;
4640 : }
4641 :
4642 : /*
4643 : * Also account for subplan's startup cost. If the subplan is
4644 : * uncorrelated or undirect correlated, AND its topmost node is one
4645 : * that materializes its output, assume that we'll only need to pay
4646 : * its startup cost once; otherwise assume we pay the startup cost
4647 : * every time.
4648 : */
4649 54702 : if (subplan->parParam == NIL &&
4650 13148 : ExecMaterializesOutput(nodeTag(plan)))
4651 722 : sp_cost.startup += plan->startup_cost;
4652 : else
4653 40832 : sp_cost.per_tuple += plan->startup_cost;
4654 : }
4655 :
4656 43662 : subplan->startup_cost = sp_cost.startup;
4657 43662 : subplan->per_call_cost = sp_cost.per_tuple;
4658 43662 : }
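
/*
 * Worked example of the per-call charges above (illustrative only).
 * Suppose the subplan has startup_cost = 10, total_cost = 110 (so
 * plan_run_cost = 100) and plan_rows = 50.  An EXISTS sublink is charged
 * 100 / 50 = 2 cost units per call, since fetching one tuple settles the
 * answer.  An ANY/ALL sublink is charged 0.5 * 100 plus 0.5 * 50 *
 * cpu_operator_cost.  Any other sublink type is charged the full 100.
 * The startup cost of 10 is added once (if the subplan has no parParam
 * and its top node materializes) or to the per-call cost otherwise.
 */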
4659 :
4660 :
4661 : /*
4662 : * cost_rescan
4663 : * Given a finished Path, estimate the costs of rescanning it after
4664 : * having done so the first time. For some Path types a rescan is
4665 : * cheaper than an original scan (if no parameters change), and this
4666 : * function embodies knowledge about that. The default is to return
4667 : * the same costs stored in the Path. (Note that the cost estimates
4668 : * actually stored in Paths are always for first scans.)
4669 : *
4670 : * This function is not currently intended to model effects such as rescans
4671 : * being cheaper due to disk block caching; what we are concerned with is
4672 : * plan types wherein the executor caches results explicitly, or doesn't
4673 : * redo startup calculations, etc.
4674 : */
4675 : static void
4676 3222444 : cost_rescan(PlannerInfo *root, Path *path,
4677 : Cost *rescan_startup_cost, /* output parameters */
4678 : Cost *rescan_total_cost)
4679 : {
4680 3222444 : switch (path->pathtype)
4681 : {
4682 49392 : case T_FunctionScan:
4683 :
4684 : /*
4685 : * Currently, nodeFunctionscan.c always executes the function to
4686 : * completion before returning any rows, and caches the results in
4687 : * a tuplestore. So the function eval cost is all startup cost
4688 : * and isn't paid over again on rescans. However, all run costs
4689 : * will be paid over again.
4690 : */
4691 49392 : *rescan_startup_cost = 0;
4692 49392 : *rescan_total_cost = path->total_cost - path->startup_cost;
4693 49392 : break;
4694 130722 : case T_HashJoin:
4695 :
4696 : /*
4697 : * If it's a single-batch join, we don't need to rebuild the hash
4698 : * table during a rescan.
4699 : */
4700 130722 : if (((HashPath *) path)->num_batches == 1)
4701 : {
4702 : /* Startup cost is exactly the cost of hash table building */
4703 130722 : *rescan_startup_cost = 0;
4704 130722 : *rescan_total_cost = path->total_cost - path->startup_cost;
4705 : }
4706 : else
4707 : {
4708 : /* Otherwise, no special treatment */
4709 0 : *rescan_startup_cost = path->startup_cost;
4710 0 : *rescan_total_cost = path->total_cost;
4711 : }
4712 130722 : break;
4713 7992 : case T_CteScan:
4714 : case T_WorkTableScan:
4715 : {
4716 : /*
4717 : * These plan types materialize their final result in a
4718 : * tuplestore or tuplesort object. So the rescan cost is only
4719 : * cpu_tuple_cost per tuple, unless the result is large enough
4720 : * to spill to disk.
4721 : */
4722 7992 : Cost run_cost = cpu_tuple_cost * path->rows;
4723 7992 : double nbytes = relation_byte_size(path->rows,
4724 7992 : path->pathtarget->width);
4725 7992 : double work_mem_bytes = work_mem * (Size) 1024;
4726 :
4727 7992 : if (nbytes > work_mem_bytes)
4728 : {
4729 : /* It will spill, so account for re-read cost */
4730 352 : double npages = ceil(nbytes / BLCKSZ);
4731 :
4732 352 : run_cost += seq_page_cost * npages;
4733 : }
4734 7992 : *rescan_startup_cost = 0;
4735 7992 : *rescan_total_cost = run_cost;
4736 : }
4737 7992 : break;
4738 1166388 : case T_Material:
4739 : case T_Sort:
4740 : {
4741 : /*
4742 : * These plan types not only materialize their results, but do
4743 : * not implement qual filtering or projection. So they are
4744 : * even cheaper to rescan than the ones above. We charge only
4745 : * cpu_operator_cost per tuple. (Note: keep that in sync with
4746 : * the run_cost charge in cost_sort, and also see comments in
4747 : * cost_material before you change it.)
4748 : */
4749 1166388 : Cost run_cost = cpu_operator_cost * path->rows;
4750 1166388 : double nbytes = relation_byte_size(path->rows,
4751 1166388 : path->pathtarget->width);
4752 1166388 : double work_mem_bytes = work_mem * (Size) 1024;
4753 :
4754 1166388 : if (nbytes > work_mem_bytes)
4755 : {
4756 : /* It will spill, so account for re-read cost */
4757 9914 : double npages = ceil(nbytes / BLCKSZ);
4758 :
4759 9914 : run_cost += seq_page_cost * npages;
4760 : }
4761 1166388 : *rescan_startup_cost = 0;
4762 1166388 : *rescan_total_cost = run_cost;
4763 : }
4764 1166388 : break;
4765 285082 : case T_Memoize:
4766 : /* All the hard work is done by cost_memoize_rescan */
4767 285082 : cost_memoize_rescan(root, (MemoizePath *) path,
4768 : rescan_startup_cost, rescan_total_cost);
4769 285082 : break;
4770 1582868 : default:
4771 1582868 : *rescan_startup_cost = path->startup_cost;
4772 1582868 : *rescan_total_cost = path->total_cost;
4773 1582868 : break;
4774 : }
4775 3222444 : }
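
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * planner): one plausible way a nestloop-style caller could combine
 * cost_rescan's outputs with the inner path's first-scan cost.  The real
 * logic in initial_cost_nestloop/final_cost_nestloop is considerably more
 * careful about startup cost and early-exit joins.
 */
static Cost
rescan_aware_inner_cost(PlannerInfo *root, Path *inner_path,
						double outer_rows)
{
	Cost		rescan_startup_cost;
	Cost		rescan_total_cost;

	cost_rescan(root, inner_path, &rescan_startup_cost, &rescan_total_cost);

	/* startup-cost handling is omitted in this sketch */
	(void) rescan_startup_cost;

	/* pay the full first-scan cost once, then one rescan per extra row */
	if (outer_rows > 1.0)
		return inner_path->total_cost +
			(outer_rows - 1.0) * rescan_total_cost;
	return inner_path->total_cost;
}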
4776 :
4777 :
4778 : /*
4779 : * cost_qual_eval
4780 : * Estimate the CPU costs of evaluating a WHERE clause.
4781 : * The input can be either an implicitly-ANDed list of boolean
4782 : * expressions, or a list of RestrictInfo nodes. (The latter is
4783 : * preferred since it allows caching of the results.)
4784 : * The result includes both a one-time (startup) component,
4785 : * and a per-evaluation component.
4786 : *
4787 : * Note: in some code paths root can be passed as NULL, resulting in
4788 : * slightly worse estimates.
4789 : */
4790 : void
4791 4617088 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4792 : {
4793 : cost_qual_eval_context context;
4794 : ListCell *l;
4795 :
4796 4617088 : context.root = root;
4797 4617088 : context.total.startup = 0;
4798 4617088 : context.total.per_tuple = 0;
4799 :
4800 : /* We don't charge any cost for the implicit ANDing at top level ... */
4801 :
4802 8814400 : foreach(l, quals)
4803 : {
4804 4197312 : Node *qual = (Node *) lfirst(l);
4805 :
4806 4197312 : cost_qual_eval_walker(qual, &context);
4807 : }
4808 :
4809 4617088 : *cost = context.total;
4810 4617088 : }
4811 :
4812 : /*
4813 : * cost_qual_eval_node
4814 : * As above, for a single RestrictInfo or expression.
4815 : */
4816 : void
4817 1821710 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
4818 : {
4819 : cost_qual_eval_context context;
4820 :
4821 1821710 : context.root = root;
4822 1821710 : context.total.startup = 0;
4823 1821710 : context.total.per_tuple = 0;
4824 :
4825 1821710 : cost_qual_eval_walker(qual, &context);
4826 :
4827 1821710 : *cost = context.total;
4828 1821710 : }
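
/*
 * Typical use of the QualCost result (a minimal sketch; the scan-costing
 * functions in this file all follow this pattern): the one-time component
 * feeds the path's startup cost, and the per-evaluation component is
 * charged for every tuple the quals are applied to.
 *
 *		cost_qual_eval(&qpqual_cost, quals, root);
 *		startup_cost += qpqual_cost.startup;
 *		run_cost += (cpu_tuple_cost + qpqual_cost.per_tuple) * tuples;
 */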
4829 :
4830 : static bool
4831 9465704 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4832 : {
4833 9465704 : if (node == NULL)
4834 87052 : return false;
4835 :
4836 : /*
4837 : * RestrictInfo nodes contain an eval_cost field reserved for this
4838 : * routine's use, so that it's not necessary to evaluate the qual clause's
4839 : * cost more than once. If the clause's cost hasn't been computed yet,
4840 : * the field's startup value will contain -1.
4841 : */
4842 9378652 : if (IsA(node, RestrictInfo))
4843 : {
4844 4385084 : RestrictInfo *rinfo = (RestrictInfo *) node;
4845 :
4846 4385084 : if (rinfo->eval_cost.startup < 0)
4847 : {
4848 : cost_qual_eval_context locContext;
4849 :
4850 596642 : locContext.root = context->root;
4851 596642 : locContext.total.startup = 0;
4852 596642 : locContext.total.per_tuple = 0;
4853 :
4854 : /*
4855 : * For an OR clause, recurse into the marked-up tree so that we
4856 : * set the eval_cost for contained RestrictInfos too.
4857 : */
4858 596642 : if (rinfo->orclause)
4859 9564 : cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4860 : else
4861 587078 : cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4862 :
4863 : /*
4864 : * If the RestrictInfo is marked pseudoconstant, it will be tested
4865 : * only once, so treat its cost as all startup cost.
4866 : */
4867 596642 : if (rinfo->pseudoconstant)
4868 : {
4869 : /* count one execution during startup */
4870 10100 : locContext.total.startup += locContext.total.per_tuple;
4871 10100 : locContext.total.per_tuple = 0;
4872 : }
4873 596642 : rinfo->eval_cost = locContext.total;
4874 : }
4875 4385084 : context->total.startup += rinfo->eval_cost.startup;
4876 4385084 : context->total.per_tuple += rinfo->eval_cost.per_tuple;
4877 : /* do NOT recurse into children */
4878 4385084 : return false;
4879 : }
4880 :
4881 : /*
4882 : * For each operator or function node in the given tree, we charge the
4883 : * estimated execution cost given by pg_proc.procost (remember to multiply
4884 : * this by cpu_operator_cost).
4885 : *
4886 : * Vars and Consts are charged zero, and so are boolean operators (AND,
4887 : * OR, NOT). Simplistic, but a lot better than no model at all.
4888 : *
4889 : * Should we try to account for the possibility of short-circuit
4890 : * evaluation of AND/OR? Probably *not*, because that would make the
4891 : * results depend on the clause ordering, and we are not in any position
4892 : * to expect that the current ordering of the clauses is the one that's
4893 : * going to end up being used. The above per-RestrictInfo caching would
4894 : * not mix well with trying to re-order clauses anyway.
4895 : *
4896 : * Another issue that is entirely ignored here is that if a set-returning
4897 : * function is below top level in the tree, the functions/operators above
4898 : * it will need to be evaluated multiple times. In practical use, such
4899 : * cases arise so seldom that they are not worth the added complexity;
4900 : * moreover, since our rowcount estimates for functions tend to be pretty
4901 : * phony, the results would also be pretty phony.
4902 : */
4903 4993568 : if (IsA(node, FuncExpr))
4904 : {
4905 340904 : add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
4906 : &context->total);
4907 : }
4908 4652664 : else if (IsA(node, OpExpr) ||
4909 3999656 : IsA(node, DistinctExpr) ||
4910 3998586 : IsA(node, NullIfExpr))
4911 : {
4912 : /* rely on struct equivalence to treat these all alike */
4913 654202 : set_opfuncid((OpExpr *) node);
4914 654202 : add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
4915 : &context->total);
4916 : }
4917 3998462 : else if (IsA(node, ScalarArrayOpExpr))
4918 : {
4919 44088 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
4920 44088 : Node *arraynode = (Node *) lsecond(saop->args);
4921 : QualCost sacosts;
4922 : QualCost hcosts;
4923 44088 : double estarraylen = estimate_array_length(context->root, arraynode);
4924 :
4925 44088 : set_sa_opfuncid(saop);
4926 44088 : sacosts.startup = sacosts.per_tuple = 0;
4927 44088 : add_function_cost(context->root, saop->opfuncid, NULL,
4928 : &sacosts);
4929 :
4930 44088 : if (OidIsValid(saop->hashfuncid))
4931 : {
4932 : /* Handle costs for hashed ScalarArrayOpExpr */
4933 440 : hcosts.startup = hcosts.per_tuple = 0;
4934 :
4935 440 : add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
4936 440 : context->total.startup += sacosts.startup + hcosts.startup;
4937 :
4938 : /* Estimate the cost of building the hashtable. */
4939 440 : context->total.startup += estarraylen * hcosts.per_tuple;
4940 :
4941 : /*
4942 : * XXX should we charge a little bit for sacosts.per_tuple when
4943 : * building the table, or is it ok to assume there will be zero
4944 : * hash collisions?
4945 : */
4946 :
4947 : /*
4948 : * Charge for hashtable lookups. Charge a single hash and a
4949 : * single comparison.
4950 : */
4951 440 : context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
4952 : }
4953 : else
4954 : {
4955 : /*
4956 : * Estimate that the operator will be applied to about half of the
4957 : * array elements before the answer is determined.
4958 : */
4959 43648 : context->total.startup += sacosts.startup;
4960 87296 : context->total.per_tuple += sacosts.per_tuple *
4961 43648 : estimate_array_length(context->root, arraynode) * 0.5;
4962 : }
4963 : }
4964 3954374 : else if (IsA(node, Aggref) ||
4965 3889406 : IsA(node, WindowFunc))
4966 : {
4967 : /*
4968 : * Aggref and WindowFunc nodes are (and should be) treated like Vars,
4969 : * ie, zero execution cost in the current model, because they behave
4970 : * essentially like Vars at execution. We disregard the costs of
4971 : * their input expressions for the same reason. The actual execution
4972 : * costs of the aggregate/window functions and their arguments have to
4973 : * be factored into plan-node-specific costing of the Agg or WindowAgg
4974 : * plan node.
4975 : */
4976 68834 : return false; /* don't recurse into children */
4977 : }
4978 3885540 : else if (IsA(node, GroupingFunc))
4979 : {
4980 : /* Treat this as having cost 1 */
4981 422 : context->total.per_tuple += cpu_operator_cost;
4982 422 : return false; /* don't recurse into children */
4983 : }
4984 3885118 : else if (IsA(node, CoerceViaIO))
4985 : {
4986 22128 : CoerceViaIO *iocoerce = (CoerceViaIO *) node;
4987 : Oid iofunc;
4988 : Oid typioparam;
4989 : bool typisvarlena;
4990 :
4991 : /* check the result type's input function */
4992 22128 : getTypeInputInfo(iocoerce->resulttype,
4993 : &iofunc, &typioparam);
4994 22128 : add_function_cost(context->root, iofunc, NULL,
4995 : &context->total);
4996 : /* check the input type's output function */
4997 22128 : getTypeOutputInfo(exprType((Node *) iocoerce->arg),
4998 : &iofunc, &typisvarlena);
4999 22128 : add_function_cost(context->root, iofunc, NULL,
5000 : &context->total);
5001 : }
5002 3862990 : else if (IsA(node, ArrayCoerceExpr))
5003 : {
5004 5098 : ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
5005 : QualCost perelemcost;
5006 :
5007 5098 : cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
5008 : context->root);
5009 5098 : context->total.startup += perelemcost.startup;
5010 5098 : if (perelemcost.per_tuple > 0)
5011 66 : context->total.per_tuple += perelemcost.per_tuple *
5012 66 : estimate_array_length(context->root, (Node *) acoerce->arg);
5013 : }
5014 3857892 : else if (IsA(node, RowCompareExpr))
5015 : {
5016 : /* Conservatively assume we will check all the columns */
5017 252 : RowCompareExpr *rcexpr = (RowCompareExpr *) node;
5018 : ListCell *lc;
5019 :
5020 810 : foreach(lc, rcexpr->opnos)
5021 : {
5022 558 : Oid opid = lfirst_oid(lc);
5023 :
5024 558 : add_function_cost(context->root, get_opcode(opid), NULL,
5025 : &context->total);
5026 : }
5027 : }
5028 3857640 : else if (IsA(node, MinMaxExpr) ||
5029 3857368 : IsA(node, SQLValueFunction) ||
5030 3852614 : IsA(node, XmlExpr) ||
5031 3851912 : IsA(node, CoerceToDomain) ||
5032 3842194 : IsA(node, NextValueExpr) ||
5033 3841832 : IsA(node, JsonExpr))
5034 : {
5035 : /* Treat all these as having cost 1 */
5036 18380 : context->total.per_tuple += cpu_operator_cost;
5037 : }
5038 3839260 : else if (IsA(node, SubLink))
5039 : {
5040 : /* This routine should not be applied to un-planned expressions */
5041 0 : elog(ERROR, "cannot handle unplanned sub-select");
5042 : }
5043 3839260 : else if (IsA(node, SubPlan))
5044 : {
5045 : /*
5046 : * A subplan node in an expression typically indicates that the
5047 : * subplan will be executed on each evaluation, so charge accordingly.
5048 : * (Sub-selects that can be executed as InitPlans have already been
5049 : * removed from the expression.)
5050 : */
5051 43152 : SubPlan *subplan = (SubPlan *) node;
5052 :
5053 43152 : context->total.startup += subplan->startup_cost;
5054 43152 : context->total.per_tuple += subplan->per_call_cost;
5055 :
5056 : /*
5057 : * We don't want to recurse into the testexpr, because it was already
5058 : * counted in the SubPlan node's costs. So we're done.
5059 : */
5060 43152 : return false;
5061 : }
5062 3796108 : else if (IsA(node, AlternativeSubPlan))
5063 : {
5064 : /*
5065 : * Arbitrarily use the first alternative plan for costing. (We should
5066 : * certainly only include one alternative, and we don't yet have
5067 : * enough information to know which one the executor is most likely to
5068 : * use.)
5069 : */
5070 1834 : AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
5071 :
5072 1834 : return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
5073 : context);
5074 : }
5075 3794274 : else if (IsA(node, PlaceHolderVar))
5076 : {
5077 : /*
5078 : * A PlaceHolderVar should be given cost zero when considering general
5079 : * expression evaluation costs. The expense of doing the contained
5080 : * expression is charged as part of the tlist eval costs of the scan
5081 : * or join where the PHV is first computed (see set_rel_width and
5082 : * add_placeholders_to_joinrel). If we charged it again here, we'd be
5083 : * double-counting the cost for each level of plan that the PHV
5084 : * bubbles up through. Hence, return without recursing into the
5085 : * phexpr.
5086 : */
5087 5172 : return false;
5088 : }
5089 :
5090 : /* recurse into children */
5091 4874154 : return expression_tree_walker(node, cost_qual_eval_walker, context);
5092 : }
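
/*
 * Concrete illustration of the walker's charging rules: a qual such as
 * "x > 10" costs one cpu_operator_cost per evaluation (the comparison
 * function's pg_proc.procost is 1), while a qual calling a function
 * created with COST 100 is charged 100 * cpu_operator_cost.  Vars,
 * Consts, and the AND/OR/NOT connectives themselves add nothing.
 */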
5093 :
5094 : /*
5095 : * get_restriction_qual_cost
5096 : * Compute evaluation costs of a baserel's restriction quals, plus any
5097 : * movable join quals that have been pushed down to the scan.
5098 : * Results are returned into *qpqual_cost.
5099 : *
5100 : * This is a convenience subroutine that works for seqscans and other cases
5101 : * where all the given quals will be evaluated the hard way. It's not useful
5102 : * for cost_index(), for example, where the index machinery takes care of
5103 : * some of the quals. We assume baserestrictcost was previously set by
5104 : * set_baserel_size_estimates().
5105 : */
5106 : static void
5107 1077968 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
5108 : ParamPathInfo *param_info,
5109 : QualCost *qpqual_cost)
5110 : {
5111 1077968 : if (param_info)
5112 : {
5113 : /* Include costs of pushed-down clauses */
5114 242064 : cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
5115 :
5116 242064 : qpqual_cost->startup += baserel->baserestrictcost.startup;
5117 242064 : qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
5118 : }
5119 : else
5120 835904 : *qpqual_cost = baserel->baserestrictcost;
5121 1077968 : }
5122 :
5123 :
5124 : /*
5125 : * compute_semi_anti_join_factors
5126 : * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
5127 : * can be expected to scan.
5128 : *
5129 : * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
5130 : * inner rows as soon as it finds a match to the current outer row.
5131 : * The same happens if we have detected the inner rel is unique.
5132 : * We should therefore adjust some of the cost components for this effect.
5133 : * This function computes some estimates needed for these adjustments.
5134 : * These estimates will be the same regardless of the particular paths used
5135 : * for the outer and inner relation, so we compute these once and then pass
5136 : * them to all the join cost estimation functions.
5137 : *
5138 : * Input parameters:
5139 : * joinrel: join relation under consideration
5140 : * outerrel: outer relation under consideration
5141 : * innerrel: inner relation under consideration
5142 : * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
5143 : * sjinfo: SpecialJoinInfo relevant to this join
5144 : * restrictlist: join quals
5145 : * Output parameters:
5146 : * *semifactors is filled in (see pathnodes.h for field definitions)
5147 : */
5148 : void
5149 211468 : compute_semi_anti_join_factors(PlannerInfo *root,
5150 : RelOptInfo *joinrel,
5151 : RelOptInfo *outerrel,
5152 : RelOptInfo *innerrel,
5153 : JoinType jointype,
5154 : SpecialJoinInfo *sjinfo,
5155 : List *restrictlist,
5156 : SemiAntiJoinFactors *semifactors)
5157 : {
5158 : Selectivity jselec;
5159 : Selectivity nselec;
5160 : Selectivity avgmatch;
5161 : SpecialJoinInfo norm_sjinfo;
5162 : List *joinquals;
5163 : ListCell *l;
5164 :
5165 : /*
5166 : * In an ANTI join, we must ignore clauses that are "pushed down", since
5167 : * those won't affect the match logic. In a SEMI join, we do not
5168 : * distinguish joinquals from "pushed down" quals, so just use the whole
5169 : * restrictinfo list. For other outer join types, we should consider only
5170 : * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
5171 : */
5172 211468 : if (IS_OUTER_JOIN(jointype))
5173 : {
5174 74782 : joinquals = NIL;
5175 163780 : foreach(l, restrictlist)
5176 : {
5177 88998 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5178 :
5179 88998 : if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5180 84122 : joinquals = lappend(joinquals, rinfo);
5181 : }
5182 : }
5183 : else
5184 136686 : joinquals = restrictlist;
5185 :
5186 : /*
5187 : * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
5188 : */
5189 211468 : jselec = clauselist_selectivity(root,
5190 : joinquals,
5191 : 0,
5192 : (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
5193 : sjinfo);
5194 :
5195 : /*
5196 : * Also get the normal inner-join selectivity of the join clauses.
5197 : */
5198 211468 : init_dummy_sjinfo(&norm_sjinfo, outerrel->relids, innerrel->relids);
5199 :
5200 211468 : nselec = clauselist_selectivity(root,
5201 : joinquals,
5202 : 0,
5203 : JOIN_INNER,
5204 : &norm_sjinfo);
5205 :
5206 : /* Avoid leaking a lot of ListCells */
5207 211468 : if (IS_OUTER_JOIN(jointype))
5208 74782 : list_free(joinquals);
5209 :
5210 : /*
5211 : * jselec can be interpreted as the fraction of outer-rel rows that have
5212 : * any matches (this is true for both SEMI and ANTI cases). And nselec is
5213 : * the fraction of the Cartesian product that matches. So, the average
5214 : * number of matches for each outer-rel row that has at least one match is
5215 : * nselec * inner_rows / jselec.
5216 : *
5217 : * Note: it is correct to use the inner rel's "rows" count here, even
5218 : * though we might later be considering a parameterized inner path with
5219 : * fewer rows. This is because we have included all the join clauses in
5220 : * the selectivity estimate.
5221 : */
5222 211468 : if (jselec > 0) /* protect against zero divide */
5223 : {
5224 211040 : avgmatch = nselec * innerrel->rows / jselec;
5225 : /* Clamp to sane range */
5226 211040 : avgmatch = Max(1.0, avgmatch);
5227 : }
5228 : else
5229 428 : avgmatch = 1.0;
5230 :
5231 211468 : semifactors->outer_match_frac = jselec;
5232 211468 : semifactors->match_count = avgmatch;
5233 211468 : }
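
/*
 * Worked example (illustrative only).  Suppose the join clauses have
 * SEMI-join selectivity jselec = 0.1 (10% of outer rows have at least
 * one match), inner-join selectivity nselec = 0.001, and the inner rel
 * has 2000 rows.  Then avgmatch = 0.001 * 2000 / 0.1 = 20: an outer row
 * that matches at all is expected to match about 20 inner rows.  The
 * Max() clamp keeps avgmatch from ever being reported below 1.0.
 */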
5234 :
5235 : /*
5236 : * has_indexed_join_quals
5237 : * Check whether all the joinquals of a nestloop join are used as
5238 : * inner index quals.
5239 : *
5240 : * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
5241 : * indexscan) that uses all the joinquals as indexquals, we can assume that an
5242 : * unmatched outer tuple is cheap to process, whereas otherwise it's probably
5243 : * expensive.
5244 : */
5245 : static bool
5246 905000 : has_indexed_join_quals(NestPath *path)
5247 : {
5248 905000 : JoinPath *joinpath = &path->jpath;
5249 905000 : Relids joinrelids = joinpath->path.parent->relids;
5250 905000 : Path *innerpath = joinpath->innerjoinpath;
5251 : List *indexclauses;
5252 : bool found_one;
5253 : ListCell *lc;
5254 :
5255 : /* If join still has quals to evaluate, it's not fast */
5256 905000 : if (joinpath->joinrestrictinfo != NIL)
5257 642534 : return false;
5258 : /* Nor if the inner path isn't parameterized at all */
5259 262466 : if (innerpath->param_info == NULL)
5260 3300 : return false;
5261 :
5262 : /* Find the indexclauses list for the inner scan */
5263 259166 : switch (innerpath->pathtype)
5264 : {
5265 158358 : case T_IndexScan:
5266 : case T_IndexOnlyScan:
5267 158358 : indexclauses = ((IndexPath *) innerpath)->indexclauses;
5268 158358 : break;
5269 270 : case T_BitmapHeapScan:
5270 : {
5271 : /* Accept only a simple bitmap scan, not AND/OR cases */
5272 270 : Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
5273 :
5274 270 : if (IsA(bmqual, IndexPath))
5275 222 : indexclauses = ((IndexPath *) bmqual)->indexclauses;
5276 : else
5277 48 : return false;
5278 222 : break;
5279 : }
5280 100538 : default:
5281 :
5282 : /*
5283 : * If it's not a simple indexscan, it probably doesn't run quickly
5284 : * for zero rows out, even if it's a parameterized path using all
5285 : * the joinquals.
5286 : */
5287 100538 : return false;
5288 : }
5289 :
5290 : /*
5291 : * Examine the inner path's param clauses. Any that are from the outer
5292 : * path must be found in the indexclauses list, either exactly or in an
5293 : * equivalent form generated by equivclass.c. Also, we must find at least
5294 : * one such clause, else it's a clauseless join which isn't fast.
5295 : */
5296 158580 : found_one = false;
5297 313064 : foreach(lc, innerpath->param_info->ppi_clauses)
5298 : {
5299 163162 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
5300 :
5301 163162 : if (join_clause_is_movable_into(rinfo,
5302 163162 : innerpath->parent->relids,
5303 : joinrelids))
5304 : {
5305 162610 : if (!is_redundant_with_indexclauses(rinfo, indexclauses))
5306 8678 : return false;
5307 153932 : found_one = true;
5308 : }
5309 : }
5310 149902 : return found_one;
5311 : }
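
/*
 * Concrete illustration (hypothetical query): for an ANTI join
 * implementing "... WHERE NOT EXISTS (SELECT 1 FROM b WHERE b.x = a.x)"
 * with an index on b.x, the nestloop's joinrestrictinfo is empty and the
 * parameterized inner indexscan carries b.x = a.x in its indexclauses,
 * so this function returns true and unmatched outer rows can be costed
 * as cheap to process.
 */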
5312 :
5313 :
5314 : /*
5315 : * approx_tuple_count
5316 : * Quick-and-dirty estimation of the number of join rows passing
5317 : * a set of qual conditions.
5318 : *
5319 : * The quals can be either an implicitly-ANDed list of boolean expressions,
5320 : * or a list of RestrictInfo nodes (typically the latter).
5321 : *
5322 : * We intentionally compute the selectivity under JOIN_INNER rules, even
5323 : * if it's some type of outer join. This is appropriate because we are
5324 : * trying to figure out how many tuples pass the initial merge or hash
5325 : * join step.
5326 : *
5327 : * This is quick-and-dirty because we bypass clauselist_selectivity, and
5328 : * simply multiply the independent clause selectivities together. Now
5329 : * clauselist_selectivity often can't do any better than that anyhow, but
5330 : * for some situations (such as range constraints) it is smarter. However,
5331 : * we can't effectively cache the results of clauselist_selectivity, whereas
5332 : * the individual clause selectivities can be and are cached.
5333 : *
5334 : * Since we are only using the results to estimate how many potential
5335 : * output tuples are generated and passed through qpqual checking, it
5336 : * seems OK to live with the approximation.
5337 : */
5338 : static double
5339 789334 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
5340 : {
5341 : double tuples;
5342 789334 : double outer_tuples = path->outerjoinpath->rows;
5343 789334 : double inner_tuples = path->innerjoinpath->rows;
5344 : SpecialJoinInfo sjinfo;
5345 789334 : Selectivity selec = 1.0;
5346 : ListCell *l;
5347 :
5348 : /*
5349 : * Make up a SpecialJoinInfo for JOIN_INNER semantics.
5350 : */
5351 789334 : init_dummy_sjinfo(&sjinfo, path->outerjoinpath->parent->relids,
5352 789334 : path->innerjoinpath->parent->relids);
5353 :
5354 : /* Get the approximate selectivity */
5355 1669696 : foreach(l, quals)
5356 : {
5357 880362 : Node *qual = (Node *) lfirst(l);
5358 :
5359 : /* Note that clause_selectivity will be able to cache its result */
5360 880362 : selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
5361 : }
5362 :
5363 : /* Apply it to the input relation sizes */
5364 789334 : tuples = selec * outer_tuples * inner_tuples;
5365 :
5366 789334 : return clamp_row_est(tuples);
5367 : }
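
/*
 * Worked example (illustrative only).  With 1000 outer rows, 500 inner
 * rows, and two quals of selectivity 0.01 and 0.5, the estimate is
 * 0.01 * 0.5 * 1000 * 500 = 2500 tuples.  clauselist_selectivity might
 * do better if the quals were correlated (say, a range pair), which is
 * exactly the accuracy traded away here for cacheable per-clause
 * selectivities.
 */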
5368 :
5369 :
5370 : /*
5371 : * set_baserel_size_estimates
5372 : * Set the size estimates for the given base relation.
5373 : *
5374 : * The rel's targetlist and restrictinfo list must have been constructed
5375 : * already, and rel->tuples must be set.
5376 : *
5377 : * We set the following fields of the rel node:
5378 : * rows: the estimated number of output tuples (after applying
5379 : * restriction clauses).
5380 : * width: the estimated average output tuple width in bytes.
5381 : * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
5382 : */
5383 : void
5384 509646 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5385 : {
5386 : double nrows;
5387 :
5388 : /* Should only be applied to base relations */
5389 : Assert(rel->relid > 0);
5390 :
5391 1019262 : nrows = rel->tuples *
5392 509646 : clauselist_selectivity(root,
5393 : rel->baserestrictinfo,
5394 : 0,
5395 : JOIN_INNER,
5396 : NULL);
5397 :
5398 509616 : rel->rows = clamp_row_est(nrows);
5399 :
5400 509616 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5401 :
5402 509616 : set_rel_width(root, rel);
5403 509616 : }
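
/*
 * Worked example (illustrative only).  A relation with rel->tuples =
 * 100000 and restriction clauses of combined selectivity 0.015 gets
 * rel->rows = clamp_row_est(1500) = 1500.  clamp_row_est also rounds to
 * an integer and enforces a floor of one row, so a selectivity of 1e-9
 * here would still yield rows = 1.
 */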
5404 :
5405 : /*
5406 : * get_parameterized_baserel_size
5407 : * Make a size estimate for a parameterized scan of a base relation.
5408 : *
5409 : * 'param_clauses' lists the additional join clauses to be used.
5410 : *
5411 : * set_baserel_size_estimates must have been applied already.
5412 : */
5413 : double
5414 158568 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
5415 : List *param_clauses)
5416 : {
5417 : List *allclauses;
5418 : double nrows;
5419 :
5420 : /*
5421 : * Estimate the number of rows returned by the parameterized scan, knowing
5422 : * that it will apply all the extra join clauses as well as the rel's own
5423 : * restriction clauses. Note that we force the clauses to be treated as
5424 : * non-join clauses during selectivity estimation.
5425 : */
5426 158568 : allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
5427 317136 : nrows = rel->tuples *
5428 158568 : clauselist_selectivity(root,
5429 : allclauses,
5430 158568 : rel->relid, /* do not use 0! */
5431 : JOIN_INNER,
5432 : NULL);
5433 158568 : nrows = clamp_row_est(nrows);
5434 : /* For safety, make sure result is not more than the base estimate */
5435 158568 : if (nrows > rel->rows)
5436 0 : nrows = rel->rows;
5437 158568 : return nrows;
5438 : }
5439 :
5440 : /*
5441 : * set_joinrel_size_estimates
5442 : * Set the size estimates for the given join relation.
5443 : *
5444 : * The rel's targetlist must have been constructed already, and a
5445 : * restriction clause list that matches the given component rels must
5446 : * be provided.
5447 : *
5448 : * Since there is more than one way to make a joinrel for more than two
5449 : * base relations, the results we get here could depend on which component
5450 : * rel pair is provided. In theory we should get the same answers no matter
5451 : * which pair is provided; in practice, since the selectivity estimation
5452 : * routines don't handle all cases equally well, we might not. But there's
5453 : * not much to be done about it. (Would it make sense to repeat the
5454 : * calculations for each pair of input rels that's encountered, and somehow
5455 : * average the results? Probably way more trouble than it's worth, and
5456 : * anyway we must keep the rowcount estimate the same for all paths for the
5457 : * joinrel.)
5458 : *
5459 : * We set only the rows field here. The reltarget field was already set by
5460 : * build_joinrel_tlist, and baserestrictcost is not used for join rels.
5461 : */
5462 : void
5463 251870 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5464 : RelOptInfo *outer_rel,
5465 : RelOptInfo *inner_rel,
5466 : SpecialJoinInfo *sjinfo,
5467 : List *restrictlist)
5468 : {
5469 251870 : rel->rows = calc_joinrel_size_estimate(root,
5470 : rel,
5471 : outer_rel,
5472 : inner_rel,
5473 : outer_rel->rows,
5474 : inner_rel->rows,
5475 : sjinfo,
5476 : restrictlist);
5477 251870 : }
5478 :
5479 : /*
5480 : * get_parameterized_joinrel_size
5481 : * Make a size estimate for a parameterized scan of a join relation.
5482 : *
5483 : * 'rel' is the joinrel under consideration.
5484 : * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5485 : * produce the relations being joined.
5486 : * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5487 : * 'restrict_clauses' lists the join clauses that need to be applied at the
5488 : * join node (including any movable clauses that were moved down to this join,
5489 : * and not including any movable clauses that were pushed down into the
5490 : * child paths).
5491 : *
5492 : * set_joinrel_size_estimates must have been applied already.
5493 : */
5494 : double
5495 8664 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5496 : Path *outer_path,
5497 : Path *inner_path,
5498 : SpecialJoinInfo *sjinfo,
5499 : List *restrict_clauses)
5500 : {
5501 : double nrows;
5502 :
5503 : /*
5504 : * Estimate the number of rows returned by the parameterized join as the
5505 : * sizes of the input paths times the selectivity of the clauses that have
5506 : * ended up at this join node.
5507 : *
5508 : * As with set_joinrel_size_estimates, the rowcount estimate could depend
5509 : * on the pair of input paths provided, though ideally we'd get the same
5510 : * estimate for any pair with the same parameterization.
5511 : */
5512 8664 : nrows = calc_joinrel_size_estimate(root,
5513 : rel,
5514 : outer_path->parent,
5515 : inner_path->parent,
5516 : outer_path->rows,
5517 : inner_path->rows,
5518 : sjinfo,
5519 : restrict_clauses);
5520 : /* For safety, make sure result is not more than the base estimate */
5521 8664 : if (nrows > rel->rows)
5522 12 : nrows = rel->rows;
5523 8664 : return nrows;
5524 : }
5525 :
5526 : /*
5527 : * calc_joinrel_size_estimate
5528 : * Workhorse for set_joinrel_size_estimates and
5529 : * get_parameterized_joinrel_size.
5530 : *
5531 : * outer_rel/inner_rel are the relations being joined, but they should be
5532 : * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5533 : * than what rel->rows says, when we are considering parameterized paths.
5534 : */
5535 : static double
5536 260534 : calc_joinrel_size_estimate(PlannerInfo *root,
5537 : RelOptInfo *joinrel,
5538 : RelOptInfo *outer_rel,
5539 : RelOptInfo *inner_rel,
5540 : double outer_rows,
5541 : double inner_rows,
5542 : SpecialJoinInfo *sjinfo,
5543 : List *restrictlist)
5544 : {
5545 260534 : JoinType jointype = sjinfo->jointype;
5546 : Selectivity fkselec;
5547 : Selectivity jselec;
5548 : Selectivity pselec;
5549 : double nrows;
5550 :
5551 : /*
5552 : * Compute joinclause selectivity. Note that we are only considering
5553 : * clauses that become restriction clauses at this join level; we are not
5554 : * double-counting them because they were not considered in estimating the
5555 : * sizes of the component rels.
5556 : *
5557 : * First, see whether any of the joinclauses can be matched to known FK
5558 : * constraints. If so, drop those clauses from the restrictlist, and
5559 : * instead estimate their selectivity using FK semantics. (We do this
5560 : * without regard to whether said clauses are local or "pushed down".
5561 : * Probably, an FK-matching clause could never be seen as pushed down at
5562 : * an outer join, since it would be strict and hence would be grounds for
5563 : * join strength reduction.) fkselec gets the net selectivity for
5564 : * FK-matching clauses, or 1.0 if there are none.
5565 : */
5566 260534 : fkselec = get_foreign_key_join_selectivity(root,
5567 : outer_rel->relids,
5568 : inner_rel->relids,
5569 : sjinfo,
5570 : &restrictlist);
5571 :
5572 : /*
5573 : * For an outer join, we have to distinguish the selectivity of the join's
5574 : * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5575 : * down". For inner joins we just count them all as joinclauses.
5576 : */
5577 260534 : if (IS_OUTER_JOIN(jointype))
5578 : {
5579 80220 : List *joinquals = NIL;
5580 80220 : List *pushedquals = NIL;
5581 : ListCell *l;
5582 :
5583 : /* Grovel through the clauses to separate into two lists */
5584 180732 : foreach(l, restrictlist)
5585 : {
5586 100512 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5587 :
5588 100512 : if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5589 4296 : pushedquals = lappend(pushedquals, rinfo);
5590 : else
5591 96216 : joinquals = lappend(joinquals, rinfo);
5592 : }
5593 :
5594 : /* Get the separate selectivities */
5595 80220 : jselec = clauselist_selectivity(root,
5596 : joinquals,
5597 : 0,
5598 : jointype,
5599 : sjinfo);
5600 80220 : pselec = clauselist_selectivity(root,
5601 : pushedquals,
5602 : 0,
5603 : jointype,
5604 : sjinfo);
5605 :
5606 : /* Avoid leaking a lot of ListCells */
5607 80220 : list_free(joinquals);
5608 80220 : list_free(pushedquals);
5609 : }
5610 : else
5611 : {
5612 180314 : jselec = clauselist_selectivity(root,
5613 : restrictlist,
5614 : 0,
5615 : jointype,
5616 : sjinfo);
5617 180314 : pselec = 0.0; /* not used, keep compiler quiet */
5618 : }
5619 :
5620 : /*
5621 : * Basically, we multiply size of Cartesian product by selectivity.
5622 : *
5623 : * If we are doing an outer join, take that into account: the joinqual
5624 : * selectivity has to be clamped using the knowledge that the output must
5625 : * be at least as large as the non-nullable input. However, any
5626 : * pushed-down quals are applied after the outer join, so their
5627 : * selectivity applies fully.
5628 : *
5629 : * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5630 : * of LHS rows that have matches, and we apply that straightforwardly.
5631 : */
5632 260534 : switch (jointype)
5633 : {
5634 172216 : case JOIN_INNER:
5635 172216 : nrows = outer_rows * inner_rows * fkselec * jselec;
5636 : /* pselec not used */
5637 172216 : break;
5638 73514 : case JOIN_LEFT:
5639 73514 : nrows = outer_rows * inner_rows * fkselec * jselec;
5640 73514 : if (nrows < outer_rows)
5641 28578 : nrows = outer_rows;
5642 73514 : nrows *= pselec;
5643 73514 : break;
5644 1714 : case JOIN_FULL:
5645 1714 : nrows = outer_rows * inner_rows * fkselec * jselec;
5646 1714 : if (nrows < outer_rows)
5647 1136 : nrows = outer_rows;
5648 1714 : if (nrows < inner_rows)
5649 120 : nrows = inner_rows;
5650 1714 : nrows *= pselec;
5651 1714 : break;
5652 8098 : case JOIN_SEMI:
5653 8098 : nrows = outer_rows * fkselec * jselec;
5654 : /* pselec not used */
5655 8098 : break;
5656 4992 : case JOIN_ANTI:
5657 4992 : nrows = outer_rows * (1.0 - fkselec * jselec);
5658 4992 : nrows *= pselec;
5659 4992 : break;
5660 0 : default:
5661 : /* other values not expected here */
5662 0 : elog(ERROR, "unrecognized join type: %d", (int) jointype);
5663 : nrows = 0; /* keep compiler quiet */
5664 : break;
5665 : }
5666 :
5667 260534 : return clamp_row_est(nrows);
5668 : }
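
/*
 * Worked example of the JOIN_LEFT arm above (illustrative only).  With
 * outer_rows = 1000, inner_rows = 10, fkselec * jselec = 0.001 and one
 * pushed-down qual with pselec = 0.5: the raw product 1000 * 10 * 0.001
 * = 10 is clamped up to the 1000 non-nullable outer rows, and the
 * pushed-down qual then applies on top, giving 500 rows.
 */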
5669 :
5670 : /*
5671 : * get_foreign_key_join_selectivity
5672 : * Estimate join selectivity for foreign-key-related clauses.
5673 : *
5674 : * Remove any clauses that can be matched to FK constraints from *restrictlist,
5675 : * and return a substitute estimate of their selectivity. 1.0 is returned
5676 : * when there are no such clauses.
5677 : *
5678 : * The reason for treating such clauses specially is that we can get better
5679 : * estimates this way than by relying on clauselist_selectivity(), especially
5680 : * for multi-column FKs where that function's assumption that the clauses are
5681 : * independent falls down badly. But even with single-column FKs, we may be
5682 : * able to get a better answer when the pg_statistic stats are missing or out
5683 : * of date.
5684 : */
5685 : static Selectivity
5686 260534 : get_foreign_key_join_selectivity(PlannerInfo *root,
5687 : Relids outer_relids,
5688 : Relids inner_relids,
5689 : SpecialJoinInfo *sjinfo,
5690 : List **restrictlist)
5691 : {
5692 260534 : Selectivity fkselec = 1.0;
5693 260534 : JoinType jointype = sjinfo->jointype;
5694 260534 : List *worklist = *restrictlist;
5695 : ListCell *lc;
5696 :
5697 : /* Consider each FK constraint that is known to match the query */
5698 262500 : foreach(lc, root->fkey_list)
5699 : {
5700 1966 : ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5701 : bool ref_is_outer;
5702 : List *removedlist;
5703 : ListCell *cell;
5704 :
5705 : /*
5706 : * This FK is not relevant unless it connects a baserel on one side of
5707 : * this join to a baserel on the other side.
5708 : */
5709 3580 : if (bms_is_member(fkinfo->con_relid, outer_relids) &&
5710 1614 : bms_is_member(fkinfo->ref_relid, inner_relids))
5711 1440 : ref_is_outer = false;
5712 866 : else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5713 340 : bms_is_member(fkinfo->con_relid, inner_relids))
5714 130 : ref_is_outer = true;
5715 : else
5716 396 : continue;
5717 :
5718 : /*
5719 : * If we're dealing with a semi/anti join, and the FK's referenced
5720 : * relation is on the outside, then knowledge of the FK doesn't help
5721 : * us figure out what we need to know (which is the fraction of outer
5722 : * rows that have matches). On the other hand, if the referenced rel
5723 : * is on the inside, then all outer rows must have matches in the
5724 : * referenced table (ignoring nulls). But any restriction or join
5725 : * clauses that filter that table will reduce the fraction of matches.
5726 : * We can account for restriction clauses, but it's too hard to guess
5727 : * how many table rows would get through a join that's inside the RHS.
5728 : * Hence, if either case applies, punt and ignore the FK.
5729 : */
5730 1570 : if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
5731 1048 : (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
5732 12 : continue;
5733 :
5734 : /*
5735 : * Modify the restrictlist by removing clauses that match the FK (and
5736 : * putting them into removedlist instead). It seems unsafe to modify
5737 : * the originally-passed List structure, so we make a shallow copy the
5738 : * first time through.
5739 : */
5740 1558 : if (worklist == *restrictlist)
5741 1334 : worklist = list_copy(worklist);
5742 :
5743 1558 : removedlist = NIL;
5744 3252 : foreach(cell, worklist)
5745 : {
5746 1694 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5747 1694 : bool remove_it = false;
5748 : int i;
5749 :
5750 : /* Drop this clause if it matches any column of the FK */
5751 2140 : for (i = 0; i < fkinfo->nkeys; i++)
5752 : {
5753 2110 : if (rinfo->parent_ec)
5754 : {
5755 : /*
5756 : * EC-derived clauses can only match by EC. It is okay to
5757 : * consider any clause derived from the same EC as
5758 : * matching the FK: even if equivclass.c chose to generate
5759 : * a clause equating some other pair of Vars, it could
5760 : * have generated one equating the FK's Vars. So for
5761 : * purposes of estimation, we can act as though it did so.
5762 : *
5763 : * Note: checking parent_ec is a bit of a cheat because
5764 : * there are EC-derived clauses that don't have parent_ec
5765 : * set; but such clauses must compare expressions that
5766 : * aren't just Vars, so they cannot match the FK anyway.
5767 : */
5768 304 : if (fkinfo->eclass[i] == rinfo->parent_ec)
5769 : {
5770 298 : remove_it = true;
5771 298 : break;
5772 : }
5773 : }
5774 : else
5775 : {
5776 : /*
5777 : * Otherwise, see if rinfo was previously matched to FK as
5778 : * a "loose" clause.
5779 : */
5780 1806 : if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5781 : {
5782 1366 : remove_it = true;
5783 1366 : break;
5784 : }
5785 : }
5786 : }
5787 1694 : if (remove_it)
5788 : {
5789 1664 : worklist = foreach_delete_current(worklist, cell);
5790 1664 : removedlist = lappend(removedlist, rinfo);
5791 : }
5792 : }
5793 :
5794 : /*
5795 : * If we failed to remove all the matching clauses we expected to
5796 : * find, chicken out and ignore this FK; applying its selectivity
5797 : * might result in double-counting. Put any clauses we did manage to
5798 : * remove back into the worklist.
5799 : *
5800 : * Since the matching clauses are known not outerjoin-delayed, they
5801 : * would normally have appeared in the initial joinclause list. If we
5802 : * didn't find them, there are two possibilities:
5803 : *
5804 : * 1. If the FK match is based on an EC that is ec_has_const, it won't
5805 : * have generated any join clauses at all. We discount such ECs while
5806 : * checking to see if we have "all" the clauses. (Below, we'll adjust
5807 : * the selectivity estimate for this case.)
5808 : *
5809 : * 2. The clauses were matched to some other FK in a previous
5810 : * iteration of this loop, and thus removed from worklist. (A likely
5811 : * case is that two FKs are matched to the same EC; there will be only
5812 : * one EC-derived clause in the initial list, so the first FK will
5813 : * consume it.) Applying both FKs' selectivity independently risks
5814 : * underestimating the join size; in particular, this would undo one
5815 : * of the main things that ECs were invented for, namely to avoid
5816 : * double-counting the selectivity of redundant equality conditions.
5817 : * Later we might think of a reasonable way to combine the estimates,
5818 : * but for now, just punt, since this is a fairly uncommon situation.
5819 : */
5820 1558 : if (removedlist == NIL ||
5821 1272 : list_length(removedlist) !=
5822 1272 : (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5823 : {
5824 286 : worklist = list_concat(worklist, removedlist);
5825 286 : continue;
5826 : }
5827 :
5828 : /*
5829 : * Finally we get to the payoff: estimate selectivity using the
5830 : * knowledge that each referencing row will match exactly one row in
5831 : * the referenced table.
5832 : *
5833 : * XXX that's not true in the presence of nulls in the referencing
5834 : * column(s), so in principle we should derate the estimate for those.
5835 : * However (1) if there are any strict restriction clauses for the
5836 : * referencing column(s) elsewhere in the query, derating here would
5837 : * be double-counting the null fraction, and (2) it's not very clear
5838 : * how to combine null fractions for multiple referencing columns. So
5839 : * we do nothing for now about correcting for nulls.
5840 : *
5841 : * XXX another point here is that if either side of an FK constraint
5842 : * is an inheritance parent, we estimate as though the constraint
5843 : * covers all its children as well. This is not an unreasonable
5844 : * assumption for a referencing table, ie the user probably applied
5845 : * identical constraints to all child tables (though perhaps we ought
5846 : * to check that). But it's not possible to have done that for a
5847 : * referenced table. Fortunately, precisely because that doesn't
5848 : * work, it is uncommon in practice to have an FK referencing a parent
5849 : * table. So, at least for now, disregard inheritance here.
5850 : */
5851 1272 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
5852 824 : {
5853 : /*
5854 : * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5855 : * referenced table is exactly the inside of the join. The join
5856 : * selectivity is defined as the fraction of LHS rows that have
5857 : * matches. The FK implies that every LHS row has a match *in the
5858 : * referenced table*; but any restriction clauses on it will
5859 : * reduce the number of matches. Hence we take the join
5860 : * selectivity as equal to the selectivity of the table's
5861 : * restriction clauses, which is rows / tuples; but we must guard
5862 : * against tuples == 0.
5863 : */
5864 824 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5865 824 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5866 :
5867 824 : fkselec *= ref_rel->rows / ref_tuples;
5868 : }
5869 : else
5870 : {
5871 : /*
5872 : * Otherwise, selectivity is exactly 1/referenced-table-size; but
5873 : * guard against tuples == 0. Note we should use the raw table
5874 : * tuple count, not any estimate of its filtered or joined size.
5875 : */
5876 448 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5877 448 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5878 :
5879 448 : fkselec *= 1.0 / ref_tuples;
5880 : }
5881 :
5882 : /*
5883 : * If any of the FK columns participated in ec_has_const ECs, then
5884 : * equivclass.c will have generated "var = const" restrictions for
5885 : * each side of the join, thus reducing the sizes of both input
5886 : * relations. Taking the fkselec at face value would amount to
5887 : * double-counting the selectivity of the constant restriction for the
5888 : * referencing Var. Hence, look for the restriction clause(s) that
5889 : * were applied to the referencing Var(s), and divide out their
5890 : * selectivity to correct for this.
5891 : */
5892 1272 : if (fkinfo->nconst_ec > 0)
5893 : {
5894 24 : for (int i = 0; i < fkinfo->nkeys; i++)
5895 : {
5896 18 : EquivalenceClass *ec = fkinfo->eclass[i];
5897 :
5898 18 : if (ec && ec->ec_has_const)
5899 : {
5900 6 : EquivalenceMember *em = fkinfo->fk_eclass_member[i];
5901 6 : RestrictInfo *rinfo = find_derived_clause_for_ec_member(root,
5902 : ec,
5903 : em);
5904 :
5905 6 : if (rinfo)
5906 : {
5907 : Selectivity s0;
5908 :
5909 6 : s0 = clause_selectivity(root,
5910 : (Node *) rinfo,
5911 : 0,
5912 : jointype,
5913 : sjinfo);
5914 6 : if (s0 > 0)
5915 6 : fkselec /= s0;
5916 : }
5917 : }
5918 : }
5919 : }
5920 : }
5921 :
5922 260534 : *restrictlist = worklist;
5923 260534 : CLAMP_PROBABILITY(fkselec);
5924 260534 : return fkselec;
5925 : }
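
/*
 * To make the arithmetic above concrete, here is a minimal standalone
 * sketch (not part of costsize.c) of the two selectivity formulas that
 * get_foreign_key_join_selectivity() applies: 1/referenced-table-size for
 * ordinary joins, and rows/tuples for semi/anti joins.  The names
 * fk_selectivity_sketch, ref_tuples, and ref_rows are hypothetical stand-ins
 * for ref_rel->tuples and ref_rel->rows.
 */
#include <stdio.h>

static double
fk_selectivity_sketch(double ref_tuples, double ref_rows, int is_semi_or_anti)
{
	/* guard against tuples == 0, as the real code does */
	double		tuples = (ref_tuples > 1.0) ? ref_tuples : 1.0;

	if (is_semi_or_anti)
		return ref_rows / tuples;	/* fraction of referenced rows passing its filters */
	else
		return 1.0 / tuples;	/* each referencing row matches exactly one row */
}

int
main(void)
{
	/* 1M-row referenced table, filtered down to 50k rows by restrictions */
	printf("inner join: %g\n", fk_selectivity_sketch(1e6, 5e4, 0));	/* 1e-06 */
	printf("semi join:  %g\n", fk_selectivity_sketch(1e6, 5e4, 1));	/* 0.05 */
	return 0;
}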
5926 :
5927 : /*
5928 : * set_subquery_size_estimates
5929 : * Set the size estimates for a base relation that is a subquery.
5930 : *
5931 : * The rel's targetlist and restrictinfo list must have been constructed
5932 : * already, and the Paths for the subquery must have been completed.
5933 : * We look at the subquery's PlannerInfo to extract data.
5934 : *
5935 : * We set the same fields as set_baserel_size_estimates.
5936 : */
5937 : void
5938 32734 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5939 : {
5940 32734 : PlannerInfo *subroot = rel->subroot;
5941 : RelOptInfo *sub_final_rel;
5942 : ListCell *lc;
5943 :
5944 : /* Should only be applied to base relations that are subqueries */
5945 : Assert(rel->relid > 0);
5946 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
5947 :
5948 : /*
5949 : * Copy raw number of output rows from subquery. All of its paths should
5950 : * have the same output rowcount, so just look at cheapest-total.
5951 : */
5952 32734 : sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
5953 32734 : rel->tuples = sub_final_rel->cheapest_total_path->rows;
5954 :
5955 : /*
5956 : * Compute per-output-column width estimates by examining the subquery's
5957 : * targetlist. For any output that is a plain Var, get the width estimate
5958 : * that was made while planning the subquery. Otherwise, we leave it to
5959 : * set_rel_width to fill in a datatype-based default estimate.
5960 : */
5961 156090 : foreach(lc, subroot->parse->targetList)
5962 : {
5963 123356 : TargetEntry *te = lfirst_node(TargetEntry, lc);
5964 123356 : Node *texpr = (Node *) te->expr;
5965 123356 : int32 item_width = 0;
5966 :
5967 : /* junk columns aren't visible to upper query */
5968 123356 : if (te->resjunk)
5969 1358 : continue;
5970 :
5971 : /*
5972 : * The subquery could be an expansion of a view that's had columns
5973 : * added to it since the current query was parsed, so that there are
5974 : * non-junk tlist columns in it that don't correspond to any column
5975 : * visible at our query level. Ignore such columns.
5976 : */
5977 121998 : if (te->resno < rel->min_attr || te->resno > rel->max_attr)
5978 0 : continue;
5979 :
5980 : /*
5981 : * XXX This currently doesn't work for subqueries containing set
5982 : * operations, because the Vars in their tlists are bogus references
5983 : * to the first leaf subquery, which wouldn't give the right answer
5984 : * even if we could still get to its PlannerInfo.
5985 : *
5986 : * Also, the subquery could be an appendrel for which all branches are
5987 : * known empty due to constraint exclusion, in which case
5988 : * set_append_rel_pathlist will have left the attr_widths set to zero.
5989 : *
5990 : * In either case, we just leave the width estimate zero until
5991 : * set_rel_width fixes it.
5992 : */
5993 121998 : if (IsA(texpr, Var) &&
5994 59824 : subroot->parse->setOperations == NULL)
5995 : {
5996 57976 : Var *var = (Var *) texpr;
5997 57976 : RelOptInfo *subrel = find_base_rel(subroot, var->varno);
5998 :
5999 57976 : item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
6000 : }
6001 121998 : rel->attr_widths[te->resno - rel->min_attr] = item_width;
6002 : }
6003 :
6004 : /* Now estimate number of output rows, etc */
6005 32734 : set_baserel_size_estimates(root, rel);
6006 32734 : }
6007 :
6008 : /*
6009 : * set_function_size_estimates
6010 : * Set the size estimates for a base relation that is a function call.
6011 : *
6012 : * The rel's targetlist and restrictinfo list must have been constructed
6013 : * already.
6014 : *
6015 : * We set the same fields as set_baserel_size_estimates.
6016 : */
6017 : void
6018 51268 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6019 : {
6020 : RangeTblEntry *rte;
6021 : ListCell *lc;
6022 :
6023 : /* Should only be applied to base relations that are functions */
6024 : Assert(rel->relid > 0);
6025 51268 : rte = planner_rt_fetch(rel->relid, root);
6026 : Assert(rte->rtekind == RTE_FUNCTION);
6027 :
6028 : /*
6029 : * Estimate number of rows the functions will return. The rowcount of the
6030 : * node is that of the largest function result.
6031 : */
6032 51268 : rel->tuples = 0;
6033 103046 : foreach(lc, rte->functions)
6034 : {
6035 51778 : RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
6036 51778 : double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
6037 :
6038 51778 : if (ntup > rel->tuples)
6039 51292 : rel->tuples = ntup;
6040 : }
6041 :
6042 : /* Now estimate number of output rows, etc */
6043 51268 : set_baserel_size_estimates(root, rel);
6044 51268 : }
6045 :
6046 : /*
6047 : * set_tablefunc_size_estimates
6048 : * Set the size estimates for a base relation that is a table function call.
6049 : *
6050 : * The rel's targetlist and restrictinfo list must have been constructed
6051 : * already.
6052 : *
6053 : * We set the same fields as set_baserel_size_estimates.
6054 : */
6055 : void
6056 626 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6057 : {
6058 : /* Should only be applied to base relations that are table functions */
6059 : Assert(rel->relid > 0);
6060 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
6061 :
6062 626 : rel->tuples = 100;
6063 :
6064 : /* Now estimate number of output rows, etc */
6065 626 : set_baserel_size_estimates(root, rel);
6066 626 : }
6067 :
6068 : /*
6069 : * set_values_size_estimates
6070 : * Set the size estimates for a base relation that is a values list.
6071 : *
6072 : * The rel's targetlist and restrictinfo list must have been constructed
6073 : * already.
6074 : *
6075 : * We set the same fields as set_baserel_size_estimates.
6076 : */
6077 : void
6078 8264 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6079 : {
6080 : RangeTblEntry *rte;
6081 :
6082 : /* Should only be applied to base relations that are values lists */
6083 : Assert(rel->relid > 0);
6084 8264 : rte = planner_rt_fetch(rel->relid, root);
6085 : Assert(rte->rtekind == RTE_VALUES);
6086 :
6087 : /*
6088 : * Estimate number of rows the values list will return. We know this
6089 : * precisely based on the list length (well, barring set-returning
6090 : * functions in list items, but that's a refinement not catered for
6091 : * anywhere else either).
6092 : */
6093 8264 : rel->tuples = list_length(rte->values_lists);
6094 :
6095 : /* Now estimate number of output rows, etc */
6096 8264 : set_baserel_size_estimates(root, rel);
6097 8264 : }
6098 :
6099 : /*
6100 : * set_cte_size_estimates
6101 : * Set the size estimates for a base relation that is a CTE reference.
6102 : *
6103 : * The rel's targetlist and restrictinfo list must have been constructed
6104 : * already, and we need an estimate of the number of rows returned by the CTE
6105 : * (if a regular CTE) or the non-recursive term (if a self-reference).
6106 : *
6107 : * We set the same fields as set_baserel_size_estimates.
6108 : */
6109 : void
6110 5190 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
6111 : {
6112 : RangeTblEntry *rte;
6113 :
6114 : /* Should only be applied to base relations that are CTE references */
6115 : Assert(rel->relid > 0);
6116 5190 : rte = planner_rt_fetch(rel->relid, root);
6117 : Assert(rte->rtekind == RTE_CTE);
6118 :
6119 5190 : if (rte->self_reference)
6120 : {
6121 : /*
6122 : * In a self-reference, we assume the average worktable size is a
6123 : * multiple of the nonrecursive term's size. The best multiplier will
6124 : * vary depending on query "fan-out", so make its value adjustable.
6125 : */
6126 932 : rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
6127 : }
6128 : else
6129 : {
6130 : /* Otherwise just believe the CTE's rowcount estimate */
6131 4258 : rel->tuples = cte_rows;
6132 : }
6133 :
6134 : /* Now estimate number of output rows, etc */
6135 5190 : set_baserel_size_estimates(root, rel);
6136 5190 : }
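
/*
 * Worked example of the self-reference branch above, as a hypothetical
 * standalone sketch.  The default recursive_worktable_factor of 10.0 is
 * hard-coded here (the real value is a GUC), and clamp_row_est() is
 * approximated by its basic behavior of rounding to an integer with a
 * minimum of one row (the real function also applies an upper clamp).
 */
#include <math.h>
#include <stdio.h>

static double
clamp_row_est_sketch(double nrows)
{
	return (nrows <= 1.0) ? 1.0 : rint(nrows);
}

int
main(void)
{
	double		cte_rows = 42.0;	/* nonrecursive term's estimate */
	double		factor = 10.0;	/* default recursive_worktable_factor */

	printf("worktable estimate: %g rows\n",
		   clamp_row_est_sketch(factor * cte_rows));	/* 420 */
	return 0;
}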
6137 :
6138 : /*
6139 : * set_namedtuplestore_size_estimates
6140 : * Set the size estimates for a base relation that is a tuplestore reference.
6141 : *
6142 : * The rel's targetlist and restrictinfo list must have been constructed
6143 : * already.
6144 : *
6145 : * We set the same fields as set_baserel_size_estimates.
6146 : */
6147 : void
6148 478 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6149 : {
6150 : RangeTblEntry *rte;
6151 :
6152 : /* Should only be applied to base relations that are tuplestore references */
6153 : Assert(rel->relid > 0);
6154 478 : rte = planner_rt_fetch(rel->relid, root);
6155 : Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
6156 :
6157 : /*
6158 : * Use the estimate provided by the code which is generating the named
6159 : * tuplestore. In some cases, the actual number might be available; in
6160 : * others the same plan will be re-used, so a "typical" value might be
6161 : * estimated and used.
6162 : */
6163 478 : rel->tuples = rte->enrtuples;
6164 478 : if (rel->tuples < 0)
6165 0 : rel->tuples = 1000;
6166 :
6167 : /* Now estimate number of output rows, etc */
6168 478 : set_baserel_size_estimates(root, rel);
6169 478 : }
6170 :
6171 : /*
6172 : * set_result_size_estimates
6173 : * Set the size estimates for an RTE_RESULT base relation
6174 : *
6175 : * The rel's targetlist and restrictinfo list must have been constructed
6176 : * already.
6177 : *
6178 : * We set the same fields as set_baserel_size_estimates.
6179 : */
6180 : void
6181 4208 : set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6182 : {
6183 : /* Should only be applied to RTE_RESULT base relations */
6184 : Assert(rel->relid > 0);
6185 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
6186 :
6187 : /* RTE_RESULT always generates a single row, natively */
6188 4208 : rel->tuples = 1;
6189 :
6190 : /* Now estimate number of output rows, etc */
6191 4208 : set_baserel_size_estimates(root, rel);
6192 4208 : }
6193 :
6194 : /*
6195 : * set_foreign_size_estimates
6196 : * Set the size estimates for a base relation that is a foreign table.
6197 : *
6198 : * There is not a whole lot that we can do here; the foreign-data wrapper
6199 : * is responsible for producing useful estimates. We can do a decent job
6200 : * of estimating baserestrictcost, so we set that, and we also set up width
6201 : * using what will be purely datatype-driven estimates from the targetlist.
6202 : * There is no way to do anything sane with the rows value, so we just put
6203 : * a default estimate and hope that the wrapper can improve on it. The
6204 : * wrapper's GetForeignRelSize function will be called momentarily.
6205 : *
6206 : * The rel's targetlist and restrictinfo list must have been constructed
6207 : * already.
6208 : */
6209 : void
6210 2438 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6211 : {
6212 : /* Should only be applied to base relations */
6213 : Assert(rel->relid > 0);
6214 :
6215 2438 : rel->rows = 1000; /* entirely bogus default estimate */
6216 :
6217 2438 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
6218 :
6219 2438 : set_rel_width(root, rel);
6220 2438 : }
6221 :
6222 :
6223 : /*
6224 : * set_rel_width
6225 : * Set the estimated output width of a base relation.
6226 : *
6227 : * The estimated output width is the sum of the per-attribute width estimates
6228 : * for the actually-referenced columns, plus any PHVs or other expressions
6229 : * that have to be calculated at this relation. This is the amount of data
6230 : * we'd need to pass upwards in case of a sort, hash, etc.
6231 : *
6232 : * This function also sets reltarget->cost, so it's a bit misnamed now.
6233 : *
6234 : * NB: this works best on plain relations because it prefers to look at
6235 : * real Vars. For subqueries, set_subquery_size_estimates will already have
6236 : * copied up whatever per-column estimates were made within the subquery,
6237 : * and for other types of rels there isn't much we can do anyway. We fall
6238 : * back on (fairly stupid) datatype-based width estimates if we can't get
6239 : * any better number.
6240 : *
6241 : * The per-attribute width estimates are cached for possible re-use while
6242 : * building join relations or post-scan/join pathtargets.
6243 : */
6244 : static void
6245 512054 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
6246 : {
6247 512054 : Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
6248 512054 : int64 tuple_width = 0;
6249 512054 : bool have_wholerow_var = false;
6250 : ListCell *lc;
6251 :
6252 : /* Vars are assumed to have cost zero, but other exprs do not */
6253 512054 : rel->reltarget->cost.startup = 0;
6254 512054 : rel->reltarget->cost.per_tuple = 0;
6255 :
6256 1846368 : foreach(lc, rel->reltarget->exprs)
6257 : {
6258 1334314 : Node *node = (Node *) lfirst(lc);
6259 :
6260 : /*
6261 : * Ordinarily, a Var in a rel's targetlist must belong to that rel;
6262 : * but there are corner cases involving LATERAL references where that
6263 : * isn't so. If the Var has the wrong varno, fall through to the
6264 : * generic case (it doesn't seem worth the trouble to be any smarter).
6265 : */
6266 1334314 : if (IsA(node, Var) &&
6267 1310324 : ((Var *) node)->varno == rel->relid)
6268 353220 : {
6269 1310258 : Var *var = (Var *) node;
6270 : int ndx;
6271 : int32 item_width;
6272 :
6273 : Assert(var->varattno >= rel->min_attr);
6274 : Assert(var->varattno <= rel->max_attr);
6275 :
6276 1310258 : ndx = var->varattno - rel->min_attr;
6277 :
6278 : /*
6279 : * If it's a whole-row Var, we'll deal with it below after we have
6280 : * already cached as many attr widths as possible.
6281 : */
6282 1310258 : if (var->varattno == 0)
6283 : {
6284 3018 : have_wholerow_var = true;
6285 3018 : continue;
6286 : }
6287 :
6288 : /*
6289 : * The width may have been cached already (especially if it's a
6290 : * subquery), so don't duplicate effort.
6291 : */
6292 1307240 : if (rel->attr_widths[ndx] > 0)
6293 : {
6294 255890 : tuple_width += rel->attr_widths[ndx];
6295 255890 : continue;
6296 : }
6297 :
6298 : /* Try to get column width from statistics */
6299 1051350 : if (reloid != InvalidOid && var->varattno > 0)
6300 : {
6301 833178 : item_width = get_attavgwidth(reloid, var->varattno);
6302 833178 : if (item_width > 0)
6303 : {
6304 698130 : rel->attr_widths[ndx] = item_width;
6305 698130 : tuple_width += item_width;
6306 698130 : continue;
6307 : }
6308 : }
6309 :
6310 : /*
6311 : * Not a plain relation, or can't find statistics for it. Estimate
6312 : * using just the type info.
6313 : */
6314 353220 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
6315 : Assert(item_width > 0);
6316 353220 : rel->attr_widths[ndx] = item_width;
6317 353220 : tuple_width += item_width;
6318 : }
6319 24056 : else if (IsA(node, PlaceHolderVar))
6320 : {
6321 : /*
6322 : * We will need to evaluate the PHV's contained expression while
6323 : * scanning this rel, so be sure to include it in reltarget->cost.
6324 : */
6325 1990 : PlaceHolderVar *phv = (PlaceHolderVar *) node;
6326 1990 : PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
6327 : QualCost cost;
6328 :
6329 1990 : tuple_width += phinfo->ph_width;
6330 1990 : cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
6331 1990 : rel->reltarget->cost.startup += cost.startup;
6332 1990 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6333 : }
6334 : else
6335 : {
6336 : /*
6337 : * We could be looking at an expression pulled up from a subquery,
6338 : * or a ROW() representing a whole-row child Var, etc. Do what we
6339 : * can using the expression type information.
6340 : */
6341 : int32 item_width;
6342 : QualCost cost;
6343 :
6344 22066 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
6345 : Assert(item_width > 0);
6346 22066 : tuple_width += item_width;
6347 : /* Not entirely clear if we need to account for cost, but do so */
6348 22066 : cost_qual_eval_node(&cost, node, root);
6349 22066 : rel->reltarget->cost.startup += cost.startup;
6350 22066 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6351 : }
6352 : }
6353 :
6354 : /*
6355 : * If we have a whole-row reference, estimate its width as the sum of
6356 : * per-column widths plus heap tuple header overhead.
6357 : */
6358 512054 : if (have_wholerow_var)
6359 : {
6360 3018 : int64 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
6361 :
6362 3018 : if (reloid != InvalidOid)
6363 : {
6364 : /* Real relation, so estimate true tuple width */
6365 2360 : wholerow_width += get_relation_data_width(reloid,
6366 2360 : rel->attr_widths - rel->min_attr);
6367 : }
6368 : else
6369 : {
6370 : /* Do what we can with info for a phony rel */
6371 : AttrNumber i;
6372 :
6373 1794 : for (i = 1; i <= rel->max_attr; i++)
6374 1136 : wholerow_width += rel->attr_widths[i - rel->min_attr];
6375 : }
6376 :
6377 3018 : rel->attr_widths[0 - rel->min_attr] = clamp_width_est(wholerow_width);
6378 :
6379 : /*
6380 : * Include the whole-row Var as part of the output tuple. Yes, that
6381 : * really is what happens at runtime.
6382 : */
6383 3018 : tuple_width += wholerow_width;
6384 : }
6385 :
6386 512054 : rel->reltarget->width = clamp_width_est(tuple_width);
6387 512054 : }
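
/*
 * The whole-row branch above boils down to a MAXALIGN'ed heap-tuple-header
 * constant plus the cached per-column widths.  A minimal sketch, assuming
 * the typical 64-bit values of 8-byte alignment and a 23-byte
 * SizeofHeapTupleHeader (hard-coded here rather than taken from the real
 * headers; the column widths are made-up examples).
 */
#include <stdio.h>

#define MAXALIGN_SKETCH(x) (((x) + 7) & ~7)	/* 8-byte alignment, typical build */

int
main(void)
{
	int			attr_widths[] = {4, 8, 32};	/* e.g. int4, int8, a short text */
	int			nattrs = 3;
	long		wholerow_width = MAXALIGN_SKETCH(23);	/* tuple header: 23 -> 24 */

	for (int i = 0; i < nattrs; i++)
		wholerow_width += attr_widths[i];

	printf("whole-row width estimate: %ld bytes\n", wholerow_width);	/* 68 */
	return 0;
}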
6388 :
6389 : /*
6390 : * set_pathtarget_cost_width
6391 : * Set the estimated eval cost and output width of a PathTarget tlist.
6392 : *
6393 : * As a notational convenience, returns the same PathTarget pointer passed in.
6394 : *
6395 : * Most, though not quite all, uses of this function occur after we've run
6396 : * set_rel_width() for base relations; so we can usually obtain cached width
6397 : * estimates for Vars. If we can't, fall back on datatype-based width
6398 : * estimates. Present early-planning uses of PathTargets don't need accurate
6399 : * widths badly enough to justify going to the catalogs for better data.
6400 : */
6401 : PathTarget *
6402 610530 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6403 : {
6404 610530 : int64 tuple_width = 0;
6405 : ListCell *lc;
6406 :
6407 : /* Vars are assumed to have cost zero, but other exprs do not */
6408 610530 : target->cost.startup = 0;
6409 610530 : target->cost.per_tuple = 0;
6410 :
6411 2124662 : foreach(lc, target->exprs)
6412 : {
6413 1514132 : Node *node = (Node *) lfirst(lc);
6414 :
6415 1514132 : tuple_width += get_expr_width(root, node);
6416 :
6417 : /* For non-Vars, account for evaluation cost */
6418 1514132 : if (!IsA(node, Var))
6419 : {
6420 : QualCost cost;
6421 :
6422 644722 : cost_qual_eval_node(&cost, node, root);
6423 644722 : target->cost.startup += cost.startup;
6424 644722 : target->cost.per_tuple += cost.per_tuple;
6425 : }
6426 : }
6427 :
6428 610530 : target->width = clamp_width_est(tuple_width);
6429 :
6430 610530 : return target;
6431 : }
6432 :
6433 : /*
6434 : * get_expr_width
6435 : * Estimate the width of the given expr, attempting to use the width
6436 : * cached in a Var's owning RelOptInfo; fall back on the type's average
6437 : * width when that is unavailable or when the given Node is not a Var.
6438 : */
6439 : static int32
6440 1837440 : get_expr_width(PlannerInfo *root, const Node *expr)
6441 : {
6442 : int32 width;
6443 :
6444 1837440 : if (IsA(expr, Var))
6445 : {
6446 1180006 : const Var *var = (const Var *) expr;
6447 :
6448 : /* We should not see any upper-level Vars here */
6449 : Assert(var->varlevelsup == 0);
6450 :
6451 : /* Try to get data from RelOptInfo cache */
6452 1180006 : if (!IS_SPECIAL_VARNO(var->varno) &&
6453 1174196 : var->varno < root->simple_rel_array_size)
6454 : {
6455 1174196 : RelOptInfo *rel = root->simple_rel_array[var->varno];
6456 :
6457 1174196 : if (rel != NULL &&
6458 1156504 : var->varattno >= rel->min_attr &&
6459 1156504 : var->varattno <= rel->max_attr)
6460 : {
6461 1156504 : int ndx = var->varattno - rel->min_attr;
6462 :
6463 1156504 : if (rel->attr_widths[ndx] > 0)
6464 1124552 : return rel->attr_widths[ndx];
6465 : }
6466 : }
6467 :
6468 : /*
6469 : * No cached data available, so estimate using just the type info.
6470 : */
6471 55454 : width = get_typavgwidth(var->vartype, var->vartypmod);
6472 : Assert(width > 0);
6473 :
6474 55454 : return width;
6475 : }
6476 :
6477 657434 : width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6478 : Assert(width > 0);
6479 657434 : return width;
6480 : }
6481 :
6482 : /*
6483 : * relation_byte_size
6484 : * Estimate the storage space in bytes for a given number of tuples
6485 : * of a given width (size in bytes).
6486 : */
6487 : static double
6488 4999520 : relation_byte_size(double tuples, int width)
6489 : {
6490 4999520 : return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
6491 : }
6492 :
6493 : /*
6494 : * page_size
6495 : * Returns an estimate of the number of pages covered by a given
6496 : * number of tuples of a given width (size in bytes).
6497 : */
6498 : static double
6499 9284 : page_size(double tuples, int width)
6500 : {
6501 9284 : return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6502 : }
6503 :
6504 : /*
6505 : * Estimate the fraction of the work that each worker will do given the
6506 : * number of workers budgeted for the path.
6507 : */
6508 : static double
6509 463074 : get_parallel_divisor(Path *path)
6510 : {
6511 463074 : double parallel_divisor = path->parallel_workers;
6512 :
6513 : /*
6514 : * Early experience with parallel query suggests that when there is only
6515 : * one worker, the leader often makes a very substantial contribution to
6516 : * executing the parallel portion of the plan, but as more workers are
6517 : * added, it does less and less, because it's busy reading tuples from the
6518 : * workers and doing whatever non-parallel post-processing is needed. By
6519 : * the time we reach 4 workers, the leader no longer makes a meaningful
6520 : * contribution. Thus, for now, estimate that the leader spends 30% of
6521 : * its time servicing each worker, and the remainder executing the
6522 : * parallel plan.
6523 : */
6524 463074 : if (parallel_leader_participation)
6525 : {
6526 : double leader_contribution;
6527 :
6528 461772 : leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6529 461772 : if (leader_contribution > 0)
6530 459456 : parallel_divisor += leader_contribution;
6531 : }
6532 :
6533 463074 : return parallel_divisor;
6534 : }
6535 :
6536 : /*
6537 : * compute_bitmap_pages
6538 : * Estimate number of pages fetched from heap in a bitmap heap scan.
6539 : *
6540 : * 'baserel' is the relation to be scanned
6541 : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
6542 : * 'loop_count' is the number of repetitions of the indexscan to factor into
6543 : * estimates of caching behavior
6544 : *
6545 : * If cost_p isn't NULL, the indexTotalCost estimate is returned in *cost_p.
6546 : * If tuples_p isn't NULL, the tuples_fetched estimate is returned in *tuples_p.
6547 : */
6548 : double
6549 688324 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
6550 : Path *bitmapqual, double loop_count,
6551 : Cost *cost_p, double *tuples_p)
6552 : {
6553 : Cost indexTotalCost;
6554 : Selectivity indexSelectivity;
6555 : double T;
6556 : double pages_fetched;
6557 : double tuples_fetched;
6558 : double heap_pages;
6559 : double maxentries;
6560 :
6561 : /*
6562 : * Fetch total cost of obtaining the bitmap, as well as its total
6563 : * selectivity.
6564 : */
6565 688324 : cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6566 :
6567 : /*
6568 : * Estimate number of main-table pages fetched.
6569 : */
6570 688324 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6571 :
6572 688324 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
6573 :
6574 : /*
6575 : * For a single scan, the number of heap pages that need to be fetched is
6576 : * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6577 : * re-reads needed).
6578 : */
6579 688324 : pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
6580 :
6581 : /*
6582 : * Calculate the number of pages fetched from the heap, then estimate
6583 : * the bitmap's maximum number of entries based on the current work_mem.
6584 : * (Note that we always do this calculation based on the number of pages
6585 : * that would be fetched in a single iteration, even if loop_count > 1.
6586 : * That's correct, because only that number of entries will be stored in
6587 : * the bitmap at one time.)
6588 : */
6589 688324 : heap_pages = Min(pages_fetched, baserel->pages);
6590 688324 : maxentries = tbm_calculate_entries(work_mem * (Size) 1024);
6591 :
6592 688324 : if (loop_count > 1)
6593 : {
6594 : /*
6595 : * For repeated bitmap scans, scale up the number of tuples fetched in
6596 : * the Mackert and Lohman formula by the number of scans, so that we
6597 : * estimate the number of pages fetched by all the scans. Then
6598 : * pro-rate for one scan.
6599 : */
6600 144514 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6601 : baserel->pages,
6602 : get_indexpath_pages(bitmapqual),
6603 : root);
6604 144514 : pages_fetched /= loop_count;
6605 : }
6606 :
6607 688324 : if (pages_fetched >= T)
6608 65730 : pages_fetched = T;
6609 : else
6610 622594 : pages_fetched = ceil(pages_fetched);
6611 :
6612 688324 : if (maxentries < heap_pages)
6613 : {
6614 : double exact_pages;
6615 : double lossy_pages;
6616 :
6617 : /*
6618 : * Crude approximation of the number of lossy pages. Because of the
6619 : * way tbm_lossify() is coded, the number of lossy pages increases
6620 : * very sharply as soon as we run short of memory; this formula has
6621 : * that property and seems to perform adequately in testing, but it's
6622 : * possible we could do better somehow.
6623 : */
6624 18 : lossy_pages = Max(0, heap_pages - maxentries / 2);
6625 18 : exact_pages = heap_pages - lossy_pages;
6626 :
6627 : /*
6628 : * If there are lossy pages then recompute the number of tuples
6629 : * processed by the bitmap heap node. We assume here that the chance
6630 : * of a given tuple coming from an exact page is the same as the
6631 : * chance that a given page is exact. This might not be true, but
6632 : * it's not clear how we can do any better.
6633 : */
6634 18 : if (lossy_pages > 0)
6635 : tuples_fetched =
6636 18 : clamp_row_est(indexSelectivity *
6637 18 : (exact_pages / heap_pages) * baserel->tuples +
6638 18 : (lossy_pages / heap_pages) * baserel->tuples);
6639 : }
6640 :
6641 688324 : if (cost_p)
6642 543892 : *cost_p = indexTotalCost;
6643 688324 : if (tuples_p)
6644 543892 : *tuples_p = tuples_fetched;
6645 :
6646 688324 : return pages_fetched;
6647 : }
6648 :
6649 : /*
6650 : * compute_gather_rows
6651 : * Estimate number of rows for gather (merge) nodes.
6652 : *
6653 : * In a parallel plan, each worker's row estimate is determined by dividing the
6654 : * total number of rows by parallel_divisor, which accounts for the leader's
6655 : * contribution in addition to the number of workers. Accordingly, when
6656 : * estimating the number of rows for gather (merge) nodes, we multiply the rows
6657 : * per worker by the same parallel_divisor to undo the division.
6658 : */
6659 : double
6660 41994 : compute_gather_rows(Path *path)
6661 : {
6662 : Assert(path->parallel_workers > 0);
6663 :
6664 41994 : return clamp_row_est(path->rows * get_parallel_divisor(path));
6665 : }
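
/*
 * compute_gather_rows() simply multiplies the per-worker row estimate back
 * up by the same divisor.  A worked sketch under the leader-participation
 * assumption used above; all values here are hypothetical.
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		rows_per_worker = 1000.0;	/* path->rows, already divided down */
	int			workers = 2;
	double		divisor = workers + (1.0 - 0.3 * workers);	/* 2.4 with leader */

	printf("gather rows: %g\n", rint(rows_per_worker * divisor));	/* 2400 */
	return 0;
}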
|