/*-------------------------------------------------------------------------
 *
 * costsize.c
 *      Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in arbitrary units established by these basic
 * parameters:
 *
 *      seq_page_cost           Cost of a sequential page fetch
 *      random_page_cost        Cost of a non-sequential page fetch
 *      cpu_tuple_cost          Cost of typical CPU time to process a tuple
 *      cpu_index_tuple_cost    Cost of typical CPU time to process an index tuple
 *      cpu_operator_cost       Cost of CPU time to execute an operator or function
 *      parallel_tuple_cost     Cost of CPU time to pass a tuple from worker to leader backend
 *      parallel_setup_cost     Cost of setting up shared memory for parallelism
 *
 * We expect that the kernel will typically do some amount of read-ahead
 * optimization; this in conjunction with seek costs means that seq_page_cost
 * is normally considerably less than random_page_cost.  (However, if the
 * database is fully cached in RAM, it is reasonable to set them equal.)
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * seq_page_cost and random_page_cost can also be overridden for an individual
 * tablespace, in case some data is on a fast disk and other data is on a slow
 * disk.  Per-tablespace overrides never apply to temporary work files such as
 * an external sort or a materialize node that overflows work_mem.
 *
 * We compute two separate costs for each path:
 *      total_cost: total estimated cost to fetch all tuples
 *      startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *      actual_cost = startup_cost +
 *          (total_cost - startup_cost) * tuples_to_fetch / path->rows;
 * Note that a base relation's rows count (and, by extension, plan_rows for
 * plan nodes below the LIMIT node) is set without regard to any LIMIT, so
 * that this equation works properly.  (Note: while path->rows is never zero
 * for ordinary relations, it is zero for paths for provably-empty relations,
 * so beware of division-by-zero.)  The LIMIT is applied as a top-level
 * plan node.
 *
 * For largely historical reasons, most of the routines in this module use
 * the passed result Path only to store their results (rows, startup_cost and
 * total_cost) into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the Path.
 * An exception is made for the cost_XXXjoin() routines, which expect all
 * the other fields of the passed XXXPath to be filled in, and similarly
 * cost_index() assumes the passed IndexPath is valid except for its output
 * values.
 *
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *      src/backend/optimizer/path/costsize.c
 *
 *-------------------------------------------------------------------------
 */
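
/*
 * Illustrative sketch (editor's addition, not part of PostgreSQL): applying
 * the interpolation formula above to estimate the cost of fetching only the
 * first tuples_to_fetch rows of a path.  The standalone function and its
 * name are hypothetical; the zero-rows guard follows the division-by-zero
 * caveat in the header comment.
 */
static double
example_partial_fetch_cost(double startup_cost, double total_cost,
                           double path_rows, double tuples_to_fetch)
{
    /* paths for provably-empty relations can have rows == 0 */
    if (path_rows <= 0.0)
        return startup_cost;
    /* never extrapolate beyond the full result */
    if (tuples_to_fetch > path_rows)
        tuples_to_fetch = path_rows;
    return startup_cost +
        (total_cost - startup_cost) * tuples_to_fetch / path_rows;
}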

#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/amapi.h"
#include "access/htup_details.h"
#include "access/tsmapi.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "executor/nodeHash.h"
#include "executor/nodeMemoize.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/placeholder.h"
#include "optimizer/plancat.h"
#include "optimizer/restrictinfo.h"
#include "parser/parsetree.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
#include "utils/spccache.h"
#include "utils/tuplesort.h"


#define LOG2(x)  (log(x) / 0.693147180559945)

/*
 * Append and MergeAppend nodes are less expensive than some other operations
 * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
 * per-tuple cost as cpu_tuple_cost multiplied by this value.
 */
#define APPEND_CPU_COST_MULTIPLIER 0.5

/*
 * Maximum value for row estimates.  We cap row estimates to this to help
 * ensure that costs based on these estimates remain within the range of what
 * double can represent.  add_path() wouldn't act sanely given infinite or NaN
 * cost values.
 */
#define MAXIMUM_ROWCOUNT 1e100

double      seq_page_cost = DEFAULT_SEQ_PAGE_COST;
double      random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double      cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double      cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double      cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
double      parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
double      parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
double      recursive_worktable_factor = DEFAULT_RECURSIVE_WORKTABLE_FACTOR;

int         effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;

Cost        disable_cost = 1.0e10;

int         max_parallel_workers_per_gather = 2;

bool        enable_seqscan = true;
bool        enable_indexscan = true;
bool        enable_indexonlyscan = true;
bool        enable_bitmapscan = true;
bool        enable_tidscan = true;
bool        enable_sort = true;
bool        enable_incremental_sort = true;
bool        enable_hashagg = true;
bool        enable_nestloop = true;
bool        enable_material = true;
bool        enable_memoize = true;
bool        enable_mergejoin = true;
bool        enable_hashjoin = true;
bool        enable_gathermerge = true;
bool        enable_partitionwise_join = false;
bool        enable_partitionwise_aggregate = false;
bool        enable_parallel_append = true;
bool        enable_parallel_hash = true;
bool        enable_partition_pruning = true;
bool        enable_presorted_aggregate = true;
bool        enable_async_append = true;

typedef struct
{
    PlannerInfo *root;
    QualCost    total;
} cost_qual_eval_context;

static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
static MergeScanSelCache *cached_scansel(PlannerInfo *root,
                                         RestrictInfo *rinfo,
                                         PathKey *pathkey);
static void cost_rescan(PlannerInfo *root, Path *path,
                        Cost *rescan_startup_cost, Cost *rescan_total_cost);
static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
                                      ParamPathInfo *param_info,
                                      QualCost *qpqual_cost);
static bool has_indexed_join_quals(NestPath *path);
static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
                                 List *quals);
static double calc_joinrel_size_estimate(PlannerInfo *root,
                                         RelOptInfo *joinrel,
                                         RelOptInfo *outer_rel,
                                         RelOptInfo *inner_rel,
                                         double outer_rows,
                                         double inner_rows,
                                         SpecialJoinInfo *sjinfo,
                                         List *restrictlist);
static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
                                                    Relids outer_relids,
                                                    Relids inner_relids,
                                                    SpecialJoinInfo *sjinfo,
                                                    List **restrictlist);
static Cost append_nonpartial_cost(List *subpaths, int numpaths,
                                   int parallel_workers);
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
static int32 get_expr_width(PlannerInfo *root, const Node *expr);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
static double get_parallel_divisor(Path *path);


/*
 * clamp_row_est
 *      Force a row-count estimate to a sane value.
 */
double
clamp_row_est(double nrows)
{
    /*
     * Avoid infinite and NaN row estimates.  Costs derived from such values
     * are going to be useless.  Also force the estimate to be at least one
     * row, to make explain output look better and to avoid possible
     * divide-by-zero when interpolating costs.  Make it an integer, too.
     */
    if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
        nrows = MAXIMUM_ROWCOUNT;
    else if (nrows <= 1.0)
        nrows = 1.0;
    else
        nrows = rint(nrows);

    return nrows;
}
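
/*
 * Examples (editor's addition, not part of PostgreSQL) of clamp_row_est()'s
 * behavior per the rules above: estimates come out finite, at least 1.0,
 * and integral.
 *
 *      clamp_row_est(0.3)    == 1.0        (at least one row)
 *      clamp_row_est(42.7)   == 43.0       (rounded to an integer)
 *      clamp_row_est(1e300)  == 1e100      (capped at MAXIMUM_ROWCOUNT)
 *      clamp_row_est(NAN)    == 1e100      (NaN treated as "huge")
 */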

/*
 * clamp_width_est
 *      Force a tuple-width estimate to a sane value.
 *
 * The planner represents datatype width and tuple width estimates as int32.
 * When summing column width estimates to create a tuple width estimate,
 * it's possible to reach integer overflow in edge cases.  To ensure sane
 * behavior, we form such sums in int64 arithmetic and then apply this routine
 * to clamp to int32 range.
 */
int32
clamp_width_est(int64 tuple_width)
{
    /*
     * Anything more than MaxAllocSize is clearly bogus, since we could not
     * create a tuple that large.
     */
    if (tuple_width > MaxAllocSize)
        return (int32) MaxAllocSize;

    /*
     * Unlike clamp_row_est, we just Assert that the value isn't negative,
     * rather than masking such errors.
     */
    Assert(tuple_width >= 0);

    return (int32) tuple_width;
}

/*
 * clamp_cardinality_to_long
 *      Cast a Cardinality value to a sane long value.
 */
long
clamp_cardinality_to_long(Cardinality x)
{
    /*
     * Just for paranoia's sake, ensure we do something sane with negative or
     * NaN values.
     */
    if (isnan(x))
        return LONG_MAX;
    if (x <= 0)
        return 0;

    /*
     * If "long" is 64 bits, then LONG_MAX cannot be represented exactly as a
     * double.  Casting it to double and back may well result in overflow due
     * to rounding, so avoid doing that.  We trust that any double value that
     * compares strictly less than "(double) LONG_MAX" will cast to a
     * representable "long" value.
     */
    return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
}
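
/*
 * Editor's note (not part of PostgreSQL), expanding on the rounding hazard
 * above: with a 64-bit "long", LONG_MAX is 2^63 - 1, which has no exact
 * double representation; (double) LONG_MAX rounds up to exactly 2^63, and
 * casting that back to long would overflow.  Hence the strict "<" comparison
 * against (double) LONG_MAX instead of a round-trip cast.
 */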


/*
 * cost_seqscan
 *      Determines and returns the cost of scanning a relation sequentially.
 *
 * 'baserel' is the relation to be scanned
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 */
void
cost_seqscan(Path *path, PlannerInfo *root,
             RelOptInfo *baserel, ParamPathInfo *param_info)
{
    Cost        startup_cost = 0;
    Cost        cpu_run_cost;
    Cost        disk_run_cost;
    double      spc_seq_page_cost;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;

    /* Should only be applied to base relations */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    if (!enable_seqscan)
        startup_cost += disable_cost;

    /* fetch estimated page cost for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              NULL,
                              &spc_seq_page_cost);

    /*
     * disk costs
     */
    disk_run_cost = spc_seq_page_cost * baserel->pages;

    /* CPU costs */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    cpu_run_cost = cpu_per_tuple * baserel->tuples;
    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->pathtarget->cost.startup;
    cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;

    /* Adjust costing for parallelism, if used. */
    if (path->parallel_workers > 0)
    {
        double      parallel_divisor = get_parallel_divisor(path);

        /* The CPU cost is divided among all the workers. */
        cpu_run_cost /= parallel_divisor;

        /*
         * It may be possible to amortize some of the I/O cost, but probably
         * not very much, because most operating systems already do aggressive
         * prefetching.  For now, we assume that the disk run cost can't be
         * amortized at all.
         */

        /*
         * In the case of a parallel plan, the row count needs to represent
         * the number of tuples processed per worker.
         */
        path->rows = clamp_row_est(path->rows / parallel_divisor);
    }

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
}
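
/*
 * Worked example (editor's addition, not part of PostgreSQL).  With default
 * parameters (seq_page_cost = 1.0, cpu_tuple_cost = 0.01), a hypothetical
 * 100-page, 10000-tuple table with no quals and a trivial tlist costs:
 *
 *      disk_run_cost = 1.0 * 100        = 100.0
 *      cpu_run_cost  = 0.01 * 10000     = 100.0
 *      total_cost    = 0 + 100 + 100    = 200.0
 *
 * matching the familiar "cost=0.00..200.00" shape of a Seq Scan in EXPLAIN.
 */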

/*
 * cost_samplescan
 *      Determines and returns the cost of scanning a relation using sampling.
 *
 * 'baserel' is the relation to be scanned
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 */
void
cost_samplescan(Path *path, PlannerInfo *root,
                RelOptInfo *baserel, ParamPathInfo *param_info)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    RangeTblEntry *rte;
    TableSampleClause *tsc;
    TsmRoutine *tsm;
    double      spc_seq_page_cost,
                spc_random_page_cost,
                spc_page_cost;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;

    /* Should only be applied to base relations with tablesample clauses */
    Assert(baserel->relid > 0);
    rte = planner_rt_fetch(baserel->relid, root);
    Assert(rte->rtekind == RTE_RELATION);
    tsc = rte->tablesample;
    Assert(tsc != NULL);
    tsm = GetTsmRoutine(tsc->tsmhandler);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    /* fetch estimated page cost for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    /* if NextSampleBlock is used, assume random access, else sequential */
    spc_page_cost = (tsm->NextSampleBlock != NULL) ?
        spc_random_page_cost : spc_seq_page_cost;

    /*
     * disk costs (recall that baserel->pages has already been set to the
     * number of pages the sampling method will visit)
     */
    run_cost += spc_page_cost * baserel->pages;

    /*
     * CPU costs (recall that baserel->tuples has already been set to the
     * number of tuples the sampling method will select).  Note that we ignore
     * execution cost of the TABLESAMPLE parameter expressions; they will be
     * evaluated only once per scan, and in most usages they'll likely be
     * simple constants anyway.  We also don't charge anything for the
     * calculations the sampling method might do internally.
     */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    run_cost += cpu_per_tuple * baserel->tuples;
    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}

/*
 * cost_gather
 *      Determines and returns the cost of a gather path.
 *
 * 'rel' is the relation to be operated upon
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
 * both 'rel' and 'param_info'.  This is useful when the path doesn't exactly
 * correspond to any particular RelOptInfo.
 */
void
cost_gather(GatherPath *path, PlannerInfo *root,
            RelOptInfo *rel, ParamPathInfo *param_info,
            double *rows)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;

    /* Mark the path with the correct row estimate */
    if (rows)
        path->path.rows = *rows;
    else if (param_info)
        path->path.rows = param_info->ppi_rows;
    else
        path->path.rows = rel->rows;

    startup_cost = path->subpath->startup_cost;

    run_cost = path->subpath->total_cost - path->subpath->startup_cost;

    /* Parallel setup and communication cost. */
    startup_cost += parallel_setup_cost;
    run_cost += parallel_tuple_cost * path->path.rows;

    path->path.startup_cost = startup_cost;
    path->path.total_cost = (startup_cost + run_cost);
}
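
/*
 * Worked example (editor's addition, not part of PostgreSQL).  With default
 * parallel_setup_cost = 1000 and parallel_tuple_cost = 0.1, a Gather over a
 * hypothetical subpath costing 0..500 and returning 1000 rows is costed:
 *
 *      startup_cost = 0 + 1000         = 1000.0
 *      run_cost     = 500 + 0.1 * 1000 = 600.0
 *      total_cost   = 1000 + 600       = 1600.0
 */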

/*
 * cost_gather_merge
 *      Determines and returns the cost of a gather merge path.
 *
 * GatherMerge merges several pre-sorted input streams, using a heap that at
 * any given instant holds the next tuple from each stream.  If there are N
 * streams, we need about N*log2(N) tuple comparisons to construct the heap at
 * startup, and then for each output tuple, about log2(N) comparisons to
 * replace the top heap entry with the next tuple from the same stream.
 */
void
cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
                  RelOptInfo *rel, ParamPathInfo *param_info,
                  Cost input_startup_cost, Cost input_total_cost,
                  double *rows)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        comparison_cost;
    double      N;
    double      logN;

    /* Mark the path with the correct row estimate */
    if (rows)
        path->path.rows = *rows;
    else if (param_info)
        path->path.rows = param_info->ppi_rows;
    else
        path->path.rows = rel->rows;

    if (!enable_gathermerge)
        startup_cost += disable_cost;

    /*
     * Add one to the number of workers to account for the leader.  This might
     * be overgenerous since the leader will do less work than other workers
     * in typical cases, but we'll go with it for now.
     */
    Assert(path->num_workers > 0);
    N = (double) path->num_workers + 1;
    logN = LOG2(N);

    /* Assumed cost per tuple comparison */
    comparison_cost = 2.0 * cpu_operator_cost;

    /* Heap creation cost */
    startup_cost += comparison_cost * N * logN;

    /* Per-tuple heap maintenance cost */
    run_cost += path->path.rows * comparison_cost * logN;

    /* small cost for heap management, like cost_merge_append */
    run_cost += cpu_operator_cost * path->path.rows;

    /*
     * Parallel setup and communication cost.  Since Gather Merge, unlike
     * Gather, requires us to block until a tuple is available from every
     * worker, we bump the IPC cost up a little bit as compared with Gather.
     * For lack of a better idea, charge an extra 5%.
     */
    startup_cost += parallel_setup_cost;
    run_cost += parallel_tuple_cost * path->path.rows * 1.05;

    path->path.startup_cost = startup_cost + input_startup_cost;
    path->path.total_cost = (startup_cost + run_cost + input_total_cost);
}
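
/*
 * Worked example (editor's addition, not part of PostgreSQL).  With two
 * workers plus the leader, N = 3 and logN = log2(3) ~= 1.585.  At the
 * default cpu_operator_cost = 0.0025, comparison_cost = 0.005, so heap
 * creation adds 0.005 * 3 * 1.585 ~= 0.024 to startup cost, and each output
 * row pays 0.005 * 1.585 ~= 0.008 for heap maintenance, 0.0025 for heap
 * management, and 0.1 * 1.05 = 0.105 for IPC.
 */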

/*
 * cost_index
 *      Determines and returns the cost of scanning a relation using an index.
 *
 * 'path' describes the indexscan under consideration, and is complete
 *      except for the fields to be set by this routine
 * 'loop_count' is the number of repetitions of the indexscan to factor into
 *      estimates of caching behavior
 *
 * In addition to rows, startup_cost and total_cost, cost_index() sets the
 * path's indextotalcost and indexselectivity fields.  These values will be
 * needed if the IndexPath is used in a BitmapIndexScan.
 *
 * NOTE: path->indexquals must contain only clauses usable as index
 * restrictions.  Any additional quals evaluated as qpquals may reduce the
 * number of returned tuples, but they won't reduce the number of tuples
 * we have to fetch from the table, so they don't reduce the scan cost.
 */
void
cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
           bool partial_path)
{
    IndexOptInfo *index = path->indexinfo;
    RelOptInfo *baserel = index->rel;
    bool        indexonly = (path->path.pathtype == T_IndexOnlyScan);
    amcostestimate_function amcostestimate;
    List       *qpquals;
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        cpu_run_cost = 0;
    Cost        indexStartupCost;
    Cost        indexTotalCost;
    Selectivity indexSelectivity;
    double      indexCorrelation,
                csquared;
    double      spc_seq_page_cost,
                spc_random_page_cost;
    Cost        min_IO_cost,
                max_IO_cost;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;
    double      tuples_fetched;
    double      pages_fetched;
    double      rand_heap_pages;
    double      index_pages;

    /* Should only be applied to base relations */
    Assert(IsA(baserel, RelOptInfo) &&
           IsA(index, IndexOptInfo));
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /*
     * Mark the path with the correct row estimate, and identify which quals
     * will need to be enforced as qpquals.  We need not check any quals that
     * are implied by the index's predicate, so we can use indrestrictinfo not
     * baserestrictinfo as the list of relevant restriction clauses for the
     * rel.
     */
    if (path->path.param_info)
    {
        path->path.rows = path->path.param_info->ppi_rows;
        /* qpquals come from the rel's restriction clauses and ppi_clauses */
        qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
                                                          path->indexclauses),
                              extract_nonindex_conditions(path->path.param_info->ppi_clauses,
                                                          path->indexclauses));
    }
    else
    {
        path->path.rows = baserel->rows;
        /* qpquals come from just the rel's restriction clauses */
        qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
                                              path->indexclauses);
    }

    if (!enable_indexscan)
        startup_cost += disable_cost;
    /* we don't need to check enable_indexonlyscan; indxpath.c does that */

    /*
     * Call index-access-method-specific code to estimate the processing cost
     * for scanning the index, as well as the selectivity of the index (ie,
     * the fraction of main-table tuples we will have to retrieve) and its
     * correlation to the main-table tuple order.  We need a cast here because
     * pathnodes.h uses a weak function type to avoid including amapi.h.
     */
    amcostestimate = (amcostestimate_function) index->amcostestimate;
    amcostestimate(root, path, loop_count,
                   &indexStartupCost, &indexTotalCost,
                   &indexSelectivity, &indexCorrelation,
                   &index_pages);

    /*
     * Save amcostestimate's results for possible use in bitmap scan planning.
     * We don't bother to save indexStartupCost or indexCorrelation, because a
     * bitmap scan doesn't care about either.
     */
    path->indextotalcost = indexTotalCost;
    path->indexselectivity = indexSelectivity;

    /* all costs for touching index itself included here */
    startup_cost += indexStartupCost;
    run_cost += indexTotalCost - indexStartupCost;

    /* estimate number of main-table tuples fetched */
    tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

    /* fetch estimated page costs for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    /*----------
     * Estimate number of main-table pages fetched, and compute I/O cost.
     *
     * When the index ordering is uncorrelated with the table ordering,
     * we use an approximation proposed by Mackert and Lohman (see
     * index_pages_fetched() for details) to compute the number of pages
     * fetched, and then charge spc_random_page_cost per page fetched.
     *
     * When the index ordering is exactly correlated with the table ordering
     * (just after a CLUSTER, for example), the number of pages fetched should
     * be exactly selectivity * table_size.  What's more, all but the first
     * will be sequential fetches, not the random fetches that occur in the
     * uncorrelated case.  So if the number of pages is more than 1, we
     * ought to charge
     *      spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
     * For partially-correlated indexes, we ought to charge somewhere between
     * these two estimates.  We currently interpolate linearly between the
     * estimates based on the correlation squared (XXX is that appropriate?).
     *
     * If it's an index-only scan, then we will not need to fetch any heap
     * pages for which the visibility map shows all tuples are visible.
     * Hence, reduce the estimated number of heap fetches accordingly.
     * We use the measured fraction of the entire heap that is all-visible,
     * which might not be particularly relevant to the subset of the heap
     * that this query will fetch; but it's not clear how to do better.
     *----------
     */
    if (loop_count > 1)
    {
        /*
         * For repeated indexscans, the appropriate estimate for the
         * uncorrelated case is to scale up the number of tuples fetched in
         * the Mackert and Lohman formula by the number of scans, so that we
         * estimate the number of pages fetched by all the scans; then
         * pro-rate the costs for one scan.  In this case we assume all the
         * fetches are random accesses.
         */
        pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        rand_heap_pages = pages_fetched;

        max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;

        /*
         * In the perfectly correlated case, the number of pages touched by
         * each scan is selectivity * table_size, and we can use the Mackert
         * and Lohman formula at the page level to estimate how much work is
         * saved by caching across scans.  We still assume all the fetches are
         * random, though, which is an overestimate that's hard to correct for
         * without double-counting the cache effects.  (But in most cases
         * where such a plan is actually interesting, only one page would get
         * fetched per scan anyway, so it shouldn't matter much.)
         */
        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

        pages_fetched = index_pages_fetched(pages_fetched * loop_count,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
    }
    else
    {
        /*
         * Normal case: apply the Mackert and Lohman formula, and then
         * interpolate between that and the correlation-derived result.
         */
        pages_fetched = index_pages_fetched(tuples_fetched,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        rand_heap_pages = pages_fetched;

        /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
        max_IO_cost = pages_fetched * spc_random_page_cost;

        /* min_IO_cost is for the perfectly correlated case (csquared=1) */
        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        if (pages_fetched > 0)
        {
            min_IO_cost = spc_random_page_cost;
            if (pages_fetched > 1)
                min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
        }
        else
            min_IO_cost = 0;
    }

    if (partial_path)
    {
        /*
         * For index only scans compute workers based on number of index pages
         * fetched; the number of heap pages we fetch might be so small as to
         * effectively rule out parallelism, which we don't want to do.
         */
        if (indexonly)
            rand_heap_pages = -1;

        /*
         * Estimate the number of parallel workers required to scan index. Use
         * the number of heap pages computed considering heap fetches won't be
         * sequential as for parallel scans the pages are accessed in random
         * order.
         */
        path->path.parallel_workers = compute_parallel_worker(baserel,
                                                              rand_heap_pages,
                                                              index_pages,
                                                              max_parallel_workers_per_gather);

        /*
         * Fall out if workers can't be assigned for parallel scan, because in
         * such a case this path will be rejected.  So there is no benefit in
         * doing extra computation.
         */
        if (path->path.parallel_workers <= 0)
            return;

        path->path.parallel_aware = true;
    }

    /*
     * Now interpolate based on estimated index order correlation to get total
     * disk I/O cost for main table accesses.
     */
    csquared = indexCorrelation * indexCorrelation;

    run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);

    /*
     * Estimate CPU costs per tuple.
     *
     * What we want here is cpu_tuple_cost plus the evaluation costs of any
     * qual clauses that we have to evaluate as qpquals.
     */
    cost_qual_eval(&qpqual_cost, qpquals, root);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;

    cpu_run_cost += cpu_per_tuple * tuples_fetched;

    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->path.pathtarget->cost.startup;
    cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;

    /* Adjust costing for parallelism, if used. */
    if (path->path.parallel_workers > 0)
    {
        double      parallel_divisor = get_parallel_divisor(&path->path);

        path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);

        /* The CPU cost is divided among all the workers. */
        cpu_run_cost /= parallel_divisor;
    }

    run_cost += cpu_run_cost;

    path->path.startup_cost = startup_cost;
    path->path.total_cost = startup_cost + run_cost;
}
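
/*
 * Illustrative sketch (editor's addition, not part of PostgreSQL): the
 * correlation-based interpolation used above, isolated as a standalone
 * function.  csquared weights the perfectly-correlated estimate
 * (min_IO_cost) against the perfectly-uncorrelated one (max_IO_cost); the
 * function and its name are hypothetical.
 */
static double
example_interpolate_io_cost(double min_IO_cost, double max_IO_cost,
                            double indexCorrelation)
{
    double      csquared = indexCorrelation * indexCorrelation;

    /* csquared = 1 yields min_IO_cost; csquared = 0 yields max_IO_cost */
    return max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
}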

/*
 * extract_nonindex_conditions
 *
 * Given a list of quals to be enforced in an indexscan, extract the ones that
 * will have to be applied as qpquals (ie, the index machinery won't handle
 * them).  Here we detect only whether a qual clause is directly redundant
 * with some indexclause.  If the index path is chosen for use, createplan.c
 * will try a bit harder to get rid of redundant qual conditions; specifically
 * it will see if quals can be proven to be implied by the indexquals.  But
 * it does not seem worth the cycles to try to factor that in at this stage,
 * since we're only trying to estimate qual eval costs.  Otherwise this must
 * match the logic in create_indexscan_plan().
 *
 * qual_clauses, and the result, are lists of RestrictInfos.
 * indexclauses is a list of IndexClauses.
 */
static List *
extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
{
    List       *result = NIL;
    ListCell   *lc;

    foreach(lc, qual_clauses)
    {
        RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);

        if (rinfo->pseudoconstant)
            continue;           /* we may drop pseudoconstants here */
        if (is_redundant_with_indexclauses(rinfo, indexclauses))
            continue;           /* dup or derived from same EquivalenceClass */
        /* ... skip the predicate proof attempt createplan.c will try ... */
        result = lappend(result, rinfo);
    }
    return result;
}

/*
 * index_pages_fetched
 *      Estimate the number of pages actually fetched after accounting for
 *      cache effects.
 *
 * We use an approximation proposed by Mackert and Lohman, "Index Scans
 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
 * The Mackert and Lohman approximation is that the number of pages
 * fetched is
 *      PF =
 *          min(2TNs/(2T+Ns), T)            when T <= b
 *          2TNs/(2T+Ns)                    when T > b and Ns <= 2Tb/(2T-b)
 *          b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
 * where
 *      T = # pages in table
 *      N = # tuples in table
 *      s = selectivity = fraction of table to be scanned
 *      b = # buffer pages available (we include kernel space here)
 *
 * We assume that effective_cache_size is the total number of buffer pages
 * available for the whole query, and pro-rate that space across all the
 * tables in the query and the index currently under consideration.  (This
 * ignores space needed for other indexes used by the query, but since we
 * don't know which indexes will get used, we can't estimate that very well;
 * and in any case counting all the tables may well be an overestimate, since
 * depending on the join plan not all the tables may be scanned concurrently.)
 *
 * The product Ns is the number of tuples fetched; we pass in that
 * product rather than calculating it here.  "pages" is the number of pages
 * in the object under consideration (either an index or a table).
 * "index_pages" is the amount to add to the total table space, which was
 * computed for us by make_one_rel.
 *
 * Caller is expected to have ensured that tuples_fetched is greater than zero
 * and rounded to integer (see clamp_row_est).  The result will likewise be
 * greater than zero and integral.
 */
double
index_pages_fetched(double tuples_fetched, BlockNumber pages,
                    double index_pages, PlannerInfo *root)
{
    double      pages_fetched;
    double      total_pages;
    double      T,
                b;

    /* T is # pages in table, but don't allow it to be zero */
    T = (pages > 1) ? (double) pages : 1.0;

    /* Compute number of pages assumed to be competing for cache space */
    total_pages = root->total_table_pages + index_pages;
    total_pages = Max(total_pages, 1.0);
    Assert(T <= total_pages);

    /* b is pro-rated share of effective_cache_size */
    b = (double) effective_cache_size * T / total_pages;

    /* force it positive and integral */
    if (b <= 1.0)
        b = 1.0;
    else
        b = ceil(b);

    /* This part is the Mackert and Lohman formula */
    if (T <= b)
    {
        pages_fetched =
            (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        if (pages_fetched >= T)
            pages_fetched = T;
        else
            pages_fetched = ceil(pages_fetched);
    }
    else
    {
        double      lim;

        lim = (2.0 * T * b) / (2.0 * T - b);
        if (tuples_fetched <= lim)
        {
            pages_fetched =
                (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        }
        else
        {
            pages_fetched =
                b + (tuples_fetched - lim) * (T - b) / T;
        }
        pages_fetched = ceil(pages_fetched);
    }
    return pages_fetched;
}

/*
 * get_indexpath_pages
 *      Determine the total size of the indexes used in a bitmap index path.
 *
 * Note: if the same index is used more than once in a bitmap tree, we will
 * count it multiple times, which perhaps is the wrong thing ... but it's
 * not completely clear, and detecting duplicates is difficult, so ignore it
 * for now.
 */
static double
get_indexpath_pages(Path *bitmapqual)
{
    double      result = 0;
    ListCell   *l;

    if (IsA(bitmapqual, BitmapAndPath))
    {
        BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;

        foreach(l, apath->bitmapquals)
        {
            result += get_indexpath_pages((Path *) lfirst(l));
        }
    }
    else if (IsA(bitmapqual, BitmapOrPath))
    {
        BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;

        foreach(l, opath->bitmapquals)
        {
            result += get_indexpath_pages((Path *) lfirst(l));
        }
    }
    else if (IsA(bitmapqual, IndexPath))
    {
        IndexPath  *ipath = (IndexPath *) bitmapqual;

        result = (double) ipath->indexinfo->pages;
    }
    else
        elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));

    return result;
}

/*
 * cost_bitmap_heap_scan
 *      Determines and returns the cost of scanning a relation using a bitmap
 *      index-then-heap plan.
 *
 * 'baserel' is the relation to be scanned
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
 * 'loop_count' is the number of repetitions of the indexscan to factor into
 *      estimates of caching behavior
 *
 * Note: the component IndexPaths in bitmapqual should have been costed
 * using the same loop_count.
 */
void
cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
                      ParamPathInfo *param_info,
                      Path *bitmapqual, double loop_count)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        indexTotalCost;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;
    Cost        cost_per_page;
    Cost        cpu_run_cost;
    double      tuples_fetched;
    double      pages_fetched;
    double      spc_seq_page_cost,
                spc_random_page_cost;
    double      T;

    /* Should only be applied to base relations */
    Assert(IsA(baserel, RelOptInfo));
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    if (!enable_bitmapscan)
        startup_cost += disable_cost;

    pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
                                         loop_count, &indexTotalCost,
                                         &tuples_fetched);

    startup_cost += indexTotalCost;
    T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;

    /* Fetch estimated page costs for tablespace containing table. */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    /*
     * For small numbers of pages we should charge spc_random_page_cost
     * apiece, while if nearly all the table's pages are being read, it's more
     * appropriate to charge spc_seq_page_cost apiece.  The effect is
     * nonlinear, too.  For lack of a better idea, interpolate like this to
     * determine the cost per page.
     */
    if (pages_fetched >= 2.0)
        cost_per_page = spc_random_page_cost -
            (spc_random_page_cost - spc_seq_page_cost)
            * sqrt(pages_fetched / T);
    else
        cost_per_page = spc_random_page_cost;

    run_cost += pages_fetched * cost_per_page;

    /*
     * Estimate CPU costs per tuple.
     *
     * Often the indexquals don't need to be rechecked at each tuple ... but
     * not always, especially not if there are enough tuples involved that the
     * bitmaps become lossy.  For the moment, just assume they will be
     * rechecked always.  This means we charge the full freight for all the
     * scan clauses.
     */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    cpu_run_cost = cpu_per_tuple * tuples_fetched;

    /* Adjust costing for parallelism, if used. */
    if (path->parallel_workers > 0)
    {
        double      parallel_divisor = get_parallel_divisor(path);

        /* The CPU cost is divided among all the workers. */
        cpu_run_cost /= parallel_divisor;

        path->rows = clamp_row_est(path->rows / parallel_divisor);
    }

    run_cost += cpu_run_cost;

    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
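
/*
 * Worked example (editor's addition, not part of PostgreSQL) of the
 * nonlinear cost_per_page interpolation above, with the defaults
 * spc_random_page_cost = 4.0 and spc_seq_page_cost = 1.0 on a hypothetical
 * table of T = 10000 pages:
 *
 *      pages_fetched =   100  ->  4.0 - 3.0 * sqrt(0.01) = 3.70 per page
 *      pages_fetched =  2500  ->  4.0 - 3.0 * sqrt(0.25) = 2.50 per page
 *      pages_fetched = 10000  ->  4.0 - 3.0 * sqrt(1.00) = 1.00 per page
 *
 * so the charge slides from nearly random toward sequential as the scan
 * approaches the whole table.
 */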

/*
 * cost_bitmap_tree_node
 *      Extract cost and selectivity from a bitmap tree node (index/and/or)
 */
void
cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
{
    if (IsA(path, IndexPath))
    {
        *cost = ((IndexPath *) path)->indextotalcost;
        *selec = ((IndexPath *) path)->indexselectivity;

        /*
         * Charge a small amount per retrieved tuple to reflect the costs of
         * manipulating the bitmap.  This is mostly to make sure that a bitmap
         * scan doesn't look to be the same cost as an indexscan to retrieve a
         * single tuple.
         */
        *cost += 0.1 * cpu_operator_cost * path->rows;
    }
    else if (IsA(path, BitmapAndPath))
    {
        *cost = path->total_cost;
        *selec = ((BitmapAndPath *) path)->bitmapselectivity;
    }
    else if (IsA(path, BitmapOrPath))
    {
        *cost = path->total_cost;
        *selec = ((BitmapOrPath *) path)->bitmapselectivity;
    }
    else
    {
        elog(ERROR, "unrecognized node type: %d", nodeTag(path));
        *cost = *selec = 0;     /* keep compiler quiet */
    }
}

/*
 * cost_bitmap_and_node
 *      Estimate the cost of a BitmapAnd node
 *
 * Note that this considers only the costs of index scanning and bitmap
 * creation, not the eventual heap access.  In that sense the object isn't
 * truly a Path, but it has enough path-like properties (costs in particular)
 * to warrant treating it as one.  We don't bother to set the path rows field,
 * however.
 */
void
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
    Cost        totalCost;
    Selectivity selec;
    ListCell   *l;

    /*
     * We estimate AND selectivity on the assumption that the inputs are
     * independent.  This is probably often wrong, but we don't have the info
     * to do better.
     *
     * The runtime cost of the BitmapAnd itself is estimated at 100x
     * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
     * definitely too simplistic?
     */
    totalCost = 0.0;
    selec = 1.0;
    foreach(l, path->bitmapquals)
    {
        Path       *subpath = (Path *) lfirst(l);
        Cost        subCost;
        Selectivity subselec;

        cost_bitmap_tree_node(subpath, &subCost, &subselec);

        selec *= subselec;

        totalCost += subCost;
        if (l != list_head(path->bitmapquals))
            totalCost += 100.0 * cpu_operator_cost;
    }
    path->bitmapselectivity = selec;
    path->path.rows = 0;        /* per above, not used */
    path->path.startup_cost = totalCost;
    path->path.total_cost = totalCost;
}

/*
 * cost_bitmap_or_node
 *      Estimate the cost of a BitmapOr node
 *
 * See comments for cost_bitmap_and_node.
 */
void
cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
{
    Cost        totalCost;
    Selectivity selec;
    ListCell   *l;

    /*
     * We estimate OR selectivity on the assumption that the inputs are
     * non-overlapping, since that's often the case in "x IN (list)" type
     * situations.  Of course, we clamp to 1.0 at the end.
     *
     * The runtime cost of the BitmapOr itself is estimated at 100x
     * cpu_operator_cost for each tbm_union needed.  Probably too small,
     * definitely too simplistic?  We are aware that the tbm_unions are
     * optimized out when the inputs are BitmapIndexScans.
     */
    totalCost = 0.0;
    selec = 0.0;
    foreach(l, path->bitmapquals)
    {
        Path       *subpath = (Path *) lfirst(l);
        Cost        subCost;
        Selectivity subselec;

        cost_bitmap_tree_node(subpath, &subCost, &subselec);

        selec += subselec;

        totalCost += subCost;
        if (l != list_head(path->bitmapquals) &&
            !IsA(subpath, IndexPath))
            totalCost += 100.0 * cpu_operator_cost;
    }
    path->bitmapselectivity = Min(selec, 1.0);
    path->path.rows = 0;        /* per above, not used */
    path->path.startup_cost = totalCost;
    path->path.total_cost = totalCost;
}

/*
 * cost_tidscan
 *      Determines and returns the cost of scanning a relation using TIDs.
 *
 * 'baserel' is the relation to be scanned
 * 'tidquals' is the list of TID-checkable quals
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 */
void
cost_tidscan(Path *path, PlannerInfo *root,
             RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;
    QualCost    tid_qual_cost;
    double      ntuples;
    ListCell   *l;
    double      spc_random_page_cost;

    /* Should only be applied to base relations */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    /* Count how many tuples we expect to retrieve */
    ntuples = 0;
    foreach(l, tidquals)
    {
        RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
        Expr       *qual = rinfo->clause;

        if (IsA(qual, ScalarArrayOpExpr))
        {
            /* Each element of the array yields 1 tuple */
            ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
            Node       *arraynode = (Node *) lsecond(saop->args);

            ntuples += estimate_array_length(root, arraynode);
        }
        else if (IsA(qual, CurrentOfExpr))
        {
            /* CURRENT OF yields 1 tuple */
            ntuples++;
        }
        else
        {
            /* It's just CTID = something, count 1 tuple */
            ntuples++;
        }
    }

    /*
     * The TID qual expressions will be computed once, any other baserestrict
     * quals once per retrieved tuple.
     */
    cost_qual_eval(&tid_qual_cost, tidquals, root);

    /* fetch estimated page cost for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              NULL);

    /* disk costs --- assume each tuple on a different page */
    run_cost += spc_random_page_cost * ntuples;

    /* Add scanning CPU costs */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    /* XXX currently we assume TID quals are a subset of qpquals */
    startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
        tid_qual_cost.per_tuple;
    run_cost += cpu_per_tuple * ntuples;

    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}

/*
 * cost_tidrangescan
 *      Determines and sets the costs of scanning a relation using a range of
 *      TIDs for 'path'
 *
 * 'baserel' is the relation to be scanned
 * 'tidrangequals' is the list of TID-checkable range quals
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 */
void
cost_tidrangescan(Path *path, PlannerInfo *root,
                  RelOptInfo *baserel, List *tidrangequals,
                  ParamPathInfo *param_info)
{
    Selectivity selectivity;
    double      pages;
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;
    QualCost    tid_qual_cost;
    double      ntuples;
    double      nseqpages;
    double      spc_random_page_cost;
    double      spc_seq_page_cost;

    /* Should only be applied to base relations */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    /* Count how many tuples and pages we expect to scan */
    selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
                                         JOIN_INNER, NULL);
    pages = ceil(selectivity * baserel->pages);

    if (pages <= 0.0)
        pages = 1.0;

    /*
     * The first page in a range requires a random seek, but each subsequent
     * page is just a normal sequential page read.  NOTE: it's desirable for
     * TID Range Scans to cost more than the equivalent Sequential Scans,
     * because Seq Scans have some performance advantages such as scan
     * synchronization and parallelizability, and we'd prefer one of them to
     * be picked unless a TID Range Scan really is better.
     */
    ntuples = selectivity * baserel->tuples;
    nseqpages = pages - 1.0;

    /*
     * The TID qual expressions will be computed once, any other baserestrict
     * quals once per retrieved tuple.
     */
    cost_qual_eval(&tid_qual_cost, tidrangequals, root);

    /* fetch estimated page cost for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    /* disk costs; 1 random page and the remainder as seq pages */
    run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;

    /* Add scanning CPU costs */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    /*
     * XXX currently we assume TID quals are a subset of qpquals at this
     * point; they will be removed (if possible) when we create the plan, so
     * we subtract their cost from the total qpqual cost.  (If the TID quals
     * can't be removed, this is a mistake and we're going to underestimate
     * the CPU cost a bit.)
     */
    startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
        tid_qual_cost.per_tuple;
    run_cost += cpu_per_tuple * ntuples;

    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
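
/*
 * Worked example (editor's addition, not part of PostgreSQL).  With the
 * defaults spc_random_page_cost = 4.0 and spc_seq_page_cost = 1.0, a TID
 * range expected to touch 10 pages pays one random fetch plus nine
 * sequential ones:
 *
 *      disk cost = 4.0 + 1.0 * 9 = 13.0
 *
 * versus 10.0 for a 10-page Seq Scan, preserving the intended preference
 * for Seq Scan noted above when the range covers most of the table.
 */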
1420 :
1421 : /*
1422 : * cost_subqueryscan
1423 : * Determines and returns the cost of scanning a subquery RTE.
1424 : *
1425 : * 'baserel' is the relation to be scanned
1426 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1427 : * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1428 : */
1429 : void
1430 35496 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1431 : RelOptInfo *baserel, ParamPathInfo *param_info,
1432 : bool trivial_pathtarget)
1433 : {
1434 : Cost startup_cost;
1435 : Cost run_cost;
1436 : List *qpquals;
1437 : QualCost qpqual_cost;
1438 : Cost cpu_per_tuple;
1439 :
1440 : /* Should only be applied to base relations that are subqueries */
1441 : Assert(baserel->relid > 0);
1442 : Assert(baserel->rtekind == RTE_SUBQUERY);
1443 :
1444 : /*
1445 : * We compute the rowcount estimate as the subplan's estimate times the
1446 : * selectivity of relevant restriction clauses. In simple cases this will
1447 : * come out the same as baserel->rows; but when dealing with parallelized
1448 : * paths we must do it like this to get the right answer.
1449 : */
1450 35496 : if (param_info)
1451 498 : qpquals = list_concat_copy(param_info->ppi_clauses,
1452 498 : baserel->baserestrictinfo);
1453 : else
1454 34998 : qpquals = baserel->baserestrictinfo;
1455 :
1456 35496 : path->path.rows = clamp_row_est(path->subpath->rows *
1457 35496 : clauselist_selectivity(root,
1458 : qpquals,
1459 : 0,
1460 : JOIN_INNER,
1461 : NULL));
1462 :
1463 : /*
1464 : * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1465 : * any restriction clauses and tlist that will be attached to the
1466 : * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1467 : * projection overhead.
1468 : */
1469 35496 : path->path.startup_cost = path->subpath->startup_cost;
1470 35496 : path->path.total_cost = path->subpath->total_cost;
1471 :
1472 : /*
1473 : * However, if there are no relevant restriction clauses and the
1474 : * pathtarget is trivial, then we expect that setrefs.c will optimize away
1475 : * the SubqueryScan plan node altogether, so we should just make its cost
1476 : * and rowcount equal to the input path's.
1477 : *
1478 : * Note: there are some edge cases where createplan.c will apply a
1479 : * different targetlist to the SubqueryScan node, thus falsifying our
1480 : * current estimate of whether the target is trivial, and making the cost
1481 : * estimate (though not the rowcount) wrong. It does not seem worth the
1482 : * extra complication to try to account for that exactly, especially since
1483 : * that behavior falsifies other cost estimates as well.
1484 : */
1485 35496 : if (qpquals == NIL && trivial_pathtarget)
1486 15710 : return;
1487 :
1488 19786 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1489 :
1490 19786 : startup_cost = qpqual_cost.startup;
1491 19786 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1492 19786 : run_cost = cpu_per_tuple * path->subpath->rows;
1493 :
1494 : /* tlist eval costs are paid per output row, not per tuple scanned */
1495 19786 : startup_cost += path->path.pathtarget->cost.startup;
1496 19786 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1497 :
1498 19786 : path->path.startup_cost += startup_cost;
1499 19786 : path->path.total_cost += startup_cost + run_cost;
1500 : }
1501 :
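            : /*
            :  * To make the two branches above concrete, here is a hedged worked
            :  * example (an editorial illustration, not code from this file), using
            :  * the default cpu_tuple_cost of 0.01.  Suppose the subpath has
            :  * startup_cost = 5, total_cost = 105, and rows = 1000:
            :  *
            :  * - With no quals and a trivial pathtarget, setrefs.c is expected to
            :  *   elide the SubqueryScan node, so the path keeps the subpath's costs:
            :  *       startup_cost = 5, total_cost = 105
            :  *
            :  * - With a qual costing 0.0025 per tuple and nothing at startup, we add
            :  *       run_cost = (0.01 + 0.0025) * 1000 = 12.5
            :  *   giving startup_cost = 5 and total_cost = 117.5 (plus any tlist
            :  *   eval costs, which are charged per output row).
            :  */
            : 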
1502 : /*
1503 : * cost_functionscan
1504 : * Determines and returns the cost of scanning a function RTE.
1505 : *
1506 : * 'baserel' is the relation to be scanned
1507 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1508 : */
1509 : void
1510 39568 : cost_functionscan(Path *path, PlannerInfo *root,
1511 : RelOptInfo *baserel, ParamPathInfo *param_info)
1512 : {
1513 39568 : Cost startup_cost = 0;
1514 39568 : Cost run_cost = 0;
1515 : QualCost qpqual_cost;
1516 : Cost cpu_per_tuple;
1517 : RangeTblEntry *rte;
1518 : QualCost exprcost;
1519 :
1520 : /* Should only be applied to base relations that are functions */
1521 : Assert(baserel->relid > 0);
1522 39568 : rte = planner_rt_fetch(baserel->relid, root);
1523 : Assert(rte->rtekind == RTE_FUNCTION);
1524 :
1525 : /* Mark the path with the correct row estimate */
1526 39568 : if (param_info)
1527 8102 : path->rows = param_info->ppi_rows;
1528 : else
1529 31466 : path->rows = baserel->rows;
1530 :
1531 : /*
1532 : * Estimate costs of executing the function expression(s).
1533 : *
1534 : * Currently, nodeFunctionscan.c always executes the functions to
1535 : * completion before returning any rows, and caches the results in a
1536 : * tuplestore. So the function eval cost is all startup cost, and per-row
1537 : * costs are minimal.
1538 : *
1539 : * XXX in principle we ought to charge tuplestore spill costs if the
1540 : * number of rows is large. However, given how phony our rowcount
1541 : * estimates for functions tend to be, there's not a lot of point in that
1542 : * refinement right now.
1543 : */
1544 39568 : cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1545 :
1546 39568 : startup_cost += exprcost.startup + exprcost.per_tuple;
1547 :
1548 : /* Add scanning CPU costs */
1549 39568 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1550 :
1551 39568 : startup_cost += qpqual_cost.startup;
1552 39568 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1553 39568 : run_cost += cpu_per_tuple * baserel->tuples;
1554 :
1555 : /* tlist eval costs are paid per output row, not per tuple scanned */
1556 39568 : startup_cost += path->pathtarget->cost.startup;
1557 39568 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1558 :
1559 39568 : path->startup_cost = startup_cost;
1560 39568 : path->total_cost = startup_cost + run_cost;
1561 39568 : }
1562 :
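            : /*
            :  * The CPU-costing pattern used above recurs in several of the scan
            :  * costers below.  The following self-contained sketch (an editorial
            :  * illustration; the names are hypothetical, not APIs of this file)
            :  * shows its shape: quals contribute a one-time startup charge plus a
            :  * per-tuple charge, every scanned tuple pays cpu_tuple_cost, and tlist
            :  * evaluation is paid per *output* row rather than per tuple scanned.
            :  */
            : static void
            : example_scan_cpu_costs(double tuples, double output_rows,
            :                        double qual_startup, double qual_per_tuple,
            :                        double tlist_startup, double tlist_per_row,
            :                        double cpu_tuple, /* 0.01 by default */
            :                        double *startup_cost, double *run_cost)
            : {
            :     /* one-time evaluation setup for quals and tlist */
            :     *startup_cost = qual_startup + tlist_startup;
            :     /* per-tuple scan overhead plus qual checking */
            :     *run_cost = (cpu_tuple + qual_per_tuple) * tuples;
            :     /* projection is paid only for rows that survive the quals */
            :     *run_cost += tlist_per_row * output_rows;
            : }
            : 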
1563 : /*
1564 : * cost_tablefuncscan
1565 : * Determines and returns the cost of scanning a table function.
1566 : *
1567 : * 'baserel' is the relation to be scanned
1568 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1569 : */
1570 : void
1571 548 : cost_tablefuncscan(Path *path, PlannerInfo *root,
1572 : RelOptInfo *baserel, ParamPathInfo *param_info)
1573 : {
1574 548 : Cost startup_cost = 0;
1575 548 : Cost run_cost = 0;
1576 : QualCost qpqual_cost;
1577 : Cost cpu_per_tuple;
1578 : RangeTblEntry *rte;
1579 : QualCost exprcost;
1580 :
1581 : /* Should only be applied to base relations that are functions */
1582 : Assert(baserel->relid > 0);
1583 548 : rte = planner_rt_fetch(baserel->relid, root);
1584 : Assert(rte->rtekind == RTE_TABLEFUNC);
1585 :
1586 : /* Mark the path with the correct row estimate */
1587 548 : if (param_info)
1588 234 : path->rows = param_info->ppi_rows;
1589 : else
1590 314 : path->rows = baserel->rows;
1591 :
1592 : /*
1593 : * Estimate costs of executing the table func expression(s).
1594 : *
1595 : * XXX in principle we ought to charge tuplestore spill costs if the
1596 : * number of rows is large. However, given how phony our rowcount
1597 : * estimates for tablefuncs tend to be, there's not a lot of point in that
1598 : * refinement right now.
1599 : */
1600 548 : cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1601 :
1602 548 : startup_cost += exprcost.startup + exprcost.per_tuple;
1603 :
1604 : /* Add scanning CPU costs */
1605 548 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1606 :
1607 548 : startup_cost += qpqual_cost.startup;
1608 548 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1609 548 : run_cost += cpu_per_tuple * baserel->tuples;
1610 :
1611 : /* tlist eval costs are paid per output row, not per tuple scanned */
1612 548 : startup_cost += path->pathtarget->cost.startup;
1613 548 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1614 :
1615 548 : path->startup_cost = startup_cost;
1616 548 : path->total_cost = startup_cost + run_cost;
1617 548 : }
1618 :
1619 : /*
1620 : * cost_valuesscan
1621 : * Determines and returns the cost of scanning a VALUES RTE.
1622 : *
1623 : * 'baserel' is the relation to be scanned
1624 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1625 : */
1626 : void
1627 7660 : cost_valuesscan(Path *path, PlannerInfo *root,
1628 : RelOptInfo *baserel, ParamPathInfo *param_info)
1629 : {
1630 7660 : Cost startup_cost = 0;
1631 7660 : Cost run_cost = 0;
1632 : QualCost qpqual_cost;
1633 : Cost cpu_per_tuple;
1634 :
1635 : /* Should only be applied to base relations that are values lists */
1636 : Assert(baserel->relid > 0);
1637 : Assert(baserel->rtekind == RTE_VALUES);
1638 :
1639 : /* Mark the path with the correct row estimate */
1640 7660 : if (param_info)
1641 48 : path->rows = param_info->ppi_rows;
1642 : else
1643 7612 : path->rows = baserel->rows;
1644 :
1645 : /*
1646 : * For now, estimate list evaluation cost at one operator eval per list
1647 : * (probably pretty bogus, but is it worth being smarter?)
1648 : */
1649 7660 : cpu_per_tuple = cpu_operator_cost;
1650 :
1651 : /* Add scanning CPU costs */
1652 7660 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1653 :
1654 7660 : startup_cost += qpqual_cost.startup;
1655 7660 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1656 7660 : run_cost += cpu_per_tuple * baserel->tuples;
1657 :
1658 : /* tlist eval costs are paid per output row, not per tuple scanned */
1659 7660 : startup_cost += path->pathtarget->cost.startup;
1660 7660 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1661 :
1662 7660 : path->startup_cost = startup_cost;
1663 7660 : path->total_cost = startup_cost + run_cost;
1664 7660 : }
1665 :
1666 : /*
1667 : * cost_ctescan
1668 : * Determines and returns the cost of scanning a CTE RTE.
1669 : *
1670 : * Note: this is used for both self-reference and regular CTEs; the
1671 : * possible cost differences are below the threshold of what we could
1672 : * estimate accurately anyway. Note that the costs of evaluating the
1673 : * referenced CTE query are added into the final plan as initplan costs,
1674 : * and should NOT be counted here.
1675 : */
1676 : void
1677 4046 : cost_ctescan(Path *path, PlannerInfo *root,
1678 : RelOptInfo *baserel, ParamPathInfo *param_info)
1679 : {
1680 4046 : Cost startup_cost = 0;
1681 4046 : Cost run_cost = 0;
1682 : QualCost qpqual_cost;
1683 : Cost cpu_per_tuple;
1684 :
1685 : /* Should only be applied to base relations that are CTEs */
1686 : Assert(baserel->relid > 0);
1687 : Assert(baserel->rtekind == RTE_CTE);
1688 :
1689 : /* Mark the path with the correct row estimate */
1690 4046 : if (param_info)
1691 0 : path->rows = param_info->ppi_rows;
1692 : else
1693 4046 : path->rows = baserel->rows;
1694 :
1695 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1696 4046 : cpu_per_tuple = cpu_tuple_cost;
1697 :
1698 : /* Add scanning CPU costs */
1699 4046 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1700 :
1701 4046 : startup_cost += qpqual_cost.startup;
1702 4046 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1703 4046 : run_cost += cpu_per_tuple * baserel->tuples;
1704 :
1705 : /* tlist eval costs are paid per output row, not per tuple scanned */
1706 4046 : startup_cost += path->pathtarget->cost.startup;
1707 4046 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1708 :
1709 4046 : path->startup_cost = startup_cost;
1710 4046 : path->total_cost = startup_cost + run_cost;
1711 4046 : }
1712 :
1713 : /*
1714 : * cost_namedtuplestorescan
1715 : * Determines and returns the cost of scanning a named tuplestore.
1716 : */
1717 : void
1718 442 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
1719 : RelOptInfo *baserel, ParamPathInfo *param_info)
1720 : {
1721 442 : Cost startup_cost = 0;
1722 442 : Cost run_cost = 0;
1723 : QualCost qpqual_cost;
1724 : Cost cpu_per_tuple;
1725 :
1726 : /* Should only be applied to base relations that are Tuplestores */
1727 : Assert(baserel->relid > 0);
1728 : Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1729 :
1730 : /* Mark the path with the correct row estimate */
1731 442 : if (param_info)
1732 0 : path->rows = param_info->ppi_rows;
1733 : else
1734 442 : path->rows = baserel->rows;
1735 :
1736 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1737 442 : cpu_per_tuple = cpu_tuple_cost;
1738 :
1739 : /* Add scanning CPU costs */
1740 442 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1741 :
1742 442 : startup_cost += qpqual_cost.startup;
1743 442 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1744 442 : run_cost += cpu_per_tuple * baserel->tuples;
1745 :
1746 442 : path->startup_cost = startup_cost;
1747 442 : path->total_cost = startup_cost + run_cost;
1748 442 : }
1749 :
1750 : /*
1751 : * cost_resultscan
1752 : * Determines and returns the cost of scanning an RTE_RESULT relation.
1753 : */
1754 : void
1755 1554 : cost_resultscan(Path *path, PlannerInfo *root,
1756 : RelOptInfo *baserel, ParamPathInfo *param_info)
1757 : {
1758 1554 : Cost startup_cost = 0;
1759 1554 : Cost run_cost = 0;
1760 : QualCost qpqual_cost;
1761 : Cost cpu_per_tuple;
1762 :
1763 : /* Should only be applied to RTE_RESULT base relations */
1764 : Assert(baserel->relid > 0);
1765 : Assert(baserel->rtekind == RTE_RESULT);
1766 :
1767 : /* Mark the path with the correct row estimate */
1768 1554 : if (param_info)
1769 144 : path->rows = param_info->ppi_rows;
1770 : else
1771 1410 : path->rows = baserel->rows;
1772 :
1773 : /* We charge qual cost plus cpu_tuple_cost */
1774 1554 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1775 :
1776 1554 : startup_cost += qpqual_cost.startup;
1777 1554 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1778 1554 : run_cost += cpu_per_tuple * baserel->tuples;
1779 :
1780 1554 : path->startup_cost = startup_cost;
1781 1554 : path->total_cost = startup_cost + run_cost;
1782 1554 : }
1783 :
1784 : /*
1785 : * cost_recursive_union
1786 : * Determines and returns the cost of performing a recursive union,
1787 : * and also the estimated output size.
1788 : *
1789 : * We are given Paths for the nonrecursive and recursive terms.
1790 : */
1791 : void
1792 804 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1793 : {
1794 : Cost startup_cost;
1795 : Cost total_cost;
1796 : double total_rows;
1797 :
1798 : /* We probably have decent estimates for the non-recursive term */
1799 804 : startup_cost = nrterm->startup_cost;
1800 804 : total_cost = nrterm->total_cost;
1801 804 : total_rows = nrterm->rows;
1802 :
1803 : /*
1804 : * We arbitrarily assume that about 10 recursive iterations will be
1805 : * needed, and that we've managed to get a good fix on the cost and output
1806 : * size of each one of them. These are mighty shaky assumptions but it's
1807 : * hard to see how to do better.
1808 : */
1809 804 : total_cost += 10 * rterm->total_cost;
1810 804 : total_rows += 10 * rterm->rows;
1811 :
1812 : /*
1813 : * Also charge cpu_tuple_cost per row to account for the costs of
1814 : * manipulating the tuplestores. (We don't worry about possible
1815 : * spill-to-disk costs.)
1816 : */
1817 804 : total_cost += cpu_tuple_cost * total_rows;
1818 :
1819 804 : runion->startup_cost = startup_cost;
1820 804 : runion->total_cost = total_cost;
1821 804 : runion->rows = total_rows;
1822 804 : runion->pathtarget->width = Max(nrterm->pathtarget->width,
1823 : rterm->pathtarget->width);
1824 804 : }
1825 :
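            : /*
            :  * Hedged worked example (editorial, not part of this file): with a
            :  * nonrecursive term costing 100 units for 1000 rows, a recursive term
            :  * costing 20 units for 50 rows, and the default cpu_tuple_cost of
            :  * 0.01, the ten-iteration assumption above yields
            :  *
            :  *     total_rows = 1000 + 10 * 50 = 1500
            :  *     total_cost = 100 + 10 * 20 + 0.01 * 1500 = 315
            :  */
            : 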
1826 : /*
1827 : * cost_tuplesort
1828 : * Determines and returns the cost of sorting a relation using tuplesort,
1829 : * not including the cost of reading the input data.
1830 : *
1831 : * If the total volume of data to sort is less than sort_mem, we will do
1832 : * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1833 : * comparisons for t tuples.
1834 : *
1835 : * If the total volume exceeds sort_mem, we switch to a tape-style merge
1836 : * algorithm. There will still be about t*log2(t) tuple comparisons in
1837 : * total, but we will also need to write and read each tuple once per
1838 : * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1839 : * number of initial runs formed and M is the merge order used by tuplesort.c.
1840 : * Since the average initial run should be about sort_mem, we have
1841 : * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1842 : * cpu = comparison_cost * t * log2(t)
1843 : *
1844 : * If the sort is bounded (i.e., only the first k result tuples are needed)
1845 : * and k tuples can fit into sort_mem, we use a heap method that keeps only
1846 : * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1847 : *
1848 : * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1849 : * accesses (XXX can't we refine that guess?)
1850 : *
1851 : * By default, we charge two operator evals per tuple comparison, which should
1852 : * be in the right ballpark in most cases. The caller can tweak this by
1853 : * specifying nonzero comparison_cost; typically that's used for any extra
1854 : * work that has to be done to prepare the inputs to the comparison operators.
1855 : *
1856 : * 'tuples' is the number of tuples in the relation
1857 : * 'width' is the average tuple width in bytes
1858 : * 'comparison_cost' is the extra cost per comparison, if any
1859 : * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1860 : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1861 : */
1862 : static void
1863 1273124 : cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1864 : double tuples, int width,
1865 : Cost comparison_cost, int sort_mem,
1866 : double limit_tuples)
1867 : {
1868 1273124 : double input_bytes = relation_byte_size(tuples, width);
1869 : double output_bytes;
1870 : double output_tuples;
1871 1273124 : long sort_mem_bytes = sort_mem * 1024L;
1872 :
1873 : /*
1874 : * We want to be sure the cost of a sort is never estimated as zero, even
1875 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1876 : */
1877 1273124 : if (tuples < 2.0)
1878 372878 : tuples = 2.0;
1879 :
1880 : /* Include the default cost-per-comparison */
1881 1273124 : comparison_cost += 2.0 * cpu_operator_cost;
1882 :
1883 : /* Do we have a useful LIMIT? */
1884 1273124 : if (limit_tuples > 0 && limit_tuples < tuples)
1885 : {
1886 1714 : output_tuples = limit_tuples;
1887 1714 : output_bytes = relation_byte_size(output_tuples, width);
1888 : }
1889 : else
1890 : {
1891 1271410 : output_tuples = tuples;
1892 1271410 : output_bytes = input_bytes;
1893 : }
1894 :
1895 1273124 : if (output_bytes > sort_mem_bytes)
1896 : {
1897 : /*
1898 : * We'll have to use a disk-based sort of all the tuples
1899 : */
1900 17000 : double npages = ceil(input_bytes / BLCKSZ);
1901 17000 : double nruns = input_bytes / sort_mem_bytes;
1902 17000 : double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1903 : double log_runs;
1904 : double npageaccesses;
1905 :
1906 : /*
1907 : * CPU costs
1908 : *
1909 : * Assume about N log2 N comparisons
1910 : */
1911 17000 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1912 :
1913 : /* Disk costs */
1914 :
1915 : /* Compute logM(r) as log(r) / log(M) */
1916 17000 : if (nruns > mergeorder)
1917 4586 : log_runs = ceil(log(nruns) / log(mergeorder));
1918 : else
1919 12414 : log_runs = 1.0;
1920 17000 : npageaccesses = 2.0 * npages * log_runs;
1921 : /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1922 17000 : *startup_cost += npageaccesses *
1923 17000 : (seq_page_cost * 0.75 + random_page_cost * 0.25);
1924 : }
1925 1256124 : else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1926 : {
1927 : /*
1928 : * We'll use a bounded heap-sort keeping just K tuples in memory, for
1929 : * a total number of tuple comparisons of N log2 K; but the constant
1930 : * factor is a bit higher than for quicksort. Tweak it so that the
1931 : * cost curve is continuous at the crossover point.
1932 : */
1933 1316 : *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
1934 : }
1935 : else
1936 : {
1937 : /* We'll use plain quicksort on all the input tuples */
1938 1254808 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1939 : }
1940 :
1941 : /*
1942 : * Also charge a small amount (arbitrarily set equal to operator cost) per
1943 : * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1944 : * doesn't do qual-checking or projection, so it has less overhead than
1945 : * most plan nodes. Note it's correct to use tuples not output_tuples
1946 : * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1947 : * counting the LIMIT otherwise.
1948 : */
1949 1273124 : *run_cost = cpu_operator_cost * tuples;
1950 1273124 : }
1951 :
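            : /*
            :  * A minimal, compilable sketch of the disk-sort branch above (an
            :  * editorial illustration; the function name is hypothetical, and the
            :  * 8192-byte page size and default page costs are assumptions):
            :  */
            : #include <math.h>
            : 
            : static double
            : example_external_sort_disk_cost(double input_bytes, double sort_mem_bytes,
            :                                 double merge_order)
            : {
            :     const double seq_cost = 1.0;    /* assumed default seq_page_cost */
            :     const double rand_cost = 4.0;   /* assumed default random_page_cost */
            :     double npages = ceil(input_bytes / 8192.0);
            :     double nruns = input_bytes / sort_mem_bytes;
            :     double log_runs;
            : 
            :     /* number of merge passes: ceil(logM(r)), at least one */
            :     if (nruns > merge_order)
            :         log_runs = ceil(log(nruns) / log(merge_order));
            :     else
            :         log_runs = 1.0;
            : 
            :     /* each pass writes and reads every page once; 3/4 sequential */
            :     return 2.0 * npages * log_runs *
            :         (seq_cost * 0.75 + rand_cost * 0.25);
            : }
            : 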
1952 : /*
1953 : * cost_incremental_sort
1954 : * Determines and returns the cost of sorting a relation incrementally, when
1955 : * the input path is presorted by a prefix of the pathkeys.
1956 : *
1957 : * 'presorted_keys' is the number of leading pathkeys by which the input path
1958 : * is sorted.
1959 : *
1960 : * We estimate the number of groups into which the relation is divided by the
1961 : * leading pathkeys, and then calculate the cost of sorting a single group
1962 : * with tuplesort using cost_tuplesort().
1963 : */
1964 : void
1965 7074 : cost_incremental_sort(Path *path,
1966 : PlannerInfo *root, List *pathkeys, int presorted_keys,
1967 : Cost input_startup_cost, Cost input_total_cost,
1968 : double input_tuples, int width, Cost comparison_cost, int sort_mem,
1969 : double limit_tuples)
1970 : {
1971 : Cost startup_cost,
1972 : run_cost,
1973 7074 : input_run_cost = input_total_cost - input_startup_cost;
1974 : double group_tuples,
1975 : input_groups;
1976 : Cost group_startup_cost,
1977 : group_run_cost,
1978 : group_input_run_cost;
1979 7074 : List *presortedExprs = NIL;
1980 : ListCell *l;
1981 7074 : bool unknown_varno = false;
1982 :
1983 : Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
1984 :
1985 : /*
1986 : * We want to be sure the cost of a sort is never estimated as zero, even
1987 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1988 : */
1989 7074 : if (input_tuples < 2.0)
1990 4418 : input_tuples = 2.0;
1991 :
1992 : /* Default estimate of number of groups, capped to one group per row. */
1993 7074 : input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
1994 :
1995 : /*
1996 : * Extract presorted keys as list of expressions.
1997 : *
1998 : * We need to be careful about Vars containing "varno 0" which might have
1999 : * been introduced by generate_append_tlist, which would confuse
2000 : * estimate_num_groups (in fact it'd fail for such expressions). See
2001 : * recurse_set_operations which has to deal with the same issue.
2002 : *
2003 : * Unlike recurse_set_operations we can't access the original target list
2004 : * here, and even if we could it's not very clear how useful that would be
2005 : * for a set operation combining multiple tables. So we simply detect if
2006 : * there are any expressions with "varno 0" and use the default
2007 : * DEFAULT_NUM_DISTINCT in that case.
2008 : *
2009 : * We might also use either 1.0 (a single group) or input_tuples (each row
2010 : * being a separate group), pretty much the worst and best case for
2011 : * incremental sort. But those are extreme cases and using something in
2012 : * between seems reasonable. Furthermore, generate_append_tlist is used
2013 : * for set operations, which are likely to produce mostly unique output
2014 : * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
2015 : * while maintaining lower startup cost.
2016 : */
2017 7212 : foreach(l, pathkeys)
2018 : {
2019 7212 : PathKey *key = (PathKey *) lfirst(l);
2020 7212 : EquivalenceMember *member = (EquivalenceMember *)
2021 7212 : linitial(key->pk_eclass->ec_members);
2022 :
2023 : /*
2024 : * Check if the expression contains Var with "varno 0" so that we
2025 : * don't call estimate_num_groups in that case.
2026 : */
2027 7212 : if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2028 : {
2029 10 : unknown_varno = true;
2030 10 : break;
2031 : }
2032 :
2033 : /* expression not containing any Vars with "varno 0" */
2034 7202 : presortedExprs = lappend(presortedExprs, member->em_expr);
2035 :
2036 7202 : if (foreach_current_index(l) + 1 >= presorted_keys)
2037 7064 : break;
2038 : }
2039 :
2040 : /* Estimate the number of groups with equal presorted keys. */
2041 7074 : if (!unknown_varno)
2042 7064 : input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
2043 : NULL, NULL);
2044 :
2045 7074 : group_tuples = input_tuples / input_groups;
2046 7074 : group_input_run_cost = input_run_cost / input_groups;
2047 :
2048 : /*
2049 : * Estimate the average cost of sorting of one group where presorted keys
2050 : * are equal.
2051 : */
2052 7074 : cost_tuplesort(&group_startup_cost, &group_run_cost,
2053 : group_tuples, width, comparison_cost, sort_mem,
2054 : limit_tuples);
2055 :
2056 : /*
2057 : * Startup cost of incremental sort is the startup cost of its first group
2058 : * plus the cost of its input.
2059 : */
2060 7074 : startup_cost = group_startup_cost + input_startup_cost +
2061 : group_input_run_cost;
2062 :
2063 : /*
2064 : * After we started producing tuples from the first group, the cost of
2065 : * producing all the tuples is given by the cost to finish processing this
2066 : * group, plus the total cost to process the remaining groups, plus the
2067 : * remaining cost of input.
2068 : */
2069 7074 : run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
2070 7074 : (input_groups - 1) + group_input_run_cost * (input_groups - 1);
2071 :
2072 : /*
2073 : * Incremental sort adds some overhead by itself. Firstly, it has to
2074 : * detect the sort groups. This is roughly equal to one extra copy and
2075 : * comparison per tuple.
2076 : */
2077 7074 : run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2078 :
2079 : /*
2080 : * Additionally, we charge double cpu_tuple_cost for each input group to
2081 : * account for the tuplesort_reset that's performed after each group.
2082 : */
2083 7074 : run_cost += 2.0 * cpu_tuple_cost * input_groups;
2084 :
2085 7074 : path->rows = input_tuples;
2086 7074 : path->startup_cost = startup_cost;
2087 7074 : path->total_cost = startup_cost + run_cost;
2088 7074 : }
2089 :
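            : /*
            :  * Hedged worked example (editorial, not part of this file), using the
            :  * default cpu_tuple_cost = 0.01 and cpu_operator_cost = 0.0025 (so the
            :  * default comparison_cost is 0.005).  Take 1000 input tuples in 10
            :  * presorted groups of 100, with input_startup_cost = 0 and
            :  * input_total_cost = 50.  Per group, cost_tuplesort() charges roughly
            :  * 0.005 * 100 * log2(100) = 3.32 startup and 0.0025 * 100 = 0.25 run,
            :  * and each group consumes 5 units of input run cost.  Then
            :  *
            :  *     startup = 3.32 + 0 + 5 = 8.32
            :  *     run     = 0.25 + (0.25 + 3.32) * 9 + 5 * 9
            :  *             + (0.01 + 0.005) * 1000 + 2 * 0.01 * 10 = 92.6
            :  *
            :  * A full sort of the same input would have startup near
            :  * 0.005 * 1000 * log2(1000) + 50 = 99.8, so the incremental sort's
            :  * much lower startup cost is what LIMIT-style queries can exploit.
            :  */
            : 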
2090 : /*
2091 : * cost_sort
2092 : * Determines and returns the cost of sorting a relation, including
2093 : * the cost of reading the input data.
2094 : *
2095 : * NOTE: some callers currently pass NIL for pathkeys because they
2096 : * can't conveniently supply the sort keys. Since this routine doesn't
2097 : * currently do anything with pathkeys anyway, that doesn't matter...
2098 : * but if it ever does, it should react gracefully to lack of key data.
2099 : * (Actually, the thing we'd most likely be interested in is just the number
2100 : * of sort keys, which all callers *could* supply.)
2101 : */
2102 : void
2103 1266050 : cost_sort(Path *path, PlannerInfo *root,
2104 : List *pathkeys, Cost input_cost, double tuples, int width,
2105 : Cost comparison_cost, int sort_mem,
2106 : double limit_tuples)
2107 :
2108 : {
2109 : Cost startup_cost;
2110 : Cost run_cost;
2111 :
2112 1266050 : cost_tuplesort(&startup_cost, &run_cost,
2113 : tuples, width,
2114 : comparison_cost, sort_mem,
2115 : limit_tuples);
2116 :
2117 1266050 : if (!enable_sort)
2118 1294 : startup_cost += disable_cost;
2119 :
2120 1266050 : startup_cost += input_cost;
2121 :
2122 1266050 : path->rows = tuples;
2123 1266050 : path->startup_cost = startup_cost;
2124 1266050 : path->total_cost = startup_cost + run_cost;
2125 1266050 : }
2126 :
2127 : /*
2128 : * append_nonpartial_cost
2129 : * Estimate the cost of the non-partial paths in a Parallel Append.
2130 : * The non-partial paths are assumed to be the first "numpaths" paths
2131 : * from the subpaths list, and to be in order of decreasing cost.
2132 : */
2133 : static Cost
2134 15592 : append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
2135 : {
2136 : Cost *costarr;
2137 : int arrlen;
2138 : ListCell *l;
2139 : ListCell *cell;
2140 : int path_index;
2141 : int min_index;
2142 : int max_index;
2143 :
2144 15592 : if (numpaths == 0)
2145 14120 : return 0;
2146 :
2147 : /*
2148 : * Array length is number of workers or number of relevant paths,
2149 : * whichever is less.
2150 : */
2151 1472 : arrlen = Min(parallel_workers, numpaths);
2152 1472 : costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
2153 :
2154 : /* The first few paths will each be claimed by a different worker. */
2155 1472 : path_index = 0;
2156 3960 : foreach(cell, subpaths)
2157 : {
2158 3314 : Path *subpath = (Path *) lfirst(cell);
2159 :
2160 3314 : if (path_index == arrlen)
2161 826 : break;
2162 2488 : costarr[path_index++] = subpath->total_cost;
2163 : }
2164 :
2165 : /*
2166 : * Since subpaths are sorted by decreasing cost, the last one will have
2167 : * the minimum cost.
2168 : */
2169 1472 : min_index = arrlen - 1;
2170 :
2171 : /*
2172 : * For each of the remaining subpaths, add its cost to the array element
2173 : * with minimum cost.
2174 : */
2175 1954 : for_each_cell(l, subpaths, cell)
2176 : {
2177 1010 : Path *subpath = (Path *) lfirst(l);
2178 :
2179 : /* Consider only the non-partial paths */
2180 1010 : if (path_index++ == numpaths)
2181 528 : break;
2182 :
2183 482 : costarr[min_index] += subpath->total_cost;
2184 :
2185 : /* Update the new min cost array index */
2186 482 : min_index = 0;
2187 1482 : for (int i = 0; i < arrlen; i++)
2188 : {
2189 1000 : if (costarr[i] < costarr[min_index])
2190 202 : min_index = i;
2191 : }
2192 : }
2193 :
2194 : /* Return the highest cost from the array */
2195 1472 : max_index = 0;
2196 3960 : for (int i = 0; i < arrlen; i++)
2197 : {
2198 2488 : if (costarr[i] > costarr[max_index])
2199 182 : max_index = i;
2200 : }
2201 :
2202 1472 : return costarr[max_index];
2203 : }
2204 :
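            : /*
            :  * Hedged worked example of the greedy assignment above (editorial, not
            :  * part of this file): four non-partial subpaths with total costs
            :  * {10, 8, 5, 3} (decreasing, as required) and parallel_workers = 2 give
            :  * arrlen = 2 and costarr = [10, 8].  The cost 5 lands on the cheaper
            :  * slot: [10, 13]; then 3 lands on slot 0: [13, 13].  The function
            :  * returns the maximum, 13, i.e. the estimated completion time of the
            :  * busiest worker.
            :  */
            : 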
2205 : /*
2206 : * cost_append
2207 : * Determines and returns the cost of an Append node.
2208 : */
2209 : void
2210 46478 : cost_append(AppendPath *apath)
2211 : {
2212 : ListCell *l;
2213 :
2214 46478 : apath->path.startup_cost = 0;
2215 46478 : apath->path.total_cost = 0;
2216 46478 : apath->path.rows = 0;
2217 :
2218 46478 : if (apath->subpaths == NIL)
2219 1500 : return;
2220 :
2221 44978 : if (!apath->path.parallel_aware)
2222 : {
2223 29386 : List *pathkeys = apath->path.pathkeys;
2224 :
2225 29386 : if (pathkeys == NIL)
2226 : {
2227 27386 : Path *firstsubpath = (Path *) linitial(apath->subpaths);
2228 :
2229 : /*
2230 : * For an unordered, non-parallel-aware Append we take the startup
2231 : * cost as the startup cost of the first subpath.
2232 : */
2233 27386 : apath->path.startup_cost = firstsubpath->startup_cost;
2234 :
2235 : /* Compute rows and costs as sums of subplan rows and costs. */
2236 108678 : foreach(l, apath->subpaths)
2237 : {
2238 81292 : Path *subpath = (Path *) lfirst(l);
2239 :
2240 81292 : apath->path.rows += subpath->rows;
2241 81292 : apath->path.total_cost += subpath->total_cost;
2242 : }
2243 : }
2244 : else
2245 : {
2246 : /*
2247 : * For an ordered, non-parallel-aware Append we take the startup
2248 : * cost as the sum of the subpath startup costs. This ensures
2249 : * that we don't underestimate the startup cost when a query's
2250 : * LIMIT is such that several of the children have to be run to
2251 : * satisfy it. This might be overkill --- another plausible hack
2252 : * would be to take the Append's startup cost as the maximum of
2253 : * the child startup costs. But we don't want to risk believing
2254 : * that an ORDER BY LIMIT query can be satisfied at small cost
2255 : * when the first child has small startup cost but later ones
2256 : * don't. (If we had the ability to deal with nonlinear cost
2257 : * interpolation for partial retrievals, we would not need to be
2258 : * so conservative about this.)
2259 : *
2260 : * This case is also different from the above in that we have to
2261 : * account for possibly injecting sorts into subpaths that aren't
2262 : * natively ordered.
2263 : */
2264 7812 : foreach(l, apath->subpaths)
2265 : {
2266 5812 : Path *subpath = (Path *) lfirst(l);
2267 : Path sort_path; /* dummy for result of cost_sort */
2268 :
2269 5812 : if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
2270 : {
2271 : /*
2272 : * We'll need to insert a Sort node, so include costs for
2273 : * that. We can use the parent's LIMIT if any, since we
2274 : * certainly won't pull more than that many tuples from
2275 : * any child.
2276 : */
2277 44 : cost_sort(&sort_path,
2278 : NULL, /* doesn't currently need root */
2279 : pathkeys,
2280 : subpath->total_cost,
2281 : subpath->rows,
2282 44 : subpath->pathtarget->width,
2283 : 0.0,
2284 : work_mem,
2285 : apath->limit_tuples);
2286 44 : subpath = &sort_path;
2287 : }
2288 :
2289 5812 : apath->path.rows += subpath->rows;
2290 5812 : apath->path.startup_cost += subpath->startup_cost;
2291 5812 : apath->path.total_cost += subpath->total_cost;
2292 : }
2293 : }
2294 : }
2295 : else /* parallel-aware */
2296 : {
2297 15592 : int i = 0;
2298 15592 : double parallel_divisor = get_parallel_divisor(&apath->path);
2299 :
2300 : /* Parallel-aware Append never produces ordered output. */
2301 : Assert(apath->path.pathkeys == NIL);
2302 :
2303 : /* Calculate startup cost. */
2304 63780 : foreach(l, apath->subpaths)
2305 : {
2306 48188 : Path *subpath = (Path *) lfirst(l);
2307 :
2308 : /*
2309 : * Append will start returning tuples when the child node having
2310 : * lowest startup cost is done setting up. We consider only the
2311 : * first few subplans that immediately get a worker assigned.
2312 : */
2313 48188 : if (i == 0)
2314 15592 : apath->path.startup_cost = subpath->startup_cost;
2315 32596 : else if (i < apath->path.parallel_workers)
2316 15058 : apath->path.startup_cost = Min(apath->path.startup_cost,
2317 : subpath->startup_cost);
2318 :
2319 : /*
2320 : * Apply parallel divisor to subpaths. Scale the number of rows
2321 : * for each partial subpath based on the ratio of the parallel
2322 : * divisor originally used for the subpath to the one we adopted.
2323 : * Also add the cost of partial paths to the total cost, but
2324 : * ignore non-partial paths for now.
2325 : */
2326 48188 : if (i < apath->first_partial_path)
2327 2970 : apath->path.rows += subpath->rows / parallel_divisor;
2328 : else
2329 : {
2330 : double subpath_parallel_divisor;
2331 :
2332 45218 : subpath_parallel_divisor = get_parallel_divisor(subpath);
2333 45218 : apath->path.rows += subpath->rows * (subpath_parallel_divisor /
2334 : parallel_divisor);
2335 45218 : apath->path.total_cost += subpath->total_cost;
2336 : }
2337 :
2338 48188 : apath->path.rows = clamp_row_est(apath->path.rows);
2339 :
2340 48188 : i++;
2341 : }
2342 :
2343 : /* Add cost for non-partial subpaths. */
2344 15592 : apath->path.total_cost +=
2345 15592 : append_nonpartial_cost(apath->subpaths,
2346 : apath->first_partial_path,
2347 : apath->path.parallel_workers);
2348 : }
2349 :
2350 : /*
2351 : * Although Append does not do any selection or projection, it's not free;
2352 : * add a small per-tuple overhead.
2353 : */
2354 44978 : apath->path.total_cost +=
2355 44978 : cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
2356 : }
2357 :
2358 : /*
2359 : * cost_merge_append
2360 : * Determines and returns the cost of a MergeAppend node.
2361 : *
2362 : * MergeAppend merges several pre-sorted input streams, using a heap that
2363 : * at any given instant holds the next tuple from each stream. If there
2364 : * are N streams, we need about N*log2(N) tuple comparisons to construct
2365 : * the heap at startup, and then for each output tuple, about log2(N)
2366 : * comparisons to replace the top entry.
2367 : *
2368 : * (The effective value of N will drop once some of the input streams are
2369 : * exhausted, but it seems unlikely to be worth trying to account for that.)
2370 : *
2371 : * The heap is never spilled to disk, since we assume N is not very large.
2372 : * So this is much simpler than cost_sort.
2373 : *
2374 : * As in cost_sort, we charge two operator evals per tuple comparison.
2375 : *
2376 : * 'pathkeys' is a list of sort keys
2377 : * 'n_streams' is the number of input streams
2378 : * 'input_startup_cost' is the sum of the input streams' startup costs
2379 : * 'input_total_cost' is the sum of the input streams' total costs
2380 : * 'tuples' is the number of tuples in all the streams
2381 : */
2382 : void
2383 3800 : cost_merge_append(Path *path, PlannerInfo *root,
2384 : List *pathkeys, int n_streams,
2385 : Cost input_startup_cost, Cost input_total_cost,
2386 : double tuples)
2387 : {
2388 3800 : Cost startup_cost = 0;
2389 3800 : Cost run_cost = 0;
2390 : Cost comparison_cost;
2391 : double N;
2392 : double logN;
2393 :
2394 : /*
2395 : * Avoid log(0)...
2396 : */
2397 3800 : N = (n_streams < 2) ? 2.0 : (double) n_streams;
2398 3800 : logN = LOG2(N);
2399 :
2400 : /* Assumed cost per tuple comparison */
2401 3800 : comparison_cost = 2.0 * cpu_operator_cost;
2402 :
2403 : /* Heap creation cost */
2404 3800 : startup_cost += comparison_cost * N * logN;
2405 :
2406 : /* Per-tuple heap maintenance cost */
2407 3800 : run_cost += tuples * comparison_cost * logN;
2408 :
2409 : /*
2410 : * Although MergeAppend does not do any selection or projection, it's not
2411 : * free; add a small per-tuple overhead.
2412 : */
2413 3800 : run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2414 :
2415 3800 : path->startup_cost = startup_cost + input_startup_cost;
2416 3800 : path->total_cost = startup_cost + run_cost + input_total_cost;
2417 3800 : }
2418 :
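            : /*
            :  * Hedged worked example (editorial, not part of this file), assuming
            :  * the defaults cpu_operator_cost = 0.0025 and cpu_tuple_cost = 0.01,
            :  * and an APPEND_CPU_COST_MULTIPLIER of 0.5: merging N = 4 streams
            :  * holding 1000 tuples in total gives logN = 2 and
            :  * comparison_cost = 0.005, so
            :  *
            :  *     heap creation:      0.005 * 4 * 2      = 0.04  (startup)
            :  *     heap maintenance:   1000 * 0.005 * 2   = 10.0  (run)
            :  *     per-tuple overhead: 0.01 * 0.5 * 1000  = 5.0   (run)
            :  *
            :  * for 0.04 startup and 15.0 run cost on top of the input costs.
            :  */
            : 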
2419 : /*
2420 : * cost_material
2421 : * Determines and returns the cost of materializing a relation, including
2422 : * the cost of reading the input data.
2423 : *
2424 : * If the total volume of data to materialize exceeds work_mem, we will need
2425 : * to write it to disk, so the cost is much higher in that case.
2426 : *
2427 : * Note that here we are estimating the costs for the first scan of the
2428 : * relation, so the materialization is all overhead --- any savings will
2429 : * occur only on rescan, which is estimated in cost_rescan.
2430 : */
2431 : void
2432 413156 : cost_material(Path *path,
2433 : Cost input_startup_cost, Cost input_total_cost,
2434 : double tuples, int width)
2435 : {
2436 413156 : Cost startup_cost = input_startup_cost;
2437 413156 : Cost run_cost = input_total_cost - input_startup_cost;
2438 413156 : double nbytes = relation_byte_size(tuples, width);
2439 413156 : long work_mem_bytes = work_mem * 1024L;
2440 :
2441 413156 : path->rows = tuples;
2442 :
2443 : /*
2444 : * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2445 : * reflect bookkeeping overhead. (This rate must be more than what
2446 : * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2447 : * if it is exactly the same then there will be a cost tie between
2448 : * nestloop with A outer, materialized B inner and nestloop with B outer,
2449 : * materialized A inner. The extra cost ensures we'll prefer
2450 : * materializing the smaller rel.) Note that this is normally a good deal
2451 : * less than cpu_tuple_cost, which is OK because a Material plan node
2452 : * doesn't do qual-checking or projection, so it's got less overhead than
2453 : * most plan nodes.
2454 : */
2455 413156 : run_cost += 2 * cpu_operator_cost * tuples;
2456 :
2457 : /*
2458 : * If we will spill to disk, charge at the rate of seq_page_cost per page.
2459 : * This cost is assumed to be evenly spread through the plan run phase,
2460 : * which isn't exactly accurate but our cost model doesn't allow for
2461 : * nonuniform costs within the run phase.
2462 : */
2463 413156 : if (nbytes > work_mem_bytes)
2464 : {
2465 4596 : double npages = ceil(nbytes / BLCKSZ);
2466 :
2467 4596 : run_cost += seq_page_cost * npages;
2468 : }
2469 :
2470 413156 : path->startup_cost = startup_cost;
2471 413156 : path->total_cost = startup_cost + run_cost;
2472 413156 : }
2473 :
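            : /*
            :  * Hedged worked example (editorial, not part of this file): suppose
            :  * 1,000,000 tuples at roughly 64 bytes each once header overhead is
            :  * included (an assumed width), with the defaults work_mem = 4MB,
            :  * cpu_operator_cost = 0.0025, and seq_page_cost = 1.0.  Bookkeeping
            :  * costs 2 * 0.0025 * 1e6 = 5000 units; since 64 million bytes exceed
            :  * work_mem, we also spill ceil(64e6 / 8192) = 7813 pages for another
            :  * 7813 units of run cost.
            :  */
            : 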
2474 : /*
2475 : * cost_memoize_rescan
2476 : * Determines the estimated cost of rescanning a Memoize node.
2477 : *
2478 : * In order to estimate this, we must gain knowledge of how often we expect to
2479 : * be called and how many distinct sets of parameters we are likely to be
2480 : * called with. If we expect a good cache hit ratio, then we can set our
2481 : * costs to account for that hit ratio, plus a little bit of cost for the
2482 : * caching itself. Caching will not work out well if we expect to be called
2483 : * with too many distinct parameter values. The worst-case here is that we
2484 : * never see any parameter value twice, in which case we'd never get a cache
2485 : * hit and caching would be a complete waste of effort.
2486 : */
2487 : static void
2488 221440 : cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
2489 : Cost *rescan_startup_cost, Cost *rescan_total_cost)
2490 : {
2491 : EstimationInfo estinfo;
2492 : ListCell *lc;
2493 221440 : Cost input_startup_cost = mpath->subpath->startup_cost;
2494 221440 : Cost input_total_cost = mpath->subpath->total_cost;
2495 221440 : double tuples = mpath->subpath->rows;
2496 221440 : double calls = mpath->calls;
2497 221440 : int width = mpath->subpath->pathtarget->width;
2498 :
2499 : double hash_mem_bytes;
2500 : double est_entry_bytes;
2501 : double est_cache_entries;
2502 : double ndistinct;
2503 : double evict_ratio;
2504 : double hit_ratio;
2505 : Cost startup_cost;
2506 : Cost total_cost;
2507 :
2508 : /* available cache space */
2509 221440 : hash_mem_bytes = get_hash_memory_limit();
2510 :
2511 : /*
2512 : * Set the number of bytes each cache entry should consume in the cache.
2513 : * To provide us with better estimates of how many cache entries we can
2514 : * store at once, we make a call to the executor here to ask it what
2515 : * memory overheads there are for a single cache entry.
2516 : */
2517 221440 : est_entry_bytes = relation_byte_size(tuples, width) +
2518 221440 : ExecEstimateCacheEntryOverheadBytes(tuples);
2519 :
2520 : /* include the estimated width for the cache keys */
2521 464572 : foreach(lc, mpath->param_exprs)
2522 243132 : est_entry_bytes += get_expr_width(root, (Node *) lfirst(lc));
2523 :
2524 : /* estimate on the upper limit of cache entries we can hold at once */
2525 221440 : est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
2526 :
2527 : /* estimate on the distinct number of parameter values */
2528 221440 : ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
2529 : &estinfo);
2530 :
2531 : /*
2532 : * When the estimation fell back on using a default value, it's a bit too
2533 : * risky to assume that it's ok to use a Memoize node. The use of a
2534 : * default could cause us to use a Memoize node when it's really
2535 : * inappropriate to do so. If we see that this has been done, then we'll
2536 : * assume that every call will have unique parameters, which will almost
2537 : * certainly mean a MemoizePath will never survive add_path().
2538 : */
2539 221440 : if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
2540 10058 : ndistinct = calls;
2541 :
2542 : /*
2543 : * Since we've already estimated the maximum number of entries we can
2544 : * store at once and know the estimated number of distinct values we'll be
2545 : * called with, we'll take this opportunity to set the path's est_entries.
2546 : * This will ultimately determine the hash table size that the executor
2547 : * will use. If we leave this at zero, the executor will just choose the
2548 : * size itself. Really this is not the right place to do this, but it's
2549 : * convenient since everything is already calculated.
2550 : */
2551 221440 : mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
2552 : PG_UINT32_MAX);
2553 :
2554 : /*
2555 : * When the number of distinct parameter values is above the amount we can
2556 : * store in the cache, then we'll have to evict some entries from the
2557 : * cache. This is not free. Here we estimate how often we'll incur the
2558 : * cost of that eviction.
2559 : */
2560 221440 : evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
2561 :
2562 : /*
2563 : * In order to estimate how costly a single scan will be, we need to
2564 : * attempt to estimate what the cache hit ratio will be. To do that we
2565 : * must look at how many scans are estimated in total for this node and
2566 : * how many of those scans we expect to get a cache hit.
2567 : */
2568 442880 : hit_ratio = ((calls - ndistinct) / calls) *
2569 221440 : (est_cache_entries / Max(ndistinct, est_cache_entries));
2570 :
2571 : Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2572 :
2573 : /*
2574 : * Set the total_cost accounting for the expected cache hit ratio. We
2575 : * also add on a cpu_operator_cost to account for a cache lookup. This
2576 : * will happen regardless of whether it's a cache hit or not.
2577 : */
2578 221440 : total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2579 :
2580 : /* Now adjust the total cost to account for cache evictions */
2581 :
2582 : /* Charge a cpu_tuple_cost for evicting the actual cache entry */
2583 221440 : total_cost += cpu_tuple_cost * evict_ratio;
2584 :
2585 : /*
2586 : * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2587 : * The per-tuple eviction is really just a pfree, so charging a whole
2588 : * cpu_operator_cost seems a little excessive.
2589 : */
2590 221440 : total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
2591 :
2592 : /*
2593 : * Now adjust for storing things in the cache, since that's not free
2594 : * either. Everything must go in the cache. We don't proportion this
2595 : * over any ratio, just apply it once for the scan. We charge a
2596 : * cpu_tuple_cost for the creation of the cache entry and also a
2597 : * cpu_operator_cost for each tuple we expect to cache.
2598 : */
2599 221440 : total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2600 :
2601 : /*
2602 : * Getting the first row must also be proportioned according to the
2603 : * expected cache hit ratio.
2604 : */
2605 221440 : startup_cost = input_startup_cost * (1.0 - hit_ratio);
2606 :
2607 : /*
2608 : * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2609 : * which we'll do regardless of whether it was a cache hit or not.
2610 : */
2611 221440 : startup_cost += cpu_tuple_cost;
2612 :
2613 221440 : *rescan_startup_cost = startup_cost;
2614 221440 : *rescan_total_cost = total_cost;
2615 221440 : }
2616 :
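            : /*
            :  * A compilable sketch of the cache hit-ratio estimate above (an
            :  * editorial illustration; the function name is hypothetical).  For
            :  * example, calls = 1000, ndistinct = 100, and est_cache_entries = 500
            :  * yield ((1000 - 100) / 1000) * (500 / 500) = 0.9, i.e. 90% of
            :  * rescans are expected to be served from the cache.
            :  */
            : static double
            : example_memoize_hit_ratio(double calls, double ndistinct,
            :                           double est_cache_entries)
            : {
            :     /* fraction of the distinct keys that fit in the cache at once */
            :     double fits = (est_cache_entries > ndistinct) ?
            :         1.0 : est_cache_entries / ndistinct;
            : 
            :     /* first sight of each distinct key is necessarily a miss */
            :     return ((calls - ndistinct) / calls) * fits;
            : }
            : 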
2617 : /*
2618 : * cost_agg
2619 : * Determines and returns the cost of performing an Agg plan node,
2620 : * including the cost of its input.
2621 : *
2622 : * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2623 : * we are using a hashed Agg node just to do grouping).
2624 : *
2625 : * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2626 : * are for appropriately-sorted input.
2627 : */
2628 : void
2629 58980 : cost_agg(Path *path, PlannerInfo *root,
2630 : AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
2631 : int numGroupCols, double numGroups,
2632 : List *quals,
2633 : Cost input_startup_cost, Cost input_total_cost,
2634 : double input_tuples, double input_width)
2635 : {
2636 : double output_tuples;
2637 : Cost startup_cost;
2638 : Cost total_cost;
2639 : AggClauseCosts dummy_aggcosts;
2640 :
2641 : /* Use all-zero per-aggregate costs if NULL is passed */
2642 58980 : if (aggcosts == NULL)
2643 : {
2644 : Assert(aggstrategy == AGG_HASHED);
2645 54000 : MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
2646 9000 : aggcosts = &dummy_aggcosts;
2647 : }
2648 :
2649 : /*
2650 : * The transCost.per_tuple component of aggcosts should be charged once
2651 : * per input tuple, corresponding to the costs of evaluating the aggregate
2652 : * transfns and their input expressions. The finalCost.per_tuple component
2653 : * is charged once per output tuple, corresponding to the costs of
2654 : * evaluating the finalfns. Startup costs are of course charged but once.
2655 : *
2656 : * If we are grouping, we charge an additional cpu_operator_cost per
2657 : * grouping column per input tuple for grouping comparisons.
2658 : *
2659 : * We will produce a single output tuple if not grouping, and a tuple per
2660 : * group otherwise. We charge cpu_tuple_cost for each output tuple.
2661 : *
2662 : * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2663 : * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2664 : * input path is already sorted appropriately, AGG_SORTED should be
2665 : * preferred (since it has no risk of memory overflow). This will happen
2666 : * as long as the computed total costs are indeed exactly equal --- but if
2667 : * there's roundoff error we might do the wrong thing. So be sure that
2668 : * the computations below form the same intermediate values in the same
2669 : * order.
2670 : */
2671 58980 : if (aggstrategy == AGG_PLAIN)
2672 : {
2673 31854 : startup_cost = input_total_cost;
2674 31854 : startup_cost += aggcosts->transCost.startup;
2675 31854 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2676 31854 : startup_cost += aggcosts->finalCost.startup;
2677 31854 : startup_cost += aggcosts->finalCost.per_tuple;
2678 : /* we aren't grouping */
2679 31854 : total_cost = startup_cost + cpu_tuple_cost;
2680 31854 : output_tuples = 1;
2681 : }
2682 27126 : else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2683 : {
2684 : /* Here we are able to deliver output on-the-fly */
2685 10402 : startup_cost = input_startup_cost;
2686 10402 : total_cost = input_total_cost;
2687 10402 : if (aggstrategy == AGG_MIXED && !enable_hashagg)
2688 : {
2689 456 : startup_cost += disable_cost;
2690 456 : total_cost += disable_cost;
2691 : }
2692 : /* calcs phrased this way to match HASHED case, see note above */
2693 10402 : total_cost += aggcosts->transCost.startup;
2694 10402 : total_cost += aggcosts->transCost.per_tuple * input_tuples;
2695 10402 : total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2696 10402 : total_cost += aggcosts->finalCost.startup;
2697 10402 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2698 10402 : total_cost += cpu_tuple_cost * numGroups;
2699 10402 : output_tuples = numGroups;
2700 : }
2701 : else
2702 : {
2703 : /* must be AGG_HASHED */
2704 16724 : startup_cost = input_total_cost;
2705 16724 : if (!enable_hashagg)
2706 1500 : startup_cost += disable_cost;
2707 16724 : startup_cost += aggcosts->transCost.startup;
2708 16724 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2709 : /* cost of computing hash value */
2710 16724 : startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
2711 16724 : startup_cost += aggcosts->finalCost.startup;
2712 :
2713 16724 : total_cost = startup_cost;
2714 16724 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2715 : /* cost of retrieving from hash table */
2716 16724 : total_cost += cpu_tuple_cost * numGroups;
2717 16724 : output_tuples = numGroups;
2718 : }
2719 :
2720 : /*
2721 : * Add the disk costs of hash aggregation that spills to disk.
2722 : *
2723 : * Groups that go into the hash table stay in memory until finalized, so
2724 : * spilling and reprocessing tuples doesn't incur additional invocations
2725 : * of transCost or finalCost. Furthermore, the computed hash value is
2726 : * stored with the spilled tuples, so we don't incur extra invocations of
2727 : * the hash function.
2728 : *
2729 : * Hash Agg begins returning tuples after the first batch is complete.
2730 : * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2731 : * accrue reads only to total_cost.
2732 : */
2733 58980 : if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
2734 : {
2735 : double pages;
2736 17568 : double pages_written = 0.0;
2737 17568 : double pages_read = 0.0;
2738 : double spill_cost;
2739 : double hashentrysize;
2740 : double nbatches;
2741 : Size mem_limit;
2742 : uint64 ngroups_limit;
2743 : int num_partitions;
2744 : int depth;
2745 :
2746 : /*
2747 : * Estimate number of batches based on the computed limits. If less
2748 : * than or equal to one, all groups are expected to fit in memory;
2749 : * otherwise we expect to spill.
2750 : */
2751 17568 : hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2752 : input_width,
2753 : aggcosts->transitionSpace);
2754 17568 : hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2755 : &ngroups_limit, &num_partitions);
2756 :
2757 17568 : nbatches = Max((numGroups * hashentrysize) / mem_limit,
2758 : numGroups / ngroups_limit);
2759 :
2760 17568 : nbatches = Max(ceil(nbatches), 1.0);
2761 17568 : num_partitions = Max(num_partitions, 2);
2762 :
2763 : /*
2764 : * The number of partitions can change at different levels of
2765 : * recursion; but for the purposes of this calculation assume it stays
2766 : * constant.
2767 : */
2768 17568 : depth = ceil(log(nbatches) / log(num_partitions));
2769 :
2770 : /*
2771 : * Estimate number of pages read and written. For each level of
2772 : * recursion, a tuple must be written and then later read.
2773 : */
2774 17568 : pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
2775 17568 : pages_written = pages_read = pages * depth;
2776 :
2777 : /*
2778 : * HashAgg has somewhat worse IO behavior than Sort on typical
2779 : * hardware/OS combinations. Account for this with a generic penalty.
2780 : */
2781 17568 : pages_read *= 2.0;
2782 17568 : pages_written *= 2.0;
2783 :
2784 17568 : startup_cost += pages_written * random_page_cost;
2785 17568 : total_cost += pages_written * random_page_cost;
2786 17568 : total_cost += pages_read * seq_page_cost;
2787 :
2788 : /* account for CPU cost of spilling a tuple and reading it back */
2789 17568 : spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2790 17568 : startup_cost += spill_cost;
2791 17568 : total_cost += spill_cost;
2792 : }
2793 :
2794 : /*
2795 : * If there are quals (HAVING quals), account for their cost and
2796 : * selectivity.
2797 : */
2798 58980 : if (quals)
2799 : {
2800 : QualCost qual_cost;
2801 :
2802 3776 : cost_qual_eval(&qual_cost, quals, root);
2803 3776 : startup_cost += qual_cost.startup;
2804 3776 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2805 :
2806 3776 : output_tuples = clamp_row_est(output_tuples *
2807 3776 : clauselist_selectivity(root,
2808 : quals,
2809 : 0,
2810 : JOIN_INNER,
2811 : NULL));
2812 : }
2813 :
2814 58980 : path->rows = output_tuples;
2815 58980 : path->startup_cost = startup_cost;
2816 58980 : path->total_cost = total_cost;
2817 58980 : }
2818 :
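            : /*
            :  * Hedged worked example of the spill arithmetic above (editorial, not
            :  * part of this file): suppose numGroups = 1e6 with hashentrysize = 64
            :  * bytes against mem_limit = 4MB, and hash_agg_set_limits() reports
            :  * num_partitions = 32 (an assumed value).  Then
            :  *
            :  *     nbatches = ceil((1e6 * 64) / 4194304) = 16
            :  *     depth    = ceil(log(16) / log(32))    = 1
            :  *
            :  * so every input tuple is written and read back once; the resulting
            :  * page traffic is then doubled and charged at random_page_cost for
            :  * writes and seq_page_cost for reads, as coded above.
            :  */
            : 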
2819 : /*
2820 : * get_windowclause_startup_tuples
2821 : * Estimate how many tuples we'll need to fetch from a WindowAgg's
2822 : * subnode before we can output the first WindowAgg tuple.
2823 : *
2824 : * How many tuples need to be read depends on the WindowClause. For example,
2825 : * a WindowClause with no PARTITION BY and no ORDER BY requires that all
2826 : * subnode tuples are read and aggregated before the WindowAgg can output
2827 : * anything. If there's a PARTITION BY, then we only need to look at tuples
2828 : * in the first partition. Here we attempt to estimate just how many
2829 : * 'input_tuples' the WindowAgg will need to read for the given WindowClause
2830 : * before the first tuple can be output.
2831 : */
2832 : static double
2833 2636 : get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc,
2834 : double input_tuples)
2835 : {
2836 2636 : int frameOptions = wc->frameOptions;
2837 : double partition_tuples;
2838 : double return_tuples;
2839 : double peer_tuples;
2840 :
2841 : /*
2842 : * First, figure out how many partitions there are likely to be and set
2843 : * partition_tuples according to that estimate.
2844 : */
2845 2636 : if (wc->partitionClause != NIL)
2846 : {
2847 : double num_partitions;
2848 644 : List *partexprs = get_sortgrouplist_exprs(wc->partitionClause,
2849 644 : root->parse->targetList);
2850 :
2851 644 : num_partitions = estimate_num_groups(root, partexprs, input_tuples,
2852 : NULL, NULL);
2853 644 : list_free(partexprs);
2854 :
2855 644 : partition_tuples = input_tuples / num_partitions;
2856 : }
2857 : else
2858 : {
2859 : /* all tuples belong to the same partition */
2860 1992 : partition_tuples = input_tuples;
2861 : }
2862 :
2863 : /* estimate the number of tuples in each peer group */
2864 2636 : if (wc->orderClause != NIL)
2865 : {
2866 : double num_groups;
2867 : List *orderexprs;
2868 :
2869 2210 : orderexprs = get_sortgrouplist_exprs(wc->orderClause,
2870 2210 : root->parse->targetList);
2871 :
2872 : /* estimate how many peer groups there are in the partition */
2873 2210 : num_groups = estimate_num_groups(root, orderexprs,
2874 : partition_tuples, NULL,
2875 : NULL);
2876 2210 : list_free(orderexprs);
2877 2210 : peer_tuples = partition_tuples / num_groups;
2878 : }
2879 : else
2880 : {
2881 : /* no ORDER BY so only 1 tuple belongs in each peer group */
2882 426 : peer_tuples = 1.0;
2883 : }
2884 :
2885 2636 : if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
2886 : {
2887 : /* include all partition rows */
2888 346 : return_tuples = partition_tuples;
2889 : }
2890 2290 : else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW)
2891 : {
2892 1300 : if (frameOptions & FRAMEOPTION_ROWS)
2893 : {
2894 : /* just count the current row */
2895 582 : return_tuples = 1.0;
2896 : }
2897 718 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
2898 : {
2899 : /*
2900 : * When in RANGE/GROUPS mode, it's more complex. If there's no
2901 : * ORDER BY, then all rows in the partition are peers, otherwise
2902 : * we'll need to read the first group of peers.
2903 : */
2904 718 : if (wc->orderClause == NIL)
2905 258 : return_tuples = partition_tuples;
2906 : else
2907 460 : return_tuples = peer_tuples;
2908 : }
2909 : else
2910 : {
2911 : /*
2912 : * Something new we don't support yet? This needs attention.
2913 : * We'll just return 1.0 in the meantime.
2914 : */
2915 : Assert(false);
2916 0 : return_tuples = 1.0;
2917 : }
2918 : }
2919 990 : else if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
2920 : {
2921 : /*
2922 : * BETWEEN ... AND N PRECEDING will only need to read the WindowAgg's
2923 : * subnode after N ROWS/RANGES/GROUPS. N can be 0, but not negative,
2924 : * so we'll just assume only the current row needs to be read to fetch
2925 : * the first WindowAgg row.
2926 : */
2927 108 : return_tuples = 1.0;
2928 : }
2929 882 : else if (frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
2930 : {
2931 882 : Const *endOffset = (Const *) wc->endOffset;
2932 : double end_offset_value;
2933 :
2934 : /* try to figure out the value specified in the endOffset */
2935 882 : if (IsA(endOffset, Const))
2936 : {
2937 870 : if (endOffset->constisnull)
2938 : {
2939 : /*
2940 : * NULLs are not allowed here, but currently there's no code to
2941 : * error out when a NULL Const appears; we'll only discover the
2942 : * problem during execution.  For now, pretend everything is
2943 : * fine and assume that only the first row/range/group will be
2944 : * needed.
2945 : */
2946 0 : end_offset_value = 1.0;
2947 : }
2948 : else
2949 : {
2950 870 : switch (endOffset->consttype)
2951 : {
2952 24 : case INT2OID:
2953 24 : end_offset_value =
2954 24 : (double) DatumGetInt16(endOffset->constvalue);
2955 24 : break;
2956 132 : case INT4OID:
2957 132 : end_offset_value =
2958 132 : (double) DatumGetInt32(endOffset->constvalue);
2959 132 : break;
2960 372 : case INT8OID:
2961 372 : end_offset_value =
2962 372 : (double) DatumGetInt64(endOffset->constvalue);
2963 372 : break;
2964 342 : default:
2965 342 : end_offset_value =
2966 342 : partition_tuples / peer_tuples *
2967 : DEFAULT_INEQ_SEL;
2968 342 : break;
2969 : }
2970 : }
2971 : }
2972 : else
2973 : {
2974 : * When the end bound is not a Const, we can only guess, so fall
2975 : * back on DEFAULT_INEQ_SEL.
2976 : * just make use of DEFAULT_INEQ_SEL.
2977 : */
2978 12 : end_offset_value =
2979 12 : partition_tuples / peer_tuples * DEFAULT_INEQ_SEL;
2980 : }
2981 :
2982 882 : if (frameOptions & FRAMEOPTION_ROWS)
2983 : {
2984 : /* include the N FOLLOWING and the current row */
2985 222 : return_tuples = end_offset_value + 1.0;
2986 : }
2987 660 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
2988 : {
2989 : /* include the N FOLLOWING ranges/groups and the initial range/group */
2990 660 : return_tuples = peer_tuples * (end_offset_value + 1.0);
2991 : }
2992 : else
2993 : {
2994 : /*
2995 : * Something new we don't support yet? This needs attention.
2996 : * We'll just return 1.0 in the meantime.
2997 : */
2998 : Assert(false);
2999 0 : return_tuples = 1.0;
3000 : }
3001 : }
3002 : else
3003 : {
3004 : /*
3005 : * Something new we don't support yet? This needs attention. We'll
3006 : * just return 1.0 in the meantime.
3007 : */
3008 : Assert(false);
3009 0 : return_tuples = 1.0;
3010 : }
3011 :
3012 2636 : if (wc->partitionClause != NIL || wc->orderClause != NIL)
3013 : {
3014 : /*
3015 : * Cap the return value to the estimated partition tuples and account
3016 : * for the extra tuple WindowAgg will need to read to confirm the next
3017 : * tuple does not belong to the same partition or peer group.
3018 : */
3019 2372 : return_tuples = Min(return_tuples + 1.0, partition_tuples);
3020 : }
3021 : else
3022 : {
3023 : /*
3024 : * Cap the return value so it's never higher than the expected tuples
3025 : * in the partition.
3026 : */
3027 264 : return_tuples = Min(return_tuples, partition_tuples);
3028 : }
3029 :
3030 : /*
3031 : * We needn't worry about any EXCLUDE options as those only exclude rows
3032 : * from being aggregated, not from being read from the WindowAgg's
3033 : * subnode.
3034 : */
3035 :
3036 2636 : return clamp_row_est(return_tuples);
3037 : }
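/*
 * Illustrative, standalone sketch (not part of costsize.c; compile it
 * separately): the estimate above, reduced to the common case of a frame of
 * ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW with a PARTITION BY.
 * All numbers are hypothetical; in the planner, num_partitions would come
 * from estimate_num_groups() and the result would pass through
 * clamp_row_est().
 */
#include <stdio.h>

static double
startup_tuples_rows_current_row(double input_tuples, double num_partitions)
{
	double		partition_tuples = input_tuples / num_partitions;
	double		return_tuples = 1.0;	/* ROWS mode: just the current row */

	/* add the extra tuple read to detect the end of the partition, capped */
	return_tuples += 1.0;
	if (return_tuples > partition_tuples)
		return_tuples = partition_tuples;
	return return_tuples;
}

int
main(void)
{
	/* 10000 input rows over 50 partitions => 200 rows per partition */
	printf("%g\n", startup_tuples_rows_current_row(10000.0, 50.0)); /* 2 */
	return 0;
}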
3038 :
3039 : /*
3040 : * cost_windowagg
3041 : * Determines and returns the cost of performing a WindowAgg plan node,
3042 : * including the cost of its input.
3043 : *
3044 : * Input is assumed already properly sorted.
3045 : */
3046 : void
3047 2636 : cost_windowagg(Path *path, PlannerInfo *root,
3048 : List *windowFuncs, WindowClause *winclause,
3049 : Cost input_startup_cost, Cost input_total_cost,
3050 : double input_tuples)
3051 : {
3052 : Cost startup_cost;
3053 : Cost total_cost;
3054 : double startup_tuples;
3055 : int numPartCols;
3056 : int numOrderCols;
3057 : ListCell *lc;
3058 :
3059 2636 : numPartCols = list_length(winclause->partitionClause);
3060 2636 : numOrderCols = list_length(winclause->orderClause);
3061 :
3062 2636 : startup_cost = input_startup_cost;
3063 2636 : total_cost = input_total_cost;
3064 :
3065 : /*
3066 : * Window functions are assumed to cost their stated execution cost, plus
3067 : * the cost of evaluating their input expressions, per tuple. Since they
3068 : * may in fact evaluate their inputs at multiple rows during each cycle,
3069 : * this could be a drastic underestimate; but without a way to know how
3070 : * many rows the window function will fetch, it's hard to do better. In
3071 : * any case, it's a good estimate for all the built-in window functions,
3072 : * so we'll just do this for now.
3073 : */
3074 5974 : foreach(lc, windowFuncs)
3075 : {
3076 3338 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
3077 : Cost wfunccost;
3078 : QualCost argcosts;
3079 :
3080 3338 : argcosts.startup = argcosts.per_tuple = 0;
3081 3338 : add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
3082 : &argcosts);
3083 3338 : startup_cost += argcosts.startup;
3084 3338 : wfunccost = argcosts.per_tuple;
3085 :
3086 : /* also add the input expressions' cost to per-input-row costs */
3087 3338 : cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
3088 3338 : startup_cost += argcosts.startup;
3089 3338 : wfunccost += argcosts.per_tuple;
3090 :
3091 : /*
3092 : * Add the filter's cost to per-input-row costs. XXX We should reduce
3093 : * input expression costs according to filter selectivity.
3094 : */
3095 3338 : cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
3096 3338 : startup_cost += argcosts.startup;
3097 3338 : wfunccost += argcosts.per_tuple;
3098 :
3099 3338 : total_cost += wfunccost * input_tuples;
3100 : }
3101 :
3102 : /*
3103 : * We also charge cpu_operator_cost per grouping column per tuple for
3104 : * grouping comparisons, plus cpu_tuple_cost per tuple for general
3105 : * overhead.
3106 : *
3107 : * XXX this neglects costs of spooling the data to disk when it overflows
3108 : * work_mem. Sooner or later that should get accounted for.
3109 : */
3110 2636 : total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
3111 2636 : total_cost += cpu_tuple_cost * input_tuples;
3112 :
3113 2636 : path->rows = input_tuples;
3114 2636 : path->startup_cost = startup_cost;
3115 2636 : path->total_cost = total_cost;
3116 :
3117 : /*
3118 : * Also, take into account how many tuples we need to read from the
3119 : * subnode in order to produce the first tuple from the WindowAgg. To do
3120 : * this we apportion the run cost (total cost minus startup cost)
3121 : * over the estimated startup tuples. We already included the startup
3122 : * cost of the subnode, so we only need to do this when the estimated
3123 : * startup tuples is above 1.0.
3124 : */
3125 2636 : startup_tuples = get_windowclause_startup_tuples(root, winclause,
3126 : input_tuples);
3127 :
3128 2636 : if (startup_tuples > 1.0)
3129 2358 : path->startup_cost += (total_cost - startup_cost) / input_tuples *
3130 2358 : (startup_tuples - 1.0);
3131 2636 : }
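/*
 * Illustrative, standalone sketch (not part of costsize.c): how the final
 * adjustment in cost_windowagg() shifts cost from run time to startup when
 * several subnode tuples must be read before the first output row.  All
 * numbers are hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	double		startup_cost = 100.0;	/* already includes subnode startup */
	double		total_cost = 1100.0;
	double		input_tuples = 1000.0;
	double		startup_tuples = 201.0; /* get_windowclause_startup_tuples() */

	/* apportion the run cost over the extra tuples read before output */
	if (startup_tuples > 1.0)
		startup_cost += (total_cost - startup_cost) / input_tuples *
			(startup_tuples - 1.0);

	printf("startup_cost = %g\n", startup_cost); /* 100 + 1.0 * 200 = 300 */
	return 0;
}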
3132 :
3133 : /*
3134 : * cost_group
3135 : * Determines and returns the cost of performing a Group plan node,
3136 : * including the cost of its input.
3137 : *
3138 : * Note: caller must ensure that input costs are for appropriately-sorted
3139 : * input.
3140 : */
3141 : void
3142 1576 : cost_group(Path *path, PlannerInfo *root,
3143 : int numGroupCols, double numGroups,
3144 : List *quals,
3145 : Cost input_startup_cost, Cost input_total_cost,
3146 : double input_tuples)
3147 : {
3148 : double output_tuples;
3149 : Cost startup_cost;
3150 : Cost total_cost;
3151 :
3152 1576 : output_tuples = numGroups;
3153 1576 : startup_cost = input_startup_cost;
3154 1576 : total_cost = input_total_cost;
3155 :
3156 : /*
3157 : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3158 : * all columns get compared for most of the tuples.
3159 : */
3160 1576 : total_cost += cpu_operator_cost * input_tuples * numGroupCols;
3161 :
3162 : /*
3163 : * If there are quals (HAVING quals), account for their cost and
3164 : * selectivity.
3165 : */
3166 1576 : if (quals)
3167 : {
3168 : QualCost qual_cost;
3169 :
3170 0 : cost_qual_eval(&qual_cost, quals, root);
3171 0 : startup_cost += qual_cost.startup;
3172 0 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
3173 :
3174 0 : output_tuples = clamp_row_est(output_tuples *
3175 0 : clauselist_selectivity(root,
3176 : quals,
3177 : 0,
3178 : JOIN_INNER,
3179 : NULL));
3180 : }
3181 :
3182 1576 : path->rows = output_tuples;
3183 1576 : path->startup_cost = startup_cost;
3184 1576 : path->total_cost = total_cost;
3185 1576 : }
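/*
 * Illustrative, standalone sketch (not part of costsize.c): the per-tuple
 * comparison charge used by cost_group(), with PostgreSQL's default
 * cpu_operator_cost of 0.0025.  Row counts are hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	double		cpu_operator_cost = 0.0025; /* default GUC value */
	double		input_tuples = 100000.0;
	int			numGroupCols = 2;

	/* one comparison per grouping column per input tuple */
	double		comparison_cost = cpu_operator_cost * input_tuples * numGroupCols;

	printf("grouping comparisons: %g cost units\n", comparison_cost); /* 500 */
	return 0;
}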
3186 :
3187 : /*
3188 : * initial_cost_nestloop
3189 : * Preliminary estimate of the cost of a nestloop join path.
3190 : *
3191 : * This must quickly produce lower-bound estimates of the path's startup and
3192 : * total costs. If we are unable to eliminate the proposed path from
3193 : * consideration using the lower bounds, final_cost_nestloop will be called
3194 : * to obtain the final estimates.
3195 : *
3196 : * The exact division of labor between this function and final_cost_nestloop
3197 : * is private to them, and represents a tradeoff between speed of the initial
3198 : * estimate and getting a tight lower bound. We choose to not examine the
3199 : * join quals here, since that's by far the most expensive part of the
3200 : * calculations. The end result is that CPU-cost considerations must be
3201 : * left for the second phase; and for SEMI/ANTI joins, we must also postpone
3202 : * incorporation of the inner path's run cost.
3203 : *
3204 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3205 : * other data to be used by final_cost_nestloop
3206 : * 'jointype' is the type of join to be performed
3207 : * 'outer_path' is the outer input to the join
3208 : * 'inner_path' is the inner input to the join
3209 : * 'extra' contains miscellaneous information about the join
3210 : */
3211 : void
3212 2167696 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
3213 : JoinType jointype,
3214 : Path *outer_path, Path *inner_path,
3215 : JoinPathExtraData *extra)
3216 : {
3217 2167696 : Cost startup_cost = 0;
3218 2167696 : Cost run_cost = 0;
3219 2167696 : double outer_path_rows = outer_path->rows;
3220 : Cost inner_rescan_start_cost;
3221 : Cost inner_rescan_total_cost;
3222 : Cost inner_run_cost;
3223 : Cost inner_rescan_run_cost;
3224 :
3225 : /* estimate costs to rescan the inner relation */
3226 2167696 : cost_rescan(root, inner_path,
3227 : &inner_rescan_start_cost,
3228 : &inner_rescan_total_cost);
3229 :
3230 : /* cost of source data */
3231 :
3232 : /*
3233 : * NOTE: clearly, we must pay both outer and inner paths' startup_cost
3234 : * before we can start returning tuples, so the join's startup cost is
3235 : * their sum. We'll also pay the inner path's rescan startup cost
3236 : * multiple times.
3237 : */
3238 2167696 : startup_cost += outer_path->startup_cost + inner_path->startup_cost;
3239 2167696 : run_cost += outer_path->total_cost - outer_path->startup_cost;
3240 2167696 : if (outer_path_rows > 1)
3241 1522756 : run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
3242 :
3243 2167696 : inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
3244 2167696 : inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
3245 :
3246 2167696 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
3247 2127700 : extra->inner_unique)
3248 : {
3249 : /*
3250 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3251 : * executor will stop after the first match.
3252 : *
3253 : * Getting decent estimates requires inspection of the join quals,
3254 : * which we choose to postpone to final_cost_nestloop.
3255 : */
3256 :
3257 : /* Save private data for final_cost_nestloop */
3258 991026 : workspace->inner_run_cost = inner_run_cost;
3259 991026 : workspace->inner_rescan_run_cost = inner_rescan_run_cost;
3260 : }
3261 : else
3262 : {
3263 : /* Normal case; we'll scan whole input rel for each outer row */
3264 1176670 : run_cost += inner_run_cost;
3265 1176670 : if (outer_path_rows > 1)
3266 834528 : run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
3267 : }
3268 :
3269 : /* CPU costs left for later */
3270 :
3271 : /* Public result fields */
3272 2167696 : workspace->startup_cost = startup_cost;
3273 2167696 : workspace->total_cost = startup_cost + run_cost;
3274 : /* Save private data for final_cost_nestloop */
3275 2167696 : workspace->run_cost = run_cost;
3276 2167696 : }
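/*
 * Illustrative, standalone sketch (not part of costsize.c): the normal-case
 * arithmetic of initial_cost_nestloop() for a join that is not
 * SEMI/ANTI/unique-inner.  All inputs are hypothetical; in the planner the
 * rescan costs come from cost_rescan().
 */
#include <stdio.h>

int
main(void)
{
	double		outer_startup = 0.0, outer_total = 100.0, outer_rows = 10.0;
	double		inner_startup = 5.0, inner_total = 25.0;
	double		rescan_start = 0.0, rescan_total = 20.0;

	double		startup_cost = outer_startup + inner_startup;
	double		run_cost = outer_total - outer_startup;

	if (outer_rows > 1)
		run_cost += (outer_rows - 1) * rescan_start;
	run_cost += inner_total - inner_startup;	/* first inner scan */
	if (outer_rows > 1)
		run_cost += (outer_rows - 1) * (rescan_total - rescan_start);

	printf("total = %g\n", startup_cost + run_cost); /* 5 + 100 + 0 + 20 + 180 = 305 */
	return 0;
}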
3277 :
3278 : /*
3279 : * final_cost_nestloop
3280 : * Final estimate of the cost and result size of a nestloop join path.
3281 : *
3282 : * 'path' is already filled in except for the rows and cost fields
3283 : * 'workspace' is the result from initial_cost_nestloop
3284 : * 'extra' contains miscellaneous information about the join
3285 : */
3286 : void
3287 1059014 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
3288 : JoinCostWorkspace *workspace,
3289 : JoinPathExtraData *extra)
3290 : {
3291 1059014 : Path *outer_path = path->jpath.outerjoinpath;
3292 1059014 : Path *inner_path = path->jpath.innerjoinpath;
3293 1059014 : double outer_path_rows = outer_path->rows;
3294 1059014 : double inner_path_rows = inner_path->rows;
3295 1059014 : Cost startup_cost = workspace->startup_cost;
3296 1059014 : Cost run_cost = workspace->run_cost;
3297 : Cost cpu_per_tuple;
3298 : QualCost restrict_qual_cost;
3299 : double ntuples;
3300 :
3301 : /* Protect some assumptions below that rowcounts aren't zero */
3302 1059014 : if (outer_path_rows <= 0)
3303 0 : outer_path_rows = 1;
3304 1059014 : if (inner_path_rows <= 0)
3305 654 : inner_path_rows = 1;
3306 : /* Mark the path with the correct row estimate */
3307 1059014 : if (path->jpath.path.param_info)
3308 24876 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3309 : else
3310 1034138 : path->jpath.path.rows = path->jpath.path.parent->rows;
3311 :
3312 : /* For partial paths, scale row estimate. */
3313 1059014 : if (path->jpath.path.parallel_workers > 0)
3314 : {
3315 12540 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3316 :
3317 12540 : path->jpath.path.rows =
3318 12540 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3319 : }
3320 :
3321 : /*
3322 : * We could include disable_cost in the preliminary estimate, but that
3323 : * would amount to optimizing for the case where the join method is
3324 : * disabled, which doesn't seem like the way to bet.
3325 : */
3326 1059014 : if (!enable_nestloop)
3327 3498 : startup_cost += disable_cost;
3328 :
3329 : /* cost of inner-relation source data (we already dealt with outer rel) */
3330 :
3331 1059014 : if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
3332 1032438 : extra->inner_unique)
3333 680072 : {
3334 : /*
3335 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3336 : * executor will stop after the first match.
3337 : */
3338 680072 : Cost inner_run_cost = workspace->inner_run_cost;
3339 680072 : Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3340 : double outer_matched_rows;
3341 : double outer_unmatched_rows;
3342 : Selectivity inner_scan_frac;
3343 :
3344 : /*
3345 : * For an outer-rel row that has at least one match, we can expect the
3346 : * inner scan to stop after a fraction 1/(match_count+1) of the inner
3347 : * rows, if the matches are evenly distributed. Since they probably
3348 : * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3349 : * that fraction. (If we used a larger fuzz factor, we'd have to
3350 : * clamp inner_scan_frac to at most 1.0; but since match_count is at
3351 : * least 1, no such clamp is needed now.)
3352 : */
3353 680072 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
3354 680072 : outer_unmatched_rows = outer_path_rows - outer_matched_rows;
3355 680072 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3356 :
3357 : /*
3358 : * Compute number of tuples processed (not number emitted!). First,
3359 : * account for successfully-matched outer rows.
3360 : */
3361 680072 : ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
3362 :
3363 : /*
3364 : * Now we need to estimate the actual costs of scanning the inner
3365 : * relation, which may be quite a bit less than N times inner_run_cost
3366 : * due to early scan stops. We consider two cases. If the inner path
3367 : * is an indexscan using all the joinquals as indexquals, then an
3368 : * unmatched outer row results in an indexscan returning no rows,
3369 : * which is probably quite cheap. Otherwise, the executor will have
3370 : * to scan the whole inner rel for an unmatched row; not so cheap.
3371 : */
3372 680072 : if (has_indexed_join_quals(path))
3373 : {
3374 : /*
3375 : * Successfully-matched outer rows will only require scanning
3376 : * inner_scan_frac of the inner relation. In this case, we don't
3377 : * need to charge the full inner_run_cost even when that's more
3378 : * than inner_rescan_run_cost, because we can assume that none of
3379 : * the inner scans ever scan the whole inner relation. So it's
3380 : * okay to assume that all the inner scan executions can be
3381 : * fractions of the full cost, even if materialization is reducing
3382 : * the rescan cost. At this writing, it's impossible to get here
3383 : * for a materialized inner scan, so inner_run_cost and
3384 : * inner_rescan_run_cost will be the same anyway; but just in
3385 : * case, use inner_run_cost for the first matched tuple and
3386 : * inner_rescan_run_cost for additional ones.
3387 : */
3388 119294 : run_cost += inner_run_cost * inner_scan_frac;
3389 119294 : if (outer_matched_rows > 1)
3390 16276 : run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3391 :
3392 : /*
3393 : * Add the cost of inner-scan executions for unmatched outer rows.
3394 : * We estimate this as the same cost as returning the first tuple
3395 : * of a nonempty scan. We consider that these are all rescans,
3396 : * since we used inner_run_cost once already.
3397 : */
3398 119294 : run_cost += outer_unmatched_rows *
3399 119294 : inner_rescan_run_cost / inner_path_rows;
3400 :
3401 : /*
3402 : * We won't be evaluating any quals at all for unmatched rows, so
3403 : * don't add them to ntuples.
3404 : */
3405 : }
3406 : else
3407 : {
3408 : /*
3409 : * Here, a complicating factor is that rescans may be cheaper than
3410 : * first scans. If we never scan all the way to the end of the
3411 : * inner rel, it might be (depending on the plan type) that we'd
3412 : * never pay the whole inner first-scan run cost. However it is
3413 : * difficult to estimate whether that will happen (and it could
3414 : * not happen if there are any unmatched outer rows!), so be
3415 : * conservative and always charge the whole first-scan cost once.
3416 : * We consider this charge to correspond to the first unmatched
3417 : * outer row, unless there isn't one in our estimate, in which
3418 : * case blame it on the first matched row.
3419 : */
3420 :
3421 : /* First, count all unmatched join tuples as being processed */
3422 560778 : ntuples += outer_unmatched_rows * inner_path_rows;
3423 :
3424 : /* Now add the forced full scan, and decrement appropriate count */
3425 560778 : run_cost += inner_run_cost;
3426 560778 : if (outer_unmatched_rows >= 1)
3427 545616 : outer_unmatched_rows -= 1;
3428 : else
3429 15162 : outer_matched_rows -= 1;
3430 :
3431 : /* Add inner run cost for additional outer tuples having matches */
3432 560778 : if (outer_matched_rows > 0)
3433 202214 : run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3434 :
3435 : /* Add inner run cost for additional unmatched outer tuples */
3436 560778 : if (outer_unmatched_rows > 0)
3437 370458 : run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3438 : }
3439 : }
3440 : else
3441 : {
3442 : /* Normal-case source costs were included in preliminary estimate */
3443 :
3444 : /* Compute number of tuples processed (not number emitted!) */
3445 378942 : ntuples = outer_path_rows * inner_path_rows;
3446 : }
3447 :
3448 : /* CPU costs */
3449 1059014 : cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
3450 1059014 : startup_cost += restrict_qual_cost.startup;
3451 1059014 : cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
3452 1059014 : run_cost += cpu_per_tuple * ntuples;
3453 :
3454 : /* tlist eval costs are paid per output row, not per tuple scanned */
3455 1059014 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3456 1059014 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3457 :
3458 1059014 : path->jpath.path.startup_cost = startup_cost;
3459 1059014 : path->jpath.path.total_cost = startup_cost + run_cost;
3460 1059014 : }
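/*
 * Illustrative, standalone sketch (not part of costsize.c): the early-stop
 * fraction used above for SEMI/ANTI/unique-inner joins.  If matches were
 * spread evenly, a scan would stop after 1/(match_count + 1) of the inner
 * rows; the fuzz factor of 2.0 doubles that to allow for uneven
 * distribution.  Numbers are hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	double		match_count = 3.0;	/* avg inner matches per matched outer row */
	double		inner_rows = 1000.0;
	double		inner_scan_frac = 2.0 / (match_count + 1.0);	/* 0.5 */

	/* expected inner tuples visited per successfully-matched outer row */
	printf("%g tuples\n", inner_rows * inner_scan_frac);	/* 500 */
	return 0;
}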
3461 :
3462 : /*
3463 : * initial_cost_mergejoin
3464 : * Preliminary estimate of the cost of a mergejoin path.
3465 : *
3466 : * This must quickly produce lower-bound estimates of the path's startup and
3467 : * total costs. If we are unable to eliminate the proposed path from
3468 : * consideration using the lower bounds, final_cost_mergejoin will be called
3469 : * to obtain the final estimates.
3470 : *
3471 : * The exact division of labor between this function and final_cost_mergejoin
3472 : * is private to them, and represents a tradeoff between speed of the initial
3473 : * estimate and getting a tight lower bound. We choose to not examine the
3474 : * join quals here, except for obtaining the scan selectivity estimate which
3475 : * is really essential (but fortunately, use of caching keeps the cost of
3476 : * getting that down to something reasonable).
3477 : * We also assume that cost_sort is cheap enough to use here.
3478 : *
3479 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3480 : * other data to be used by final_cost_mergejoin
3481 : * 'jointype' is the type of join to be performed
3482 : * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3483 : * 'outer_path' is the outer input to the join
3484 : * 'inner_path' is the inner input to the join
3485 : * 'outersortkeys' is the list of sort keys for the outer path
3486 : * 'innersortkeys' is the list of sort keys for the inner path
3487 : * 'extra' contains miscellaneous information about the join
3488 : *
3489 : * Note: outersortkeys and innersortkeys should be NIL if no explicit
3490 : * sort is needed because the respective source path is already ordered.
3491 : */
3492 : void
3493 933472 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3494 : JoinType jointype,
3495 : List *mergeclauses,
3496 : Path *outer_path, Path *inner_path,
3497 : List *outersortkeys, List *innersortkeys,
3498 : JoinPathExtraData *extra)
3499 : {
3500 933472 : Cost startup_cost = 0;
3501 933472 : Cost run_cost = 0;
3502 933472 : double outer_path_rows = outer_path->rows;
3503 933472 : double inner_path_rows = inner_path->rows;
3504 : Cost inner_run_cost;
3505 : double outer_rows,
3506 : inner_rows,
3507 : outer_skip_rows,
3508 : inner_skip_rows;
3509 : Selectivity outerstartsel,
3510 : outerendsel,
3511 : innerstartsel,
3512 : innerendsel;
3513 : Path sort_path; /* dummy for result of cost_sort */
3514 :
3515 : /* Protect some assumptions below that rowcounts aren't zero */
3516 933472 : if (outer_path_rows <= 0)
3517 96 : outer_path_rows = 1;
3518 933472 : if (inner_path_rows <= 0)
3519 126 : inner_path_rows = 1;
3520 :
3521 : /*
3522 : * A merge join will stop as soon as it exhausts either input stream
3523 : * (unless it's an outer join, in which case the outer side has to be
3524 : * scanned all the way anyway). Estimate fraction of the left and right
3525 : * inputs that will actually need to be scanned. Likewise, we can
3526 : * estimate the number of rows that will be skipped before the first join
3527 : * pair is found, which should be factored into startup cost. We use only
3528 : * the first (most significant) merge clause for this purpose. Since
3529 : * mergejoinscansel() is a fairly expensive computation, we cache the
3530 : * results in the merge clause RestrictInfo.
3531 : */
3532 933472 : if (mergeclauses && jointype != JOIN_FULL)
3533 927368 : {
3534 927368 : RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3535 : List *opathkeys;
3536 : List *ipathkeys;
3537 : PathKey *opathkey;
3538 : PathKey *ipathkey;
3539 : MergeScanSelCache *cache;
3540 :
3541 : /* Get the input pathkeys to determine the sort-order details */
3542 927368 : opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3543 927368 : ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3544 : Assert(opathkeys);
3545 : Assert(ipathkeys);
3546 927368 : opathkey = (PathKey *) linitial(opathkeys);
3547 927368 : ipathkey = (PathKey *) linitial(ipathkeys);
3548 : /* debugging check */
3549 927368 : if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
3550 927368 : opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
3551 927368 : opathkey->pk_strategy != ipathkey->pk_strategy ||
3552 927368 : opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
3553 0 : elog(ERROR, "left and right pathkeys do not match in mergejoin");
3554 :
3555 : /* Get the selectivity with caching */
3556 927368 : cache = cached_scansel(root, firstclause, opathkey);
3557 :
3558 927368 : if (bms_is_subset(firstclause->left_relids,
3559 927368 : outer_path->parent->relids))
3560 : {
3561 : /* left side of clause is outer */
3562 494852 : outerstartsel = cache->leftstartsel;
3563 494852 : outerendsel = cache->leftendsel;
3564 494852 : innerstartsel = cache->rightstartsel;
3565 494852 : innerendsel = cache->rightendsel;
3566 : }
3567 : else
3568 : {
3569 : /* left side of clause is inner */
3570 432516 : outerstartsel = cache->rightstartsel;
3571 432516 : outerendsel = cache->rightendsel;
3572 432516 : innerstartsel = cache->leftstartsel;
3573 432516 : innerendsel = cache->leftendsel;
3574 : }
3575 927368 : if (jointype == JOIN_LEFT ||
3576 : jointype == JOIN_ANTI)
3577 : {
3578 153160 : outerstartsel = 0.0;
3579 153160 : outerendsel = 1.0;
3580 : }
3581 774208 : else if (jointype == JOIN_RIGHT ||
3582 : jointype == JOIN_RIGHT_ANTI)
3583 : {
3584 157162 : innerstartsel = 0.0;
3585 157162 : innerendsel = 1.0;
3586 : }
3587 : }
3588 : else
3589 : {
3590 : /* cope with clauseless or full mergejoin */
3591 6104 : outerstartsel = innerstartsel = 0.0;
3592 6104 : outerendsel = innerendsel = 1.0;
3593 : }
3594 :
3595 : /*
3596 : * Convert selectivities to row counts. We force outer_rows and
3597 : * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3598 : */
3599 933472 : outer_skip_rows = rint(outer_path_rows * outerstartsel);
3600 933472 : inner_skip_rows = rint(inner_path_rows * innerstartsel);
3601 933472 : outer_rows = clamp_row_est(outer_path_rows * outerendsel);
3602 933472 : inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3603 :
3604 : Assert(outer_skip_rows <= outer_rows);
3605 : Assert(inner_skip_rows <= inner_rows);
3606 :
3607 : /*
3608 : * Readjust scan selectivities to account for above rounding. This is
3609 : * normally an insignificant effect, but when there are only a few rows in
3610 : * the inputs, failing to do this makes for a large percentage error.
3611 : */
3612 933472 : outerstartsel = outer_skip_rows / outer_path_rows;
3613 933472 : innerstartsel = inner_skip_rows / inner_path_rows;
3614 933472 : outerendsel = outer_rows / outer_path_rows;
3615 933472 : innerendsel = inner_rows / inner_path_rows;
3616 :
3617 : Assert(outerstartsel <= outerendsel);
3618 : Assert(innerstartsel <= innerendsel);
3619 :
3620 : /* cost of source data */
3621 :
3622 933472 : if (outersortkeys) /* do we need to sort outer? */
3623 : {
3624 444590 : cost_sort(&sort_path,
3625 : root,
3626 : outersortkeys,
3627 : outer_path->total_cost,
3628 : outer_path_rows,
3629 444590 : outer_path->pathtarget->width,
3630 : 0.0,
3631 : work_mem,
3632 : -1.0);
3633 444590 : startup_cost += sort_path.startup_cost;
3634 444590 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3635 444590 : * outerstartsel;
3636 444590 : run_cost += (sort_path.total_cost - sort_path.startup_cost)
3637 444590 : * (outerendsel - outerstartsel);
3638 : }
3639 : else
3640 : {
3641 488882 : startup_cost += outer_path->startup_cost;
3642 488882 : startup_cost += (outer_path->total_cost - outer_path->startup_cost)
3643 488882 : * outerstartsel;
3644 488882 : run_cost += (outer_path->total_cost - outer_path->startup_cost)
3645 488882 : * (outerendsel - outerstartsel);
3646 : }
3647 :
3648 933472 : if (innersortkeys) /* do we need to sort inner? */
3649 : {
3650 728050 : cost_sort(&sort_path,
3651 : root,
3652 : innersortkeys,
3653 : inner_path->total_cost,
3654 : inner_path_rows,
3655 728050 : inner_path->pathtarget->width,
3656 : 0.0,
3657 : work_mem,
3658 : -1.0);
3659 728050 : startup_cost += sort_path.startup_cost;
3660 728050 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3661 728050 : * innerstartsel;
3662 728050 : inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3663 728050 : * (innerendsel - innerstartsel);
3664 : }
3665 : else
3666 : {
3667 205422 : startup_cost += inner_path->startup_cost;
3668 205422 : startup_cost += (inner_path->total_cost - inner_path->startup_cost)
3669 205422 : * innerstartsel;
3670 205422 : inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
3671 205422 : * (innerendsel - innerstartsel);
3672 : }
3673 :
3674 : /*
3675 : * We can't yet determine whether rescanning occurs, or whether
3676 : * materialization of the inner input should be done. The minimum
3677 : * possible inner input cost, regardless of rescan and materialization
3678 : * considerations, is inner_run_cost. We include that in
3679 : * workspace->total_cost, but not yet in run_cost.
3680 : */
3681 :
3682 : /* CPU costs left for later */
3683 :
3684 : /* Public result fields */
3685 933472 : workspace->startup_cost = startup_cost;
3686 933472 : workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3687 : /* Save private data for final_cost_mergejoin */
3688 933472 : workspace->run_cost = run_cost;
3689 933472 : workspace->inner_run_cost = inner_run_cost;
3690 933472 : workspace->outer_rows = outer_rows;
3691 933472 : workspace->inner_rows = inner_rows;
3692 933472 : workspace->outer_skip_rows = outer_skip_rows;
3693 933472 : workspace->inner_skip_rows = inner_skip_rows;
3694 933472 : }
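/*
 * Illustrative, standalone sketch (not part of costsize.c): how
 * initial_cost_mergejoin() splits one input's run cost between startup and
 * run cost using the merge scan selectivities.  Hypothetical numbers for an
 * already-sorted outer input.
 */
#include <stdio.h>

int
main(void)
{
	double		path_startup = 10.0, path_total = 110.0;
	double		startsel = 0.2;	/* fraction skipped before first join pair */
	double		endsel = 0.8;	/* fraction scanned before input exhausts */

	double		startup_cost = path_startup +
		(path_total - path_startup) * startsel;
	double		run_cost = (path_total - path_startup) * (endsel - startsel);

	printf("startup = %g, run = %g\n", startup_cost, run_cost);
	/* startup = 10 + 100 * 0.2 = 30; run = 100 * 0.6 = 60 */
	return 0;
}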
3695 :
3696 : /*
3697 : * final_cost_mergejoin
3698 : * Final estimate of the cost and result size of a mergejoin path.
3699 : *
3700 : * Unlike other costsize functions, this routine makes two actual decisions:
3701 : * whether the executor will need to do mark/restore, and whether we should
3702 : * materialize the inner path. It would be logically cleaner to build
3703 : * separate paths testing these alternatives, but that would require repeating
3704 : * most of the cost calculations, which are not all that cheap. Since the
3705 : * choice will not affect output pathkeys or startup cost, only total cost,
3706 : * there is no possibility of wanting to keep more than one path. So it seems
3707 : * best to make the decisions here and record them in the path's
3708 : * skip_mark_restore and materialize_inner fields.
3709 : *
3710 : * Mark/restore overhead is usually required, but can be skipped if we know
3711 : * that the executor need find only one match per outer tuple, and that the
3712 : * mergeclauses are sufficient to identify a match.
3713 : *
3714 : * We materialize the inner path if we need mark/restore and either the inner
3715 : * path can't support mark/restore, or it's cheaper to use an interposed
3716 : * Material node to handle mark/restore.
3717 : *
3718 : * 'path' is already filled in except for the rows and cost fields and
3719 : * skip_mark_restore and materialize_inner
3720 : * 'workspace' is the result from initial_cost_mergejoin
3721 : * 'extra' contains miscellaneous information about the join
3722 : */
3723 : void
3724 241182 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3725 : JoinCostWorkspace *workspace,
3726 : JoinPathExtraData *extra)
3727 : {
3728 241182 : Path *outer_path = path->jpath.outerjoinpath;
3729 241182 : Path *inner_path = path->jpath.innerjoinpath;
3730 241182 : double inner_path_rows = inner_path->rows;
3731 241182 : List *mergeclauses = path->path_mergeclauses;
3732 241182 : List *innersortkeys = path->innersortkeys;
3733 241182 : Cost startup_cost = workspace->startup_cost;
3734 241182 : Cost run_cost = workspace->run_cost;
3735 241182 : Cost inner_run_cost = workspace->inner_run_cost;
3736 241182 : double outer_rows = workspace->outer_rows;
3737 241182 : double inner_rows = workspace->inner_rows;
3738 241182 : double outer_skip_rows = workspace->outer_skip_rows;
3739 241182 : double inner_skip_rows = workspace->inner_skip_rows;
3740 : Cost cpu_per_tuple,
3741 : bare_inner_cost,
3742 : mat_inner_cost;
3743 : QualCost merge_qual_cost;
3744 : QualCost qp_qual_cost;
3745 : double mergejointuples,
3746 : rescannedtuples;
3747 : double rescanratio;
3748 :
3749 : /* Protect some assumptions below that rowcounts aren't zero */
3750 241182 : if (inner_path_rows <= 0)
3751 90 : inner_path_rows = 1;
3752 :
3753 : /* Mark the path with the correct row estimate */
3754 241182 : if (path->jpath.path.param_info)
3755 654 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3756 : else
3757 240528 : path->jpath.path.rows = path->jpath.path.parent->rows;
3758 :
3759 : /* For partial paths, scale row estimate. */
3760 241182 : if (path->jpath.path.parallel_workers > 0)
3761 : {
3762 9020 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3763 :
3764 9020 : path->jpath.path.rows =
3765 9020 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3766 : }
3767 :
3768 : /*
3769 : * We could include disable_cost in the preliminary estimate, but that
3770 : * would amount to optimizing for the case where the join method is
3771 : * disabled, which doesn't seem like the way to bet.
3772 : */
3773 241182 : if (!enable_mergejoin)
3774 0 : startup_cost += disable_cost;
3775 :
3776 : /*
3777 : * Compute cost of the mergequals and qpquals (other restriction clauses)
3778 : * separately.
3779 : */
3780 241182 : cost_qual_eval(&merge_qual_cost, mergeclauses, root);
3781 241182 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3782 241182 : qp_qual_cost.startup -= merge_qual_cost.startup;
3783 241182 : qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
3784 :
3785 : /*
3786 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3787 : * executor will stop scanning for matches after the first match. When
3788 : * all the joinclauses are merge clauses, this means we don't ever need to
3789 : * back up the merge, and so we can skip mark/restore overhead.
3790 : */
3791 241182 : if ((path->jpath.jointype == JOIN_SEMI ||
3792 237488 : path->jpath.jointype == JOIN_ANTI ||
3793 345896 : extra->inner_unique) &&
3794 114542 : (list_length(path->jpath.joinrestrictinfo) ==
3795 114542 : list_length(path->path_mergeclauses)))
3796 98752 : path->skip_mark_restore = true;
3797 : else
3798 142430 : path->skip_mark_restore = false;
3799 :
3800 : /*
3801 : * Get approx # tuples passing the mergequals. We use approx_tuple_count
3802 : * here because we need an estimate done with JOIN_INNER semantics.
3803 : */
3804 241182 : mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
3805 :
3806 : /*
3807 : * When there are equal merge keys in the outer relation, the mergejoin
3808 : * must rescan any matching tuples in the inner relation. This means
3809 : * re-fetching inner tuples; we have to estimate how often that happens.
3810 : *
3811 : * For regular inner and outer joins, the number of re-fetches can be
3812 : * estimated approximately as size of merge join output minus size of
3813 : * inner relation. Assume that the distinct key values are 1, 2, ..., and
3814 : * denote the number of values of each key in the outer relation as m1,
3815 : * m2, ...; in the inner relation, n1, n2, ... Then we have
3816 : *
3817 : * size of join = m1 * n1 + m2 * n2 + ...
3818 : *
3819 : * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
3820 : * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
3821 : * relation
3822 : *
3823 : * This equation works correctly for outer tuples having no inner match
3824 : * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
3825 : * are effectively subtracting those from the number of rescanned tuples,
3826 : * when we should not. Can we do better without expensive selectivity
3827 : * computations?
3828 : *
3829 : * The whole issue is moot if we are working from a unique-ified outer
3830 : * input, or if we know we don't need to mark/restore at all.
3831 : */
3832 241182 : if (IsA(outer_path, UniquePath) || path->skip_mark_restore)
3833 99650 : rescannedtuples = 0;
3834 : else
3835 : {
3836 141532 : rescannedtuples = mergejointuples - inner_path_rows;
3837 : /* Must clamp because of possible underestimate */
3838 141532 : if (rescannedtuples < 0)
3839 55382 : rescannedtuples = 0;
3840 : }
3841 :
3842 : /*
3843 : * We'll inflate various costs this much to account for rescanning. Note
3844 : * that this is to be multiplied by something involving inner_rows, or
3845 : * another number related to the portion of the inner rel we'll scan.
3846 : */
3847 241182 : rescanratio = 1.0 + (rescannedtuples / inner_rows);
3848 :
3849 : /*
3850 : * Decide whether we want to materialize the inner input to shield it from
3851 : * mark/restore and from performing re-fetches.  Our cost model for regular
3852 : * re-fetches is that a re-fetch costs the same as an original fetch,
3853 : * which is probably an overestimate; but on the other hand we ignore the
3854 : * bookkeeping costs of mark/restore. Not clear if it's worth developing
3855 : * a more refined model. So we just need to inflate the inner run cost by
3856 : * rescanratio.
3857 : */
3858 241182 : bare_inner_cost = inner_run_cost * rescanratio;
3859 :
3860 : /*
3861 : * When we interpose a Material node the re-fetch cost is assumed to be
3862 : * just cpu_operator_cost per tuple, independently of the underlying
3863 : * plan's cost; and we charge an extra cpu_operator_cost per original
3864 : * fetch as well. Note that we're assuming the materialize node will
3865 : * never spill to disk, since it only has to remember tuples back to the
3866 : * last mark. (If there are a huge number of duplicates, our other cost
3867 : * factors will make the path so expensive that it probably won't get
3868 : * chosen anyway.) So we don't use cost_rescan here.
3869 : *
3870 : * Note: keep this estimate in sync with create_mergejoin_plan's labeling
3871 : * of the generated Material node.
3872 : */
3873 241182 : mat_inner_cost = inner_run_cost +
3874 241182 : cpu_operator_cost * inner_rows * rescanratio;
3875 :
3876 : /*
3877 : * If we don't need mark/restore at all, we don't need materialization.
3878 : */
3879 241182 : if (path->skip_mark_restore)
3880 98752 : path->materialize_inner = false;
3881 :
3882 : /*
3883 : * Prefer materializing if it looks cheaper, unless the user has asked to
3884 : * suppress materialization.
3885 : */
3886 142430 : else if (enable_material && mat_inner_cost < bare_inner_cost)
3887 1910 : path->materialize_inner = true;
3888 :
3889 : /*
3890 : * Even if materializing doesn't look cheaper, we *must* do it if the
3891 : * inner path is to be used directly (without sorting) and it doesn't
3892 : * support mark/restore.
3893 : *
3894 : * Since the inner side must be ordered, and only Sorts and IndexScans can
3895 : * create order to begin with, and they both support mark/restore, you
3896 : * might think there's no problem --- but you'd be wrong. Nestloop and
3897 : * merge joins can *preserve* the order of their inputs, so they can be
3898 : * selected as the input of a mergejoin, and they don't support
3899 : * mark/restore at present.
3900 : *
3901 : * We don't test the value of enable_material here, because
3902 : * materialization is required for correctness in this case, and turning
3903 : * it off does not entitle us to deliver an invalid plan.
3904 : */
3905 140520 : else if (innersortkeys == NIL &&
3906 10334 : !ExecSupportsMarkRestore(inner_path))
3907 1064 : path->materialize_inner = true;
3908 :
3909 : /*
3910 : * Also, force materializing if the inner path is to be sorted and the
3911 : * sort is expected to spill to disk. This is because the final merge
3912 : * pass can be done on-the-fly if it doesn't have to support mark/restore.
3913 : * We don't try to adjust the cost estimates for this consideration,
3914 : * though.
3915 : *
3916 : * Since materialization is a performance optimization in this case,
3917 : * rather than necessary for correctness, we skip it if enable_material is
3918 : * off.
3919 : */
3920 139456 : else if (enable_material && innersortkeys != NIL &&
3921 130138 : relation_byte_size(inner_path_rows,
3922 130138 : inner_path->pathtarget->width) >
3923 130138 : (work_mem * 1024L))
3924 172 : path->materialize_inner = true;
3925 : else
3926 139284 : path->materialize_inner = false;
3927 :
3928 : /* Charge the right incremental cost for the chosen case */
3929 241182 : if (path->materialize_inner)
3930 3146 : run_cost += mat_inner_cost;
3931 : else
3932 238036 : run_cost += bare_inner_cost;
3933 :
3934 : /* CPU costs */
3935 :
3936 : /*
3937 : * The number of tuple comparisons needed is approximately number of outer
3938 : * rows plus number of inner rows plus number of rescanned tuples (can we
3939 : * refine this?). At each one, we need to evaluate the mergejoin quals.
3940 : */
3941 241182 : startup_cost += merge_qual_cost.startup;
3942 241182 : startup_cost += merge_qual_cost.per_tuple *
3943 241182 : (outer_skip_rows + inner_skip_rows * rescanratio);
3944 241182 : run_cost += merge_qual_cost.per_tuple *
3945 241182 : ((outer_rows - outer_skip_rows) +
3946 241182 : (inner_rows - inner_skip_rows) * rescanratio);
3947 :
3948 : /*
3949 : * For each tuple that gets through the mergejoin proper, we charge
3950 : * cpu_tuple_cost plus the cost of evaluating additional restriction
3951 : * clauses that are to be applied at the join. (This is pessimistic since
3952 : * not all of the quals may get evaluated at each tuple.)
3953 : *
3954 : * Note: we could adjust for SEMI/ANTI joins skipping some qual
3955 : * evaluations here, but it's probably not worth the trouble.
3956 : */
3957 241182 : startup_cost += qp_qual_cost.startup;
3958 241182 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
3959 241182 : run_cost += cpu_per_tuple * mergejointuples;
3960 :
3961 : /* tlist eval costs are paid per output row, not per tuple scanned */
3962 241182 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3963 241182 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3964 :
3965 241182 : path->jpath.path.startup_cost = startup_cost;
3966 241182 : path->jpath.path.total_cost = startup_cost + run_cost;
3967 241182 : }
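/*
 * Illustrative, standalone sketch (not part of costsize.c): the
 * rescanned-tuples identity used above.  With two distinct key values,
 * appearing m1,m2 times in the outer rel and n1,n2 times in the inner rel,
 * the number of re-fetched inner tuples equals join size minus inner size.
 */
#include <stdio.h>

int
main(void)
{
	double		m1 = 3, n1 = 4;	/* key value 1 */
	double		m2 = 2, n2 = 5;	/* key value 2 */

	double		join_size = m1 * n1 + m2 * n2;	/* 22 */
	double		inner_size = n1 + n2;	/* 9 */
	double		rescanned = (m1 - 1) * n1 + (m2 - 1) * n2;	/* 13 */

	printf("%g == %g\n", rescanned, join_size - inner_size); /* 13 == 13 */
	return 0;
}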
3968 :
3969 : /*
3970 : * run mergejoinscansel() with caching
3971 : */
3972 : static MergeScanSelCache *
3973 927368 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
3974 : {
3975 : MergeScanSelCache *cache;
3976 : ListCell *lc;
3977 : Selectivity leftstartsel,
3978 : leftendsel,
3979 : rightstartsel,
3980 : rightendsel;
3981 : MemoryContext oldcontext;
3982 :
3983 : /* Do we have this result already? */
3984 927410 : foreach(lc, rinfo->scansel_cache)
3985 : {
3986 835920 : cache = (MergeScanSelCache *) lfirst(lc);
3987 835920 : if (cache->opfamily == pathkey->pk_opfamily &&
3988 835920 : cache->collation == pathkey->pk_eclass->ec_collation &&
3989 835920 : cache->strategy == pathkey->pk_strategy &&
3990 835878 : cache->nulls_first == pathkey->pk_nulls_first)
3991 835878 : return cache;
3992 : }
3993 :
3994 : /* Nope, do the computation */
3995 91490 : mergejoinscansel(root,
3996 91490 : (Node *) rinfo->clause,
3997 : pathkey->pk_opfamily,
3998 : pathkey->pk_strategy,
3999 91490 : pathkey->pk_nulls_first,
4000 : &leftstartsel,
4001 : &leftendsel,
4002 : &rightstartsel,
4003 : &rightendsel);
4004 :
4005 : /* Cache the result in suitably long-lived workspace */
4006 91490 : oldcontext = MemoryContextSwitchTo(root->planner_cxt);
4007 :
4008 91490 : cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
4009 91490 : cache->opfamily = pathkey->pk_opfamily;
4010 91490 : cache->collation = pathkey->pk_eclass->ec_collation;
4011 91490 : cache->strategy = pathkey->pk_strategy;
4012 91490 : cache->nulls_first = pathkey->pk_nulls_first;
4013 91490 : cache->leftstartsel = leftstartsel;
4014 91490 : cache->leftendsel = leftendsel;
4015 91490 : cache->rightstartsel = rightstartsel;
4016 91490 : cache->rightendsel = rightendsel;
4017 :
4018 91490 : rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
4019 :
4020 91490 : MemoryContextSwitchTo(oldcontext);
4021 :
4022 91490 : return cache;
4023 : }
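/*
 * Illustrative, standalone sketch (not part of costsize.c): the
 * lookup-then-append caching pattern cached_scansel() uses, reduced to a
 * plain singly-linked list keyed by an int.  The real code additionally
 * switches into the planner's long-lived memory context before allocating
 * the cache entry; this demo leaks its entries, which is fine for a
 * one-shot program.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct CacheEntry
{
	int			key;
	double		value;
	struct CacheEntry *next;
} CacheEntry;

static double
expensive_computation(int key)
{
	return key * 0.1;			/* stand-in for mergejoinscansel() */
}

static double
cached_lookup(CacheEntry **cache, int key)
{
	CacheEntry *e;

	/* Do we have this result already? */
	for (e = *cache; e != NULL; e = e->next)
		if (e->key == key)
			return e->value;

	/* Nope, do the computation and remember it */
	e = malloc(sizeof(CacheEntry));
	e->key = key;
	e->value = expensive_computation(key);
	e->next = *cache;
	*cache = e;
	return e->value;
}

int
main(void)
{
	CacheEntry *cache = NULL;

	printf("%g\n", cached_lookup(&cache, 7));	/* computed */
	printf("%g\n", cached_lookup(&cache, 7));	/* served from cache */
	return 0;
}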
4024 :
4025 : /*
4026 : * initial_cost_hashjoin
4027 : * Preliminary estimate of the cost of a hashjoin path.
4028 : *
4029 : * This must quickly produce lower-bound estimates of the path's startup and
4030 : * total costs. If we are unable to eliminate the proposed path from
4031 : * consideration using the lower bounds, final_cost_hashjoin will be called
4032 : * to obtain the final estimates.
4033 : *
4034 : * The exact division of labor between this function and final_cost_hashjoin
4035 : * is private to them, and represents a tradeoff between speed of the initial
4036 : * estimate and getting a tight lower bound. We choose to not examine the
4037 : * join quals here (other than by counting the number of hash clauses),
4038 : * so we can't do much with CPU costs. We do assume that
4039 : * ExecChooseHashTableSize is cheap enough to use here.
4040 : *
4041 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
4042 : * other data to be used by final_cost_hashjoin
4043 : * 'jointype' is the type of join to be performed
4044 : * 'hashclauses' is the list of joinclauses to be used as hash clauses
4045 : * 'outer_path' is the outer input to the join
4046 : * 'inner_path' is the inner input to the join
4047 : * 'extra' contains miscellaneous information about the join
4048 : * 'parallel_hash' indicates that inner_path is partial and that a shared
4049 : * hash table will be built in parallel
4050 : */
4051 : void
4052 517688 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
4053 : JoinType jointype,
4054 : List *hashclauses,
4055 : Path *outer_path, Path *inner_path,
4056 : JoinPathExtraData *extra,
4057 : bool parallel_hash)
4058 : {
4059 517688 : Cost startup_cost = 0;
4060 517688 : Cost run_cost = 0;
4061 517688 : double outer_path_rows = outer_path->rows;
4062 517688 : double inner_path_rows = inner_path->rows;
4063 517688 : double inner_path_rows_total = inner_path_rows;
4064 517688 : int num_hashclauses = list_length(hashclauses);
4065 : int numbuckets;
4066 : int numbatches;
4067 : int num_skew_mcvs;
4068 : size_t space_allowed; /* unused */
4069 :
4070 : /* cost of source data */
4071 517688 : startup_cost += outer_path->startup_cost;
4072 517688 : run_cost += outer_path->total_cost - outer_path->startup_cost;
4073 517688 : startup_cost += inner_path->total_cost;
4074 :
4075 : /*
4076 : * Cost of computing hash function: must do it once per input tuple. We
4077 : * charge one cpu_operator_cost for each column's hash function. Also,
4078 : * tack on one cpu_tuple_cost per inner row, to model the costs of
4079 : * inserting the row into the hashtable.
4080 : *
4081 : * XXX when a hashclause is more complex than a single operator, we really
4082 : * should charge the extra eval costs of the left or right side, as
4083 : * appropriate, here. This seems more work than it's worth at the moment.
4084 : */
4085 517688 : startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
4086 517688 : * inner_path_rows;
4087 517688 : run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
4088 :
4089 : /*
4090 : * If this is a parallel hash build, then the value we have for
4091 : * inner_rows_total currently refers only to the rows returned by each
4092 : * participant. For shared hash table size estimation, we need the total
4093 : * number, so we need to undo the division.
4094 : */
4095 517688 : if (parallel_hash)
4096 12024 : inner_path_rows_total *= get_parallel_divisor(inner_path);
4097 :
4098 : /*
4099 : * Get hash table size that executor would use for inner relation.
4100 : *
4101 : * XXX for the moment, always assume that skew optimization will be
4102 : * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
4103 : * trying to determine that for sure.
4104 : *
4105 : * XXX at some point it might be interesting to try to account for skew
4106 : * optimization in the cost estimate, but for now, we don't.
4107 : */
4108 517688 : ExecChooseHashTableSize(inner_path_rows_total,
4109 517688 : inner_path->pathtarget->width,
4110 : true, /* useskew */
4111 : parallel_hash, /* try_combined_hash_mem */
4112 : outer_path->parallel_workers,
4113 : &space_allowed,
4114 : &numbuckets,
4115 : &numbatches,
4116 : &num_skew_mcvs);
4117 :
4118 : /*
4119 : * If the inner relation is too big, we will need to "batch" the join,
4120 : * which implies writing and reading most of the tuples to disk an extra
4121 : * time. Charge seq_page_cost per page, since the I/O should be nice and
4122 : * sequential. Writing the inner rel counts as startup cost, all the rest
4123 : * as run cost.
4124 : */
4125 517688 : if (numbatches > 1)
4126 : {
4127 4460 : double outerpages = page_size(outer_path_rows,
4128 4460 : outer_path->pathtarget->width);
4129 4460 : double innerpages = page_size(inner_path_rows,
4130 4460 : inner_path->pathtarget->width);
4131 :
4132 4460 : startup_cost += seq_page_cost * innerpages;
4133 4460 : run_cost += seq_page_cost * (innerpages + 2 * outerpages);
4134 : }
4135 :
4136 : /* CPU costs left for later */
4137 :
4138 : /* Public result fields */
4139 517688 : workspace->startup_cost = startup_cost;
4140 517688 : workspace->total_cost = startup_cost + run_cost;
4141 : /* Save private data for final_cost_hashjoin */
4142 517688 : workspace->run_cost = run_cost;
4143 517688 : workspace->numbuckets = numbuckets;
4144 517688 : workspace->numbatches = numbatches;
4145 517688 : workspace->inner_rows_total = inner_path_rows_total;
4146 517688 : }
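/*
 * Illustrative, standalone sketch (not part of costsize.c): the extra
 * batching I/O charged by initial_cost_hashjoin() when the hash table
 * doesn't fit in memory, using PostgreSQL's default seq_page_cost of 1.0
 * and hypothetical page counts.
 */
#include <stdio.h>

int
main(void)
{
	double		seq_page_cost = 1.0;	/* default GUC value */
	double		innerpages = 100.0;
	double		outerpages = 400.0;
	double		startup_cost = 0.0, run_cost = 0.0;

	/* write out the inner rel during startup ... */
	startup_cost += seq_page_cost * innerpages;
	/* ... then read it back, plus write and re-read most of the outer rel */
	run_cost += seq_page_cost * (innerpages + 2 * outerpages);

	printf("startup += %g, run += %g\n", startup_cost, run_cost); /* 100, 900 */
	return 0;
}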
4147 :
4148 : /*
4149 : * final_cost_hashjoin
4150 : * Final estimate of the cost and result size of a hashjoin path.
4151 : *
4152 : * Note: the numbatches estimate is also saved into 'path' for use later
4153 : *
4154 : * 'path' is already filled in except for the rows and cost fields and
4155 : * num_batches
4156 : * 'workspace' is the result from initial_cost_hashjoin
4157 : * 'extra' contains miscellaneous information about the join
4158 : */
4159 : void
4160 219026 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4161 : JoinCostWorkspace *workspace,
4162 : JoinPathExtraData *extra)
4163 : {
4164 219026 : Path *outer_path = path->jpath.outerjoinpath;
4165 219026 : Path *inner_path = path->jpath.innerjoinpath;
4166 219026 : double outer_path_rows = outer_path->rows;
4167 219026 : double inner_path_rows = inner_path->rows;
4168 219026 : double inner_path_rows_total = workspace->inner_rows_total;
4169 219026 : List *hashclauses = path->path_hashclauses;
4170 219026 : Cost startup_cost = workspace->startup_cost;
4171 219026 : Cost run_cost = workspace->run_cost;
4172 219026 : int numbuckets = workspace->numbuckets;
4173 219026 : int numbatches = workspace->numbatches;
4174 : Cost cpu_per_tuple;
4175 : QualCost hash_qual_cost;
4176 : QualCost qp_qual_cost;
4177 : double hashjointuples;
4178 : double virtualbuckets;
4179 : Selectivity innerbucketsize;
4180 : Selectivity innermcvfreq;
4181 : ListCell *hcl;
4182 :
4183 : /* Mark the path with the correct row estimate */
4184 219026 : if (path->jpath.path.param_info)
4185 1326 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4186 : else
4187 217700 : path->jpath.path.rows = path->jpath.path.parent->rows;
4188 :
4189 : /* For partial paths, scale row estimate. */
4190 219026 : if (path->jpath.path.parallel_workers > 0)
4191 : {
4192 10938 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
4193 :
4194 10938 : path->jpath.path.rows =
4195 10938 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
4196 : }
4197 :
4198 : /*
4199 : * We could include disable_cost in the preliminary estimate, but that
4200 : * would amount to optimizing for the case where the join method is
4201 : * disabled, which doesn't seem like the way to bet.
4202 : */
4203 219026 : if (!enable_hashjoin)
4204 204 : startup_cost += disable_cost;
4205 :
4206 : /* mark the path with estimated # of batches */
4207 219026 : path->num_batches = numbatches;
4208 :
4209 : /* store the total number of tuples (sum of partial row estimates) */
4210 219026 : path->inner_rows_total = inner_path_rows_total;
4211 :
4212 : /* and compute the number of "virtual" buckets in the whole join */
4213 219026 : virtualbuckets = (double) numbuckets * (double) numbatches;
4214 :
4215 : /*
4216 : * Determine bucketsize fraction and MCV frequency for the inner relation.
4217 : * We use the smallest bucketsize or MCV frequency estimated for any
4218 : * individual hashclause; this is undoubtedly conservative.
4219 : *
4220 : * BUT: if inner relation has been unique-ified, we can assume it's good
4221 : * for hashing. This is important both because it's the right answer, and
4222 : * because we avoid contaminating the cache with a value that's wrong for
4223 : * non-unique-ified paths.
4224 : */
4225 219026 : if (IsA(inner_path, UniquePath))
4226 : {
4227 1922 : innerbucketsize = 1.0 / virtualbuckets;
4228 1922 : innermcvfreq = 0.0;
4229 : }
4230 : else
4231 : {
4232 217104 : innerbucketsize = 1.0;
4233 217104 : innermcvfreq = 1.0;
4234 454726 : foreach(hcl, hashclauses)
4235 : {
4236 237622 : RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
4237 : Selectivity thisbucketsize;
4238 : Selectivity thismcvfreq;
4239 :
4240 : /*
4241 : * First we have to figure out which side of the hashjoin clause
4242 : * is the inner side.
4243 : *
4244 : * Since we tend to visit the same clauses over and over when
4245 : * planning a large query, we cache the bucket stats estimates in
4246 : * the RestrictInfo node to avoid repeated lookups of statistics.
4247 : */
4248 237622 : if (bms_is_subset(restrictinfo->right_relids,
4249 237622 : inner_path->parent->relids))
4250 : {
4251 : /* righthand side is inner */
4252 125986 : thisbucketsize = restrictinfo->right_bucketsize;
4253 125986 : if (thisbucketsize < 0)
4254 : {
4255 : /* not cached yet */
4256 69248 : estimate_hash_bucket_stats(root,
4257 69248 : get_rightop(restrictinfo->clause),
4258 : virtualbuckets,
4259 : &restrictinfo->right_mcvfreq,
4260 : &restrictinfo->right_bucketsize);
4261 69248 : thisbucketsize = restrictinfo->right_bucketsize;
4262 : }
4263 125986 : thismcvfreq = restrictinfo->right_mcvfreq;
4264 : }
4265 : else
4266 : {
4267 : Assert(bms_is_subset(restrictinfo->left_relids,
4268 : inner_path->parent->relids));
4269 : /* lefthand side is inner */
4270 111636 : thisbucketsize = restrictinfo->left_bucketsize;
4271 111636 : if (thisbucketsize < 0)
4272 : {
4273 : /* not cached yet */
4274 60188 : estimate_hash_bucket_stats(root,
4275 60188 : get_leftop(restrictinfo->clause),
4276 : virtualbuckets,
4277 : &restrictinfo->left_mcvfreq,
4278 : &restrictinfo->left_bucketsize);
4279 60188 : thisbucketsize = restrictinfo->left_bucketsize;
4280 : }
4281 111636 : thismcvfreq = restrictinfo->left_mcvfreq;
4282 : }
4283 :
4284 237622 : if (innerbucketsize > thisbucketsize)
4285 154394 : innerbucketsize = thisbucketsize;
4286 237622 : if (innermcvfreq > thismcvfreq)
4287 221366 : innermcvfreq = thismcvfreq;
4288 : }
4289 : }
4290 :
4291 : /*
4292 : * If the bucket holding the inner MCV would exceed hash_mem, we don't
4293 : * want to hash unless there is really no other alternative, so apply
4294 : * disable_cost. (The executor normally copes with excessive memory usage
4295 : * by splitting batches, but obviously it cannot separate equal values
4296 : * that way, so it will be unable to drive the batch size below hash_mem
4297 : * when this is true.)
4298 : */
4299 219026 : if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
4300 438052 : inner_path->pathtarget->width) > get_hash_memory_limit())
4301 0 : startup_cost += disable_cost;
4302 :
4303 : /*
4304 : * Compute cost of the hashquals and qpquals (other restriction clauses)
4305 : * separately.
4306 : */
4307 219026 : cost_qual_eval(&hash_qual_cost, hashclauses, root);
4308 219026 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4309 219026 : qp_qual_cost.startup -= hash_qual_cost.startup;
4310 219026 : qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4311 :
4312 : /* CPU costs */
4313 :
4314 219026 : if (path->jpath.jointype == JOIN_SEMI ||
4315 216152 : path->jpath.jointype == JOIN_ANTI ||
4316 212032 : extra->inner_unique)
4317 93448 : {
4318 : double outer_matched_rows;
4319 : Selectivity inner_scan_frac;
4320 :
4321 : /*
4322 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
4323 : * executor will stop after the first match.
4324 : *
4325 : * For an outer-rel row that has at least one match, we can expect the
4326 : * bucket scan to stop after a fraction 1/(match_count+1) of the
4327 : * bucket's rows, if the matches are evenly distributed. Since they
4328 : * probably aren't quite evenly distributed, we apply a fuzz factor of
4329 : * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4330 : * to clamp inner_scan_frac to at most 1.0; but since match_count is
4331 : * at least 1, no such clamp is needed now.)
4332 : */
4333 93448 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4334 93448 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
4335 :
4336 93448 : startup_cost += hash_qual_cost.startup;
4337 186896 : run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4338 93448 : clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
4339 :
4340 : /*
4341 : * For unmatched outer-rel rows, the picture is quite a lot different.
4342 : * In the first place, there is no reason to assume that these rows
4343 : * preferentially hit heavily-populated buckets; instead assume they
4344 : * are uncorrelated with the inner distribution and so they see an
4345 : * average bucket size of inner_path_rows / virtualbuckets. In the
4346 : * second place, it seems likely that they will have few if any exact
4347 : * hash-code matches and so very few of the tuples in the bucket will
4348 : * actually require eval of the hash quals. We don't have any good
4349 : * way to estimate how many will, but for the moment assume that the
4350 : * effective cost per bucket entry is one-tenth what it is for
4351 : * matchable tuples.
4352 : */
4353 186896 : run_cost += hash_qual_cost.per_tuple *
4354 186896 : (outer_path_rows - outer_matched_rows) *
4355 93448 : clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
4356 :
4357 : /* Get # of tuples that will pass the basic join */
4358 93448 : if (path->jpath.jointype == JOIN_ANTI)
4359 4120 : hashjointuples = outer_path_rows - outer_matched_rows;
4360 : else
4361 89328 : hashjointuples = outer_matched_rows;
4362 : }
4363 : else
4364 : {
4365 : /*
4366 : * The number of tuple comparisons needed is the number of outer
4367 : * tuples times the typical number of tuples in a hash bucket, which
4368 : * is the inner relation size times its bucketsize fraction. At each
4369 : * one, we need to evaluate the hashjoin quals. But actually,
4370 : * charging the full qual eval cost at each tuple is pessimistic,
4371 : * since we don't evaluate the quals unless the hash values match
4372 : * exactly. For lack of a better idea, halve the cost estimate to
4373 : * allow for that.
4374 : */
4375 125578 : startup_cost += hash_qual_cost.startup;
4376 251156 : run_cost += hash_qual_cost.per_tuple * outer_path_rows *
4377 125578 : clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
4378 :
4379 : /*
4380 : * Get approx # tuples passing the hashquals. We use
4381 : * approx_tuple_count here because we need an estimate done with
4382 : * JOIN_INNER semantics.
4383 : */
4384 125578 : hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4385 : }
4386 :
4387 : /*
4388 : * For each tuple that gets through the hashjoin proper, we charge
4389 : * cpu_tuple_cost plus the cost of evaluating additional restriction
4390 : * clauses that are to be applied at the join. (This is pessimistic since
4391 : * not all of the quals may get evaluated at each tuple.)
4392 : */
4393 219026 : startup_cost += qp_qual_cost.startup;
4394 219026 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
4395 219026 : run_cost += cpu_per_tuple * hashjointuples;
4396 :
4397 : /* tlist eval costs are paid per output row, not per tuple scanned */
4398 219026 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4399 219026 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4400 :
4401 219026 : path->jpath.path.startup_cost = startup_cost;
4402 219026 : path->jpath.path.total_cost = startup_cost + run_cost;
4403 219026 : }
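
/*
 * Editor's note: a minimal, compile-only sketch of the SEMI/ANTI hash-qual
 * charging scheme used in final_cost_hashjoin above. The demo_* names are
 * hypothetical; the 2.0 fuzz factor, the 0.5 halving for matched rows, and
 * the 0.05 multiplier (one-tenth of the halved charge) for unmatched rows
 * are copied from the code above. Illustration only, not part of costsize.c.
 */
#include <math.h>

static double
demo_clamp_rows(double n)
{
    /* mirror clamp_row_est's lower bound: at least one row, rounded */
    return (n <= 1.0) ? 1.0 : rint(n);
}

static double
demo_semi_hash_qual_run_cost(double qual_cost_per_tuple,
                             double outer_rows, double inner_rows,
                             double outer_match_frac, double match_count,
                             double innerbucketsize, double virtualbuckets)
{
    double      matched = rint(outer_rows * outer_match_frac);
    double      inner_scan_frac = 2.0 / (match_count + 1.0);
    double      cost;

    /* matched outer rows scan a fraction of a (possibly skewed) bucket */
    cost = qual_cost_per_tuple * matched *
        demo_clamp_rows(inner_rows * innerbucketsize * inner_scan_frac) * 0.5;
    /* unmatched rows see an average-size bucket and rarely match hash codes */
    cost += qual_cost_per_tuple * (outer_rows - matched) *
        demo_clamp_rows(inner_rows / virtualbuckets) * 0.05;
    return cost;
}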
4404 :
4405 :
4406 : /*
4407 : * cost_subplan
4408 : * Figure the costs for a SubPlan (or initplan).
4409 : *
4410 : * Note: we could dig the subplan's Plan out of the root list, but in practice
4411 : * all callers have it handy already, so we make them pass it.
4412 : */
4413 : void
4414 35742 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
4415 : {
4416 : QualCost sp_cost;
4417 :
4418 : /* Figure any cost for evaluating the testexpr */
4419 35742 : cost_qual_eval(&sp_cost,
4420 35742 : make_ands_implicit((Expr *) subplan->testexpr),
4421 : root);
4422 :
4423 35742 : if (subplan->useHashTable)
4424 : {
4425 : /*
4426 : * If we are using a hash table for the subquery outputs, then the
4427 : * cost of evaluating the query is a one-time cost. We charge one
4428 : * cost of evaluating the subquery is a one-time cost. We charge one
4429 : * too.
4430 : */
4431 1890 : sp_cost.startup += plan->total_cost +
4432 1890 : cpu_operator_cost * plan->plan_rows;
4433 :
4434 : /*
4435 : * The per-tuple costs include the cost of evaluating the lefthand
4436 : * expressions, plus the cost of probing the hashtable. We already
4437 : * accounted for the lefthand expressions as part of the testexpr, and
4438 : * will also have counted one cpu_operator_cost for each comparison
4439 : * operator. That is probably too low for the probing cost, but it's
4440 : * hard to make a better estimate, so live with it for now.
4441 : */
4442 : }
4443 : else
4444 : {
4445 : /*
4446 : * Otherwise we will be rescanning the subplan output on each
4447 : * evaluation. We need to estimate how much of the output we will
4448 : * actually need to scan. NOTE: this logic should agree with the
4449 : * tuple_fraction estimates used by make_subplan() in
4450 : * plan/subselect.c.
4451 : */
4452 33852 : Cost plan_run_cost = plan->total_cost - plan->startup_cost;
4453 :
4454 33852 : if (subplan->subLinkType == EXISTS_SUBLINK)
4455 : {
4456 : /* we only need to fetch 1 tuple; clamp to avoid zero divide */
4457 2036 : sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4458 : }
4459 31816 : else if (subplan->subLinkType == ALL_SUBLINK ||
4460 31798 : subplan->subLinkType == ANY_SUBLINK)
4461 : {
4462 : /* assume we need 50% of the tuples */
4463 100 : sp_cost.per_tuple += 0.50 * plan_run_cost;
4464 : /* also charge a cpu_operator_cost per row examined */
4465 100 : sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4466 : }
4467 : else
4468 : {
4469 : /* assume we need all tuples */
4470 31716 : sp_cost.per_tuple += plan_run_cost;
4471 : }
4472 :
4473 : /*
4474 : * Also account for subplan's startup cost. If the subplan is
4475 : * uncorrelated or undirect correlated, AND its topmost node is one
4476 : * that materializes its output, assume that we'll only need to pay
4477 : * its startup cost once; otherwise assume we pay the startup cost
4478 : * every time.
4479 : */
4480 45044 : if (subplan->parParam == NIL &&
4481 11192 : ExecMaterializesOutput(nodeTag(plan)))
4482 504 : sp_cost.startup += plan->startup_cost;
4483 : else
4484 33348 : sp_cost.per_tuple += plan->startup_cost;
4485 : }
4486 :
4487 35742 : subplan->startup_cost = sp_cost.startup;
4488 35742 : subplan->per_call_cost = sp_cost.per_tuple;
4489 35742 : }
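
/*
 * Editor's note: a sketch of the per-call run-cost fractions chosen above
 * for a non-hashed subplan. The sublink kinds are collapsed to an int and
 * the demo_* names are hypothetical; illustration only.
 */
static double
demo_subplan_per_call_run_cost(int kind,    /* 0=EXISTS, 1=ANY/ALL, 2=other */
                               double plan_run_cost, double plan_rows,
                               double cpu_operator_cost_demo)
{
    switch (kind)
    {
        case 0:
            /* EXISTS: fetch one tuple; clamp rows to avoid zero divide */
            return plan_run_cost / (plan_rows < 1.0 ? 1.0 : plan_rows);
        case 1:
            /* ANY/ALL: assume 50% of the output, plus one operator per row */
            return 0.50 * plan_run_cost +
                0.50 * plan_rows * cpu_operator_cost_demo;
        default:
            /* otherwise assume the whole output is scanned */
            return plan_run_cost;
    }
}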
4490 :
4491 :
4492 : /*
4493 : * cost_rescan
4494 : * Given a finished Path, estimate the costs of rescanning it after
4495 : * having done so the first time. For some Path types a rescan is
4496 : * cheaper than an original scan (if no parameters change), and this
4497 : * function embodies knowledge about that. The default is to return
4498 : * the same costs stored in the Path. (Note that the cost estimates
4499 : * actually stored in Paths are always for first scans.)
4500 : *
4501 : * This function is not currently intended to model effects such as rescans
4502 : * being cheaper due to disk block caching; what we are concerned with is
4503 : * plan types wherein the executor caches results explicitly, or doesn't
4504 : * redo startup calculations, etc.
4505 : */
4506 : static void
4507 2167696 : cost_rescan(PlannerInfo *root, Path *path,
4508 : Cost *rescan_startup_cost, /* output parameters */
4509 : Cost *rescan_total_cost)
4510 : {
4511 2167696 : switch (path->pathtype)
4512 : {
4513 37762 : case T_FunctionScan:
4514 :
4515 : /*
4516 : * Currently, nodeFunctionscan.c always executes the function to
4517 : * completion before returning any rows, and caches the results in
4518 : * a tuplestore. So the function eval cost is all startup cost
4519 : * and isn't paid over again on rescans. However, all run costs
4520 : * will be paid over again.
4521 : */
4522 37762 : *rescan_startup_cost = 0;
4523 37762 : *rescan_total_cost = path->total_cost - path->startup_cost;
4524 37762 : break;
4525 99582 : case T_HashJoin:
4526 :
4527 : /*
4528 : * If it's a single-batch join, we don't need to rebuild the hash
4529 : * table during a rescan.
4530 : */
4531 99582 : if (((HashPath *) path)->num_batches == 1)
4532 : {
4533 : /* Startup cost is exactly the cost of hash table building */
4534 99582 : *rescan_startup_cost = 0;
4535 99582 : *rescan_total_cost = path->total_cost - path->startup_cost;
4536 : }
4537 : else
4538 : {
4539 : /* Otherwise, no special treatment */
4540 0 : *rescan_startup_cost = path->startup_cost;
4541 0 : *rescan_total_cost = path->total_cost;
4542 : }
4543 99582 : break;
4544 6660 : case T_CteScan:
4545 : case T_WorkTableScan:
4546 : {
4547 : /*
4548 : * These plan types materialize their final result in a
4549 : * tuplestore or tuplesort object. So the rescan cost is only
4550 : * cpu_tuple_cost per tuple, unless the result is large enough
4551 : * to spill to disk.
4552 : */
4553 6660 : Cost run_cost = cpu_tuple_cost * path->rows;
4554 6660 : double nbytes = relation_byte_size(path->rows,
4555 6660 : path->pathtarget->width);
4556 6660 : long work_mem_bytes = work_mem * 1024L;
4557 :
4558 6660 : if (nbytes > work_mem_bytes)
4559 : {
4560 : /* It will spill, so account for re-read cost */
4561 160 : double npages = ceil(nbytes / BLCKSZ);
4562 :
4563 160 : run_cost += seq_page_cost * npages;
4564 : }
4565 6660 : *rescan_startup_cost = 0;
4566 6660 : *rescan_total_cost = run_cost;
4567 : }
4568 6660 : break;
4569 739672 : case T_Material:
4570 : case T_Sort:
4571 : {
4572 : /*
4573 : * These plan types not only materialize their results but also
4574 : * perform no qual filtering or projection, so they are even
4575 : * cheaper to rescan than the plan types above. We charge only
4576 : * cpu_operator_cost per tuple. (Note: keep that in sync with
4577 : * the run_cost charge in cost_sort, and also see comments in
4578 : * cost_material before you change it.)
4579 : */
4580 739672 : Cost run_cost = cpu_operator_cost * path->rows;
4581 739672 : double nbytes = relation_byte_size(path->rows,
4582 739672 : path->pathtarget->width);
4583 739672 : long work_mem_bytes = work_mem * 1024L;
4584 :
4585 739672 : if (nbytes > work_mem_bytes)
4586 : {
4587 : /* It will spill, so account for re-read cost */
4588 9322 : double npages = ceil(nbytes / BLCKSZ);
4589 :
4590 9322 : run_cost += seq_page_cost * npages;
4591 : }
4592 739672 : *rescan_startup_cost = 0;
4593 739672 : *rescan_total_cost = run_cost;
4594 : }
4595 739672 : break;
4596 221440 : case T_Memoize:
4597 : /* All the hard work is done by cost_memoize_rescan */
4598 221440 : cost_memoize_rescan(root, (MemoizePath *) path,
4599 : rescan_startup_cost, rescan_total_cost);
4600 221440 : break;
4601 1062580 : default:
4602 1062580 : *rescan_startup_cost = path->startup_cost;
4603 1062580 : *rescan_total_cost = path->total_cost;
4604 1062580 : break;
4605 : }
4606 2167696 : }
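
/*
 * Editor's note: a sketch of the Material/Sort rescan charge computed
 * above. DEMO_BLCKSZ stands in for BLCKSZ (the default 8kB block size is
 * assumed) and the demo_* name is hypothetical; illustration only.
 */
#include <math.h>

#define DEMO_BLCKSZ 8192

static double
demo_material_rescan_cost(double rows, double nbytes, double work_mem_bytes,
                          double cpu_operator_cost_demo,
                          double seq_page_cost_demo)
{
    /* cheap per-tuple refetch charge for a materialized result */
    double      run_cost = cpu_operator_cost_demo * rows;

    /* if the result spilled past work_mem, add sequential re-read I/O */
    if (nbytes > work_mem_bytes)
        run_cost += seq_page_cost_demo * ceil(nbytes / DEMO_BLCKSZ);
    return run_cost;
}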
4607 :
4608 :
4609 : /*
4610 : * cost_qual_eval
4611 : * Estimate the CPU costs of evaluating a WHERE clause.
4612 : * The input can be either an implicitly-ANDed list of boolean
4613 : * expressions, or a list of RestrictInfo nodes. (The latter is
4614 : * preferred since it allows caching of the results.)
4615 : * The result includes both a one-time (startup) component,
4616 : * and a per-evaluation component.
4617 : */
4618 : void
4619 3071096 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4620 : {
4621 : cost_qual_eval_context context;
4622 : ListCell *l;
4623 :
4624 3071096 : context.root = root;
4625 3071096 : context.total.startup = 0;
4626 3071096 : context.total.per_tuple = 0;
4627 :
4628 : /* We don't charge any cost for the implicit ANDing at top level ... */
4629 :
4630 5713912 : foreach(l, quals)
4631 : {
4632 2642816 : Node *qual = (Node *) lfirst(l);
4633 :
4634 2642816 : cost_qual_eval_walker(qual, &context);
4635 : }
4636 :
4637 3071096 : *cost = context.total;
4638 3071096 : }
4639 :
4640 : /*
4641 : * cost_qual_eval_node
4642 : * As above, for a single RestrictInfo or expression.
4643 : */
4644 : void
4645 1493392 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
4646 : {
4647 : cost_qual_eval_context context;
4648 :
4649 1493392 : context.root = root;
4650 1493392 : context.total.startup = 0;
4651 1493392 : context.total.per_tuple = 0;
4652 :
4653 1493392 : cost_qual_eval_walker(qual, &context);
4654 :
4655 1493392 : *cost = context.total;
4656 1493392 : }
4657 :
4658 : static bool
4659 7008500 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4660 : {
4661 7008500 : if (node == NULL)
4662 84566 : return false;
4663 :
4664 : /*
4665 : * RestrictInfo nodes contain an eval_cost field reserved for this
4666 : * routine's use, so that it's not necessary to evaluate the qual clause's
4667 : * cost more than once. If the clause's cost hasn't been computed yet,
4668 : * the field's startup value will contain -1.
4669 : */
4670 6923934 : if (IsA(node, RestrictInfo))
4671 : {
4672 2776310 : RestrictInfo *rinfo = (RestrictInfo *) node;
4673 :
4674 2776310 : if (rinfo->eval_cost.startup < 0)
4675 : {
4676 : cost_qual_eval_context locContext;
4677 :
4678 477268 : locContext.root = context->root;
4679 477268 : locContext.total.startup = 0;
4680 477268 : locContext.total.per_tuple = 0;
4681 :
4682 : /*
4683 : * For an OR clause, recurse into the marked-up tree so that we
4684 : * set the eval_cost for contained RestrictInfos too.
4685 : */
4686 477268 : if (rinfo->orclause)
4687 7788 : cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4688 : else
4689 469480 : cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4690 :
4691 : /*
4692 : * If the RestrictInfo is marked pseudoconstant, it will be tested
4693 : * only once, so treat its cost as all startup cost.
4694 : */
4695 477268 : if (rinfo->pseudoconstant)
4696 : {
4697 : /* count one execution during startup */
4698 7496 : locContext.total.startup += locContext.total.per_tuple;
4699 7496 : locContext.total.per_tuple = 0;
4700 : }
4701 477268 : rinfo->eval_cost = locContext.total;
4702 : }
4703 2776310 : context->total.startup += rinfo->eval_cost.startup;
4704 2776310 : context->total.per_tuple += rinfo->eval_cost.per_tuple;
4705 : /* do NOT recurse into children */
4706 2776310 : return false;
4707 : }
4708 :
4709 : /*
4710 : * For each operator or function node in the given tree, we charge the
4711 : * estimated execution cost given by pg_proc.procost (remember to multiply
4712 : * this by cpu_operator_cost).
4713 : *
4714 : * Vars and Consts are charged zero, and so are boolean operators (AND,
4715 : * OR, NOT). Simplistic, but a lot better than no model at all.
4716 : *
4717 : * Should we try to account for the possibility of short-circuit
4718 : * evaluation of AND/OR? Probably *not*, because that would make the
4719 : * results depend on the clause ordering, and we are not in any position
4720 : * to expect that the current ordering of the clauses is the one that's
4721 : * going to end up being used. The above per-RestrictInfo caching would
4722 : * not mix well with trying to re-order clauses anyway.
4723 : *
4724 : * Another issue that is entirely ignored here is that if a set-returning
4725 : * function is below top level in the tree, the functions/operators above
4726 : * it will need to be evaluated multiple times. In practical use, such
4727 : * cases arise so seldom as to not be worth the added complexity needed;
4728 : * moreover, since our rowcount estimates for functions tend to be pretty
4729 : * phony, the results would also be pretty phony.
4730 : */
4731 4147624 : if (IsA(node, FuncExpr))
4732 : {
4733 290102 : add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
4734 : &context->total);
4735 : }
4736 3857522 : else if (IsA(node, OpExpr) ||
4737 3303974 : IsA(node, DistinctExpr) ||
4738 3303104 : IsA(node, NullIfExpr))
4739 : {
4740 : /* rely on struct equivalence to treat these all alike */
4741 554524 : set_opfuncid((OpExpr *) node);
4742 554524 : add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
4743 : &context->total);
4744 : }
4745 3302998 : else if (IsA(node, ScalarArrayOpExpr))
4746 : {
4747 34422 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
4748 34422 : Node *arraynode = (Node *) lsecond(saop->args);
4749 : QualCost sacosts;
4750 : QualCost hcosts;
4751 34422 : double estarraylen = estimate_array_length(context->root, arraynode);
4752 :
4753 34422 : set_sa_opfuncid(saop);
4754 34422 : sacosts.startup = sacosts.per_tuple = 0;
4755 34422 : add_function_cost(context->root, saop->opfuncid, NULL,
4756 : &sacosts);
4757 :
4758 34422 : if (OidIsValid(saop->hashfuncid))
4759 : {
4760 : /* Handle costs for hashed ScalarArrayOpExpr */
4761 266 : hcosts.startup = hcosts.per_tuple = 0;
4762 :
4763 266 : add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
4764 266 : context->total.startup += sacosts.startup + hcosts.startup;
4765 :
4766 : /* Estimate the cost of building the hashtable. */
4767 266 : context->total.startup += estarraylen * hcosts.per_tuple;
4768 :
4769 : /*
4770 : * XXX should we charge a little bit for sacosts.per_tuple when
4771 : * building the table, or is it ok to assume there will be zero
4772 : * hash collision?
4773 : * hash collisions?
4774 :
4775 : /*
4776 : * Charge for hashtable lookups. Charge a single hash and a
4777 : * single comparison.
4778 : */
4779 266 : context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
4780 : }
4781 : else
4782 : {
4783 : /*
4784 : * Estimate that the operator will be applied to about half of the
4785 : * array elements before the answer is determined.
4786 : */
4787 34156 : context->total.startup += sacosts.startup;
4788 68312 : context->total.per_tuple += sacosts.per_tuple *
4789 34156 : estimate_array_length(context->root, arraynode) * 0.5;
4790 : }
4791 : }
4792 3268576 : else if (IsA(node, Aggref) ||
4793 3219506 : IsA(node, WindowFunc))
4794 : {
4795 : /*
4796 : * Aggref and WindowFunc nodes are (and should be) treated like Vars,
4797 : * ie, zero execution cost in the current model, because they behave
4798 : * essentially like Vars at execution. We disregard the costs of
4799 : * their input expressions for the same reason. The actual execution
4800 : * costs of the aggregate/window functions and their arguments have to
4801 : * be factored into plan-node-specific costing of the Agg or WindowAgg
4802 : * plan node.
4803 : */
4804 52426 : return false; /* don't recurse into children */
4805 : }
4806 3216150 : else if (IsA(node, GroupingFunc))
4807 : {
4808 : /* Treat this as having cost 1 */
4809 350 : context->total.per_tuple += cpu_operator_cost;
4810 350 : return false; /* don't recurse into children */
4811 : }
4812 3215800 : else if (IsA(node, CoerceViaIO))
4813 : {
4814 18778 : CoerceViaIO *iocoerce = (CoerceViaIO *) node;
4815 : Oid iofunc;
4816 : Oid typioparam;
4817 : bool typisvarlena;
4818 :
4819 : /* check the result type's input function */
4820 18778 : getTypeInputInfo(iocoerce->resulttype,
4821 : &iofunc, &typioparam);
4822 18778 : add_function_cost(context->root, iofunc, NULL,
4823 : &context->total);
4824 : /* check the input type's output function */
4825 18778 : getTypeOutputInfo(exprType((Node *) iocoerce->arg),
4826 : &iofunc, &typisvarlena);
4827 18778 : add_function_cost(context->root, iofunc, NULL,
4828 : &context->total);
4829 : }
4830 3197022 : else if (IsA(node, ArrayCoerceExpr))
4831 : {
4832 4124 : ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
4833 : QualCost perelemcost;
4834 :
4835 4124 : cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
4836 : context->root);
4837 4124 : context->total.startup += perelemcost.startup;
4838 4124 : if (perelemcost.per_tuple > 0)
4839 58 : context->total.per_tuple += perelemcost.per_tuple *
4840 58 : estimate_array_length(context->root, (Node *) acoerce->arg);
4841 : }
4842 3192898 : else if (IsA(node, RowCompareExpr))
4843 : {
4844 : /* Conservatively assume we will check all the columns */
4845 156 : RowCompareExpr *rcexpr = (RowCompareExpr *) node;
4846 : ListCell *lc;
4847 :
4848 522 : foreach(lc, rcexpr->opnos)
4849 : {
4850 366 : Oid opid = lfirst_oid(lc);
4851 :
4852 366 : add_function_cost(context->root, get_opcode(opid), NULL,
4853 : &context->total);
4854 : }
4855 : }
4856 3192742 : else if (IsA(node, MinMaxExpr) ||
4857 3192558 : IsA(node, SQLValueFunction) ||
4858 3188316 : IsA(node, XmlExpr) ||
4859 3187626 : IsA(node, CoerceToDomain) ||
4860 3179018 : IsA(node, NextValueExpr) ||
4861 3178548 : IsA(node, JsonExpr))
4862 : {
4863 : /* Treat all these as having cost 1 */
4864 16520 : context->total.per_tuple += cpu_operator_cost;
4865 : }
4866 3176222 : else if (IsA(node, SubLink))
4867 : {
4868 : /* This routine should not be applied to un-planned expressions */
4869 0 : elog(ERROR, "cannot handle unplanned sub-select");
4870 : }
4871 3176222 : else if (IsA(node, SubPlan))
4872 : {
4873 : /*
4874 : * A subplan node in an expression typically indicates that the
4875 : * subplan will be executed on each evaluation, so charge accordingly.
4876 : * (Sub-selects that can be executed as InitPlans have already been
4877 : * removed from the expression.)
4878 : */
4879 34330 : SubPlan *subplan = (SubPlan *) node;
4880 :
4881 34330 : context->total.startup += subplan->startup_cost;
4882 34330 : context->total.per_tuple += subplan->per_call_cost;
4883 :
4884 : /*
4885 : * We don't want to recurse into the testexpr, because it was already
4886 : * counted in the SubPlan node's costs. So we're done.
4887 : */
4888 34330 : return false;
4889 : }
4890 3141892 : else if (IsA(node, AlternativeSubPlan))
4891 : {
4892 : /*
4893 : * Arbitrarily use the first alternative plan for costing. (We should
4894 : * certainly only include one alternative, and we don't yet have
4895 : * enough information to know which one the executor is most likely to
4896 : * use.)
4897 : */
4898 1618 : AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
4899 :
4900 1618 : return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
4901 : context);
4902 : }
4903 3140274 : else if (IsA(node, PlaceHolderVar))
4904 : {
4905 : /*
4906 : * A PlaceHolderVar should be given cost zero when considering general
4907 : * expression evaluation costs. The expense of doing the contained
4908 : * expression is charged as part of the tlist eval costs of the scan
4909 : * or join where the PHV is first computed (see set_rel_width and
4910 : * add_placeholders_to_joinrel). If we charged it again here, we'd be
4911 : * double-counting the cost for each level of plan that the PHV
4912 : * bubbles up through. Hence, return without recursing into the
4913 : * phexpr.
4914 : */
4915 2520 : return false;
4916 : }
4917 :
4918 : /* recurse into children */
4919 4056380 : return expression_tree_walker(node, cost_qual_eval_walker,
4920 : (void *) context);
4921 : }
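
/*
 * Editor's note: a sketch of the ScalarArrayOpExpr charging done in
 * cost_qual_eval_walker above, for the hashed and unhashed cases. The
 * demo_* name is hypothetical, per-element costs are taken as given, and
 * the operators' startup charges are elided; illustration only.
 */
static void
demo_saop_cost(int hashed, double cmp_per_tuple, double hash_per_tuple,
               double array_len, double *startup, double *per_tuple)
{
    if (hashed)
    {
        /* pay to build the hash table once at startup ... */
        *startup += array_len * hash_per_tuple;
        /* ... then charge one hash plus one comparison per probe */
        *per_tuple += hash_per_tuple + cmp_per_tuple;
    }
    else
    {
        /* assume about half the array elements are examined per call */
        *per_tuple += cmp_per_tuple * array_len * 0.5;
    }
}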
4922 :
4923 : /*
4924 : * get_restriction_qual_cost
4925 : * Compute evaluation costs of a baserel's restriction quals, plus any
4926 : * movable join quals that have been pushed down to the scan.
4927 : * Results are returned into *qpqual_cost.
4928 : *
4929 : * This is a convenience subroutine that works for seqscans and other cases
4930 : * where all the given quals will be evaluated the hard way. It's not useful
4931 : * for cost_index(), for example, where the index machinery takes care of
4932 : * some of the quals. We assume baserestrictcost was previously set by
4933 : * set_baserel_size_estimates().
4934 : */
4935 : static void
4936 883338 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
4937 : ParamPathInfo *param_info,
4938 : QualCost *qpqual_cost)
4939 : {
4940 883338 : if (param_info)
4941 : {
4942 : /* Include costs of pushed-down clauses */
4943 184088 : cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
4944 :
4945 184088 : qpqual_cost->startup += baserel->baserestrictcost.startup;
4946 184088 : qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
4947 : }
4948 : else
4949 699250 : *qpqual_cost = baserel->baserestrictcost;
4950 883338 : }
4951 :
4952 :
4953 : /*
4954 : * compute_semi_anti_join_factors
4955 : * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
4956 : * can be expected to scan.
4957 : *
4958 : * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
4959 : * inner rows as soon as it finds a match to the current outer row.
4960 : * The same happens if we have detected the inner rel is unique.
4961 : * We should therefore adjust some of the cost components for this effect.
4962 : * This function computes some estimates needed for these adjustments.
4963 : * These estimates will be the same regardless of the particular paths used
4964 : * for the outer and inner relation, so we compute these once and then pass
4965 : * them to all the join cost estimation functions.
4966 : *
4967 : * Input parameters:
4968 : * joinrel: join relation under consideration
4969 : * outerrel: outer relation under consideration
4970 : * innerrel: inner relation under consideration
4971 : * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
4972 : * sjinfo: SpecialJoinInfo relevant to this join
4973 : * restrictlist: join quals
4974 : * Output parameters:
4975 : * *semifactors is filled in (see pathnodes.h for field definitions)
4976 : */
4977 : void
4978 162044 : compute_semi_anti_join_factors(PlannerInfo *root,
4979 : RelOptInfo *joinrel,
4980 : RelOptInfo *outerrel,
4981 : RelOptInfo *innerrel,
4982 : JoinType jointype,
4983 : SpecialJoinInfo *sjinfo,
4984 : List *restrictlist,
4985 : SemiAntiJoinFactors *semifactors)
4986 : {
4987 : Selectivity jselec;
4988 : Selectivity nselec;
4989 : Selectivity avgmatch;
4990 : SpecialJoinInfo norm_sjinfo;
4991 : List *joinquals;
4992 : ListCell *l;
4993 :
4994 : /*
4995 : * In an ANTI join, we must ignore clauses that are "pushed down", since
4996 : * those won't affect the match logic. In a SEMI join, we do not
4997 : * distinguish joinquals from "pushed down" quals, so just use the whole
4998 : * restrictinfo list. For other outer join types, we should consider only
4999 : * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
5000 : */
5001 162044 : if (IS_OUTER_JOIN(jointype))
5002 : {
5003 62022 : joinquals = NIL;
5004 135398 : foreach(l, restrictlist)
5005 : {
5006 73376 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5007 :
5008 73376 : if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5009 69462 : joinquals = lappend(joinquals, rinfo);
5010 : }
5011 : }
5012 : else
5013 100022 : joinquals = restrictlist;
5014 :
5015 : /*
5016 : * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
5017 : */
5018 162044 : jselec = clauselist_selectivity(root,
5019 : joinquals,
5020 : 0,
5021 : (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
5022 : sjinfo);
5023 :
5024 : /*
5025 : * Also get the normal inner-join selectivity of the join clauses.
5026 : */
5027 162044 : init_dummy_sjinfo(&norm_sjinfo, outerrel->relids, innerrel->relids);
5028 :
5029 162044 : nselec = clauselist_selectivity(root,
5030 : joinquals,
5031 : 0,
5032 : JOIN_INNER,
5033 : &norm_sjinfo);
5034 :
5035 : /* Avoid leaking a lot of ListCells */
5036 162044 : if (IS_OUTER_JOIN(jointype))
5037 62022 : list_free(joinquals);
5038 :
5039 : /*
5040 : * jselec can be interpreted as the fraction of outer-rel rows that have
5041 : * any matches (this is true for both SEMI and ANTI cases). And nselec is
5042 : * the fraction of the Cartesian product that matches. So, the average
5043 : * number of matches for each outer-rel row that has at least one match is
5044 : * nselec * inner_rows / jselec.
5045 : *
5046 : * Note: it is correct to use the inner rel's "rows" count here, even
5047 : * though we might later be considering a parameterized inner path with
5048 : * fewer rows. This is because we have included all the join clauses in
5049 : * the selectivity estimate.
5050 : */
5051 162044 : if (jselec > 0) /* protect against zero divide */
5052 : {
5053 161704 : avgmatch = nselec * innerrel->rows / jselec;
5054 : /* Clamp to sane range */
5055 161704 : avgmatch = Max(1.0, avgmatch);
5056 : }
5057 : else
5058 340 : avgmatch = 1.0;
5059 :
5060 162044 : semifactors->outer_match_frac = jselec;
5061 162044 : semifactors->match_count = avgmatch;
5062 162044 : }
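
/*
 * Editor's note: a sketch of the match-count arithmetic above; jselec is
 * the fraction of outer rows having any match, nselec the plain inner-join
 * selectivity, and the demo_* name is hypothetical. Illustration only.
 */
static void
demo_semi_factors(double jselec, double nselec, double inner_rows,
                  double *outer_match_frac, double *match_count)
{
    double      avgmatch;

    if (jselec > 0)             /* protect against zero divide */
    {
        avgmatch = nselec * inner_rows / jselec;
        if (avgmatch < 1.0)
            avgmatch = 1.0;     /* clamp to sane range */
    }
    else
        avgmatch = 1.0;

    *outer_match_frac = jselec;
    *match_count = avgmatch;
}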
5063 :
5064 : /*
5065 : * has_indexed_join_quals
5066 : * Check whether all the joinquals of a nestloop join are used as
5067 : * inner index quals.
5068 : *
5069 : * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
5070 : * indexscan) that uses all the joinquals as indexquals, we can assume that an
5071 : * unmatched outer tuple is cheap to process, whereas otherwise it's probably
5072 : * expensive.
5073 : */
5074 : static bool
5075 680072 : has_indexed_join_quals(NestPath *path)
5076 : {
5077 680072 : JoinPath *joinpath = &path->jpath;
5078 680072 : Relids joinrelids = joinpath->path.parent->relids;
5079 680072 : Path *innerpath = joinpath->innerjoinpath;
5080 : List *indexclauses;
5081 : bool found_one;
5082 : ListCell *lc;
5083 :
5084 : /* If join still has quals to evaluate, it's not fast */
5085 680072 : if (joinpath->joinrestrictinfo != NIL)
5086 476548 : return false;
5087 : /* Nor if the inner path isn't parameterized at all */
5088 203524 : if (innerpath->param_info == NULL)
5089 4800 : return false;
5090 :
5091 : /* Find the indexclauses list for the inner scan */
5092 198724 : switch (innerpath->pathtype)
5093 : {
5094 122430 : case T_IndexScan:
5095 : case T_IndexOnlyScan:
5096 122430 : indexclauses = ((IndexPath *) innerpath)->indexclauses;
5097 122430 : break;
5098 282 : case T_BitmapHeapScan:
5099 : {
5100 : /* Accept only a simple bitmap scan, not AND/OR cases */
5101 282 : Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
5102 :
5103 282 : if (IsA(bmqual, IndexPath))
5104 234 : indexclauses = ((IndexPath *) bmqual)->indexclauses;
5105 : else
5106 48 : return false;
5107 234 : break;
5108 : }
5109 76012 : default:
5110 :
5111 : /*
5112 : * If it's not a simple indexscan, it probably doesn't run quickly
5113 : * for zero rows out, even if it's a parameterized path using all
5114 : * the joinquals.
5115 : */
5116 76012 : return false;
5117 : }
5118 :
5119 : /*
5120 : * Examine the inner path's param clauses. Any that are from the outer
5121 : * path must be found in the indexclauses list, either exactly or in an
5122 : * equivalent form generated by equivclass.c. Also, we must find at least
5123 : * one such clause, else it's a clauseless join which isn't fast.
5124 : */
5125 122664 : found_one = false;
5126 244774 : foreach(lc, innerpath->param_info->ppi_clauses)
5127 : {
5128 125012 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
5129 :
5130 125012 : if (join_clause_is_movable_into(rinfo,
5131 125012 : innerpath->parent->relids,
5132 : joinrelids))
5133 : {
5134 124544 : if (!is_redundant_with_indexclauses(rinfo, indexclauses))
5135 2902 : return false;
5136 121642 : found_one = true;
5137 : }
5138 : }
5139 119762 : return found_one;
5140 : }
5141 :
5142 :
5143 : /*
5144 : * approx_tuple_count
5145 : * Quick-and-dirty estimation of the number of join rows passing
5146 : * a set of qual conditions.
5147 : *
5148 : * The quals can be either an implicitly-ANDed list of boolean expressions,
5149 : * or a list of RestrictInfo nodes (typically the latter).
5150 : *
5151 : * We intentionally compute the selectivity under JOIN_INNER rules, even
5152 : * if it's some type of outer join. This is appropriate because we are
5153 : * trying to figure out how many tuples pass the initial merge or hash
5154 : * join step.
5155 : *
5156 : * This is quick-and-dirty because we bypass clauselist_selectivity, and
5157 : * simply multiply the independent clause selectivities together. Now
5158 : * clauselist_selectivity often can't do any better than that anyhow, but
5159 : * for some situations (such as range constraints) it is smarter. However,
5160 : * we can't effectively cache the results of clauselist_selectivity, whereas
5161 : * the individual clause selectivities can be and are cached.
5162 : *
5163 : * Since we are only using the results to estimate how many potential
5164 : * output tuples are generated and passed through qpqual checking, it
5165 : * seems OK to live with the approximation.
5166 : */
5167 : static double
5168 366760 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
5169 : {
5170 : double tuples;
5171 366760 : double outer_tuples = path->outerjoinpath->rows;
5172 366760 : double inner_tuples = path->innerjoinpath->rows;
5173 : SpecialJoinInfo sjinfo;
5174 366760 : Selectivity selec = 1.0;
5175 : ListCell *l;
5176 :
5177 : /*
5178 : * Make up a SpecialJoinInfo for JOIN_INNER semantics.
5179 : */
5180 366760 : init_dummy_sjinfo(&sjinfo, path->outerjoinpath->parent->relids,
5181 366760 : path->innerjoinpath->parent->relids);
5182 :
5183 : /* Get the approximate selectivity */
5184 781660 : foreach(l, quals)
5185 : {
5186 414900 : Node *qual = (Node *) lfirst(l);
5187 :
5188 : /* Note that clause_selectivity will be able to cache its result */
5189 414900 : selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
5190 : }
5191 :
5192 : /* Apply it to the input relation sizes */
5193 366760 : tuples = selec * outer_tuples * inner_tuples;
5194 :
5195 366760 : return clamp_row_est(tuples);
5196 : }
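
/*
 * Editor's note: a sketch of the quick-and-dirty estimate above, which
 * multiplies independent per-clause selectivities together and applies the
 * product to the Cartesian product of the inputs; demo_* names are
 * hypothetical. Illustration only.
 */
#include <math.h>

static double
demo_approx_tuple_count(const double *clause_selec, int nclauses,
                        double outer_rows, double inner_rows)
{
    double      selec = 1.0;
    double      tuples;
    int         i;

    for (i = 0; i < nclauses; i++)
        selec *= clause_selec[i];   /* assume clause independence */

    tuples = selec * outer_rows * inner_rows;
    return (tuples <= 1.0) ? 1.0 : rint(tuples);    /* clamp_row_est-alike */
}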
5197 :
5198 :
5199 : /*
5200 : * set_baserel_size_estimates
5201 : * Set the size estimates for the given base relation.
5202 : *
5203 : * The rel's targetlist and restrictinfo list must have been constructed
5204 : * already, and rel->tuples must be set.
5205 : *
5206 : * We set the following fields of the rel node:
5207 : * rows: the estimated number of output tuples (after applying
5208 : * restriction clauses).
5209 : * width: the estimated average output tuple width in bytes.
5210 : * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
5211 : */
5212 : void
5213 417616 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5214 : {
5215 : double nrows;
5216 :
5217 : /* Should only be applied to base relations */
5218 : Assert(rel->relid > 0);
5219 :
5220 835208 : nrows = rel->tuples *
5221 417616 : clauselist_selectivity(root,
5222 : rel->baserestrictinfo,
5223 : 0,
5224 : JOIN_INNER,
5225 : NULL);
5226 :
5227 417592 : rel->rows = clamp_row_est(nrows);
5228 :
5229 417592 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5230 :
5231 417592 : set_rel_width(root, rel);
5232 417592 : }
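
/*
 * Editor's note: a sketch of the clamping that clamp_row_est applies to
 * row estimates throughout this file: force at least one row and round to
 * an integer (the real function also caps absurdly large estimates, which
 * is elided here). The demo_* name is hypothetical. For example, a
 * million-tuple rel with selectivity 3e-7 still yields an estimate of 1.
 */
#include <math.h>

static double
demo_clamp_row_est(double nrows)
{
    return (nrows <= 1.0) ? 1.0 : rint(nrows);
}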
5233 :
5234 : /*
5235 : * get_parameterized_baserel_size
5236 : * Make a size estimate for a parameterized scan of a base relation.
5237 : *
5238 : * 'param_clauses' lists the additional join clauses to be used.
5239 : *
5240 : * set_baserel_size_estimates must have been applied already.
5241 : */
5242 : double
5243 119848 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
5244 : List *param_clauses)
5245 : {
5246 : List *allclauses;
5247 : double nrows;
5248 :
5249 : /*
5250 : * Estimate the number of rows returned by the parameterized scan, knowing
5251 : * that it will apply all the extra join clauses as well as the rel's own
5252 : * restriction clauses. Note that we force the clauses to be treated as
5253 : * non-join clauses during selectivity estimation.
5254 : */
5255 119848 : allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
5256 239696 : nrows = rel->tuples *
5257 119848 : clauselist_selectivity(root,
5258 : allclauses,
5259 119848 : rel->relid, /* do not use 0! */
5260 : JOIN_INNER,
5261 : NULL);
5262 119848 : nrows = clamp_row_est(nrows);
5263 : /* For safety, make sure result is not more than the base estimate */
5264 119848 : if (nrows > rel->rows)
5265 0 : nrows = rel->rows;
5266 119848 : return nrows;
5267 : }
5268 :
5269 : /*
5270 : * set_joinrel_size_estimates
5271 : * Set the size estimates for the given join relation.
5272 : *
5273 : * The rel's targetlist must have been constructed already, and a
5274 : * restriction clause list that matches the given component rels must
5275 : * be provided.
5276 : *
5277 : * Since there is more than one way to make a joinrel for more than two
5278 : * base relations, the results we get here could depend on which component
5279 : * rel pair is provided. In theory we should get the same answers no matter
5280 : * which pair is provided; in practice, since the selectivity estimation
5281 : * routines don't handle all cases equally well, we might not. But there's
5282 : * not much to be done about it. (Would it make sense to repeat the
5283 : * calculations for each pair of input rels that's encountered, and somehow
5284 : * average the results? Probably way more trouble than it's worth, and
5285 : * anyway we must keep the rowcount estimate the same for all paths for the
5286 : * joinrel.)
5287 : *
5288 : * We set only the rows field here. The reltarget field was already set by
5289 : * build_joinrel_tlist, and baserestrictcost is not used for join rels.
5290 : */
5291 : void
5292 171496 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5293 : RelOptInfo *outer_rel,
5294 : RelOptInfo *inner_rel,
5295 : SpecialJoinInfo *sjinfo,
5296 : List *restrictlist)
5297 : {
5298 171496 : rel->rows = calc_joinrel_size_estimate(root,
5299 : rel,
5300 : outer_rel,
5301 : inner_rel,
5302 : outer_rel->rows,
5303 : inner_rel->rows,
5304 : sjinfo,
5305 : restrictlist);
5306 171496 : }
5307 :
5308 : /*
5309 : * get_parameterized_joinrel_size
5310 : * Make a size estimate for a parameterized scan of a join relation.
5311 : *
5312 : * 'rel' is the joinrel under consideration.
5313 : * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5314 : * produce the relations being joined.
5315 : * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5316 : * 'restrict_clauses' lists the join clauses that need to be applied at the
5317 : * join node (including any movable clauses that were moved down to this join,
5318 : * and not including any movable clauses that were pushed down into the
5319 : * child paths).
5320 : *
5321 : * set_joinrel_size_estimates must have been applied already.
5322 : */
5323 : double
5324 6874 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5325 : Path *outer_path,
5326 : Path *inner_path,
5327 : SpecialJoinInfo *sjinfo,
5328 : List *restrict_clauses)
5329 : {
5330 : double nrows;
5331 :
5332 : /*
5333 : * Estimate the number of rows returned by the parameterized join as the
5334 : * sizes of the input paths times the selectivity of the clauses that have
5335 : * ended up at this join node.
5336 : *
5337 : * As with set_joinrel_size_estimates, the rowcount estimate could depend
5338 : * on the pair of input paths provided, though ideally we'd get the same
5339 : * estimate for any pair with the same parameterization.
5340 : */
5341 6874 : nrows = calc_joinrel_size_estimate(root,
5342 : rel,
5343 : outer_path->parent,
5344 : inner_path->parent,
5345 : outer_path->rows,
5346 : inner_path->rows,
5347 : sjinfo,
5348 : restrict_clauses);
5349 : /* For safety, make sure result is not more than the base estimate */
5350 6874 : if (nrows > rel->rows)
5351 12 : nrows = rel->rows;
5352 6874 : return nrows;
5353 : }
5354 :
5355 : /*
5356 : * calc_joinrel_size_estimate
5357 : * Workhorse for set_joinrel_size_estimates and
5358 : * get_parameterized_joinrel_size.
5359 : *
5360 : * outer_rel/inner_rel are the relations being joined, but they should be
5361 : * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5362 : * than what rel->rows says, when we are considering parameterized paths.
5363 : */
5364 : static double
5365 178370 : calc_joinrel_size_estimate(PlannerInfo *root,
5366 : RelOptInfo *joinrel,
5367 : RelOptInfo *outer_rel,
5368 : RelOptInfo *inner_rel,
5369 : double outer_rows,
5370 : double inner_rows,
5371 : SpecialJoinInfo *sjinfo,
5372 : List *restrictlist)
5373 : {
5374 178370 : JoinType jointype = sjinfo->jointype;
5375 : Selectivity fkselec;
5376 : Selectivity jselec;
5377 : Selectivity pselec;
5378 : double nrows;
5379 :
5380 : /*
5381 : * Compute joinclause selectivity. Note that we are only considering
5382 : * clauses that become restriction clauses at this join level; we are not
5383 : * double-counting them because they were not considered in estimating the
5384 : * sizes of the component rels.
5385 : *
5386 : * First, see whether any of the joinclauses can be matched to known FK
5387 : * constraints. If so, drop those clauses from the restrictlist, and
5388 : * instead estimate their selectivity using FK semantics. (We do this
5389 : * without regard to whether said clauses are local or "pushed down".
5390 : * Probably, an FK-matching clause could never be seen as pushed down at
5391 : * an outer join, since it would be strict and hence would be grounds for
5392 : * join strength reduction.) fkselec gets the net selectivity for
5393 : * FK-matching clauses, or 1.0 if there are none.
5394 : */
5395 178370 : fkselec = get_foreign_key_join_selectivity(root,
5396 : outer_rel->relids,
5397 : inner_rel->relids,
5398 : sjinfo,
5399 : &restrictlist);
5400 :
5401 : /*
5402 : * For an outer join, we have to distinguish the selectivity of the join's
5403 : * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5404 : * down". For inner joins we just count them all as joinclauses.
5405 : */
5406 178370 : if (IS_OUTER_JOIN(jointype))
5407 : {
5408 66186 : List *joinquals = NIL;
5409 66186 : List *pushedquals = NIL;
5410 : ListCell *l;
5411 :
5412 : /* Grovel through the clauses to separate into two lists */
5413 146340 : foreach(l, restrictlist)
5414 : {
5415 80154 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5416 :
5417 80154 : if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5418 3750 : pushedquals = lappend(pushedquals, rinfo);
5419 : else
5420 76404 : joinquals = lappend(joinquals, rinfo);
5421 : }
5422 :
5423 : /* Get the separate selectivities */
5424 66186 : jselec = clauselist_selectivity(root,
5425 : joinquals,
5426 : 0,
5427 : jointype,
5428 : sjinfo);
5429 66186 : pselec = clauselist_selectivity(root,
5430 : pushedquals,
5431 : 0,
5432 : jointype,
5433 : sjinfo);
5434 :
5435 : /* Avoid leaking a lot of ListCells */
5436 66186 : list_free(joinquals);
5437 66186 : list_free(pushedquals);
5438 : }
5439 : else
5440 : {
5441 112184 : jselec = clauselist_selectivity(root,
5442 : restrictlist,
5443 : 0,
5444 : jointype,
5445 : sjinfo);
5446 112184 : pselec = 0.0; /* not used, keep compiler quiet */
5447 : }
5448 :
5449 : /*
5450 : * Basically, we multiply size of Cartesian product by selectivity.
5451 : *
5452 : * If we are doing an outer join, take that into account: the joinqual
5453 : * selectivity has to be clamped using the knowledge that the output must
5454 : * be at least as large as the non-nullable input. However, any
5455 : * pushed-down quals are applied after the outer join, so their
5456 : * selectivity applies fully.
5457 : *
5458 : * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5459 : * of LHS rows that have matches, and we apply that straightforwardly.
5460 : */
5461 178370 : switch (jointype)
5462 : {
5463 107632 : case JOIN_INNER:
5464 107632 : nrows = outer_rows * inner_rows * fkselec * jselec;
5465 : /* pselec not used */
5466 107632 : break;
5467 60098 : case JOIN_LEFT:
5468 60098 : nrows = outer_rows * inner_rows * fkselec * jselec;
5469 60098 : if (nrows < outer_rows)
5470 19204 : nrows = outer_rows;
5471 60098 : nrows *= pselec;
5472 60098 : break;
5473 1678 : case JOIN_FULL:
5474 1678 : nrows = outer_rows * inner_rows * fkselec * jselec;
5475 1678 : if (nrows < outer_rows)
5476 1080 : nrows = outer_rows;
5477 1678 : if (nrows < inner_rows)
5478 120 : nrows = inner_rows;
5479 1678 : nrows *= pselec;
5480 1678 : break;
5481 4552 : case JOIN_SEMI:
5482 4552 : nrows = outer_rows * fkselec * jselec;
5483 : /* pselec not used */
5484 4552 : break;
5485 4410 : case JOIN_ANTI:
5486 4410 : nrows = outer_rows * (1.0 - fkselec * jselec);
5487 4410 : nrows *= pselec;
5488 4410 : break;
5489 0 : default:
5490 : /* other values not expected here */
5491 0 : elog(ERROR, "unrecognized join type: %d", (int) jointype);
5492 : nrows = 0; /* keep compiler quiet */
5493 : break;
5494 : }
5495 :
5496 178370 : return clamp_row_est(nrows);
5497 : }
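
/*
 * Editor's note: a sketch of the per-jointype row arithmetic above, with
 * hypothetical demo_* names; the final clamp_row_est clamping is elided.
 * Illustration only.
 */
typedef enum DemoJoinType
{
    DEMO_JOIN_INNER,
    DEMO_JOIN_LEFT,
    DEMO_JOIN_FULL,
    DEMO_JOIN_SEMI,
    DEMO_JOIN_ANTI
} DemoJoinType;

static double
demo_join_rows(DemoJoinType jointype,
               double outer_rows, double inner_rows,
               double fkselec, double jselec, double pselec)
{
    double      nrows;

    switch (jointype)
    {
        case DEMO_JOIN_INNER:
            nrows = outer_rows * inner_rows * fkselec * jselec;
            break;
        case DEMO_JOIN_LEFT:
            nrows = outer_rows * inner_rows * fkselec * jselec;
            if (nrows < outer_rows)
                nrows = outer_rows; /* every outer row appears at least once */
            nrows *= pselec;    /* pushed-down quals filter afterwards */
            break;
        case DEMO_JOIN_FULL:
            nrows = outer_rows * inner_rows * fkselec * jselec;
            if (nrows < outer_rows)
                nrows = outer_rows;
            if (nrows < inner_rows)
                nrows = inner_rows; /* clamp against both inputs */
            nrows *= pselec;
            break;
        case DEMO_JOIN_SEMI:
            nrows = outer_rows * fkselec * jselec;
            break;
        case DEMO_JOIN_ANTI:
        default:
            nrows = outer_rows * (1.0 - fkselec * jselec);
            nrows *= pselec;
            break;
    }
    return nrows;
}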
5498 :
5499 : /*
5500 : * get_foreign_key_join_selectivity
5501 : * Estimate join selectivity for foreign-key-related clauses.
5502 : *
5503 : * Remove any clauses that can be matched to FK constraints from *restrictlist,
5504 : * and return a substitute estimate of their selectivity. 1.0 is returned
5505 : * when there are no such clauses.
5506 : *
5507 : * The reason for treating such clauses specially is that we can get better
5508 : * estimates this way than by relying on clauselist_selectivity(), especially
5509 : * for multi-column FKs where that function's assumption that the clauses are
5510 : * independent falls down badly. But even with single-column FKs, we may be
5511 : * able to get a better answer when the pg_statistic stats are missing or out
5512 : * of date.
5513 : */
5514 : static Selectivity
5515 178370 : get_foreign_key_join_selectivity(PlannerInfo *root,
5516 : Relids outer_relids,
5517 : Relids inner_relids,
5518 : SpecialJoinInfo *sjinfo,
5519 : List **restrictlist)
5520 : {
5521 178370 : Selectivity fkselec = 1.0;
5522 178370 : JoinType jointype = sjinfo->jointype;
5523 178370 : List *worklist = *restrictlist;
5524 : ListCell *lc;
5525 :
5526 : /* Consider each FK constraint that is known to match the query */
5527 180270 : foreach(lc, root->fkey_list)
5528 : {
5529 1900 : ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5530 : bool ref_is_outer;
5531 : List *removedlist;
5532 : ListCell *cell;
5533 :
5534 : /*
5535 : * This FK is not relevant unless it connects a baserel on one side of
5536 : * this join to a baserel on the other side.
5537 : */
5538 3454 : if (bms_is_member(fkinfo->con_relid, outer_relids) &&
5539 1554 : bms_is_member(fkinfo->ref_relid, inner_relids))
5540 1362 : ref_is_outer = false;
5541 884 : else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5542 346 : bms_is_member(fkinfo->con_relid, inner_relids))
5543 124 : ref_is_outer = true;
5544 : else
5545 414 : continue;
5546 :
5547 : /*
5548 : * If we're dealing with a semi/anti join, and the FK's referenced
5549 : * relation is on the outside, then knowledge of the FK doesn't help
5550 : * us figure out what we need to know (which is the fraction of outer
5551 : * rows that have matches). On the other hand, if the referenced rel
5552 : * is on the inside, then all outer rows must have matches in the
5553 : * referenced table (ignoring nulls). But any restriction or join
5554 : * clauses that filter that table will reduce the fraction of matches.
5555 : * We can account for restriction clauses, but it's too hard to guess
5556 : * how many table rows would get through a join that's inside the RHS.
5557 : * Hence, if either case applies, punt and ignore the FK.
5558 : */
5559 1486 : if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
5560 970 : (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
5561 6 : continue;
5562 :
5563 : /*
5564 : * Modify the restrictlist by removing clauses that match the FK (and
5565 : * putting them into removedlist instead). It seems unsafe to modify
5566 : * the originally-passed List structure, so we make a shallow copy the
5567 : * first time through.
5568 : */
5569 1480 : if (worklist == *restrictlist)
5570 1256 : worklist = list_copy(worklist);
5571 :
5572 1480 : removedlist = NIL;
5573 3036 : foreach(cell, worklist)
5574 : {
5575 1556 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5576 1556 : bool remove_it = false;
5577 : int i;
5578 :
5579 : /* Drop this clause if it matches any column of the FK */
5580 1942 : for (i = 0; i < fkinfo->nkeys; i++)
5581 : {
5582 1912 : if (rinfo->parent_ec)
5583 : {
5584 : /*
5585 : * EC-derived clauses can only match by EC. It is okay to
5586 : * consider any clause derived from the same EC as
5587 : * matching the FK: even if equivclass.c chose to generate
5588 : * a clause equating some other pair of Vars, it could
5589 : * have generated one equating the FK's Vars. So for
5590 : * purposes of estimation, we can act as though it did so.
5591 : *
5592 : * Note: checking parent_ec is a bit of a cheat because
5593 : * there are EC-derived clauses that don't have parent_ec
5594 : * set; but such clauses must compare expressions that
5595 : * aren't just Vars, so they cannot match the FK anyway.
5596 : */
5597 304 : if (fkinfo->eclass[i] == rinfo->parent_ec)
5598 : {
5599 298 : remove_it = true;
5600 298 : break;
5601 : }
5602 : }
5603 : else
5604 : {
5605 : /*
5606 : * Otherwise, see if rinfo was previously matched to FK as
5607 : * a "loose" clause.
5608 : */
5609 1608 : if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5610 : {
5611 1228 : remove_it = true;
5612 1228 : break;
5613 : }
5614 : }
5615 : }
5616 1556 : if (remove_it)
5617 : {
5618 1526 : worklist = foreach_delete_current(worklist, cell);
5619 1526 : removedlist = lappend(removedlist, rinfo);
5620 : }
5621 : }
5622 :
5623 : /*
5624 : * If we failed to remove all the matching clauses we expected to
5625 : * find, chicken out and ignore this FK; applying its selectivity
5626 : * might result in double-counting. Put any clauses we did manage to
5627 : * remove back into the worklist.
5628 : *
5629 : * Since the matching clauses are known not outerjoin-delayed, they
5630 : * would normally have appeared in the initial joinclause list. If we
5631 : * didn't find them, there are two possibilities:
5632 : *
5633 : * 1. If the FK match is based on an EC that is ec_has_const, it won't
5634 : * have generated any join clauses at all. We discount such ECs while
5635 : * checking to see if we have "all" the clauses. (Below, we'll adjust
5636 : * the selectivity estimate for this case.)
5637 : *
5638 : * 2. The clauses were matched to some other FK in a previous
5639 : * iteration of this loop, and thus removed from worklist. (A likely
5640 : * case is that two FKs are matched to the same EC; there will be only
5641 : * one EC-derived clause in the initial list, so the first FK will
5642 : * consume it.) Applying both FKs' selectivity independently risks
5643 : * underestimating the join size; in particular, this would undo one
5644 : * of the main things that ECs were invented for, namely to avoid
5645 : * double-counting the selectivity of redundant equality conditions.
5646 : * Later we might think of a reasonable way to combine the estimates,
5647 : * but for now, just punt, since this is a fairly uncommon situation.
5648 : */
5649 1480 : if (removedlist == NIL ||
5650 1194 : list_length(removedlist) !=
5651 1194 : (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5652 : {
5653 286 : worklist = list_concat(worklist, removedlist);
5654 286 : continue;
5655 : }
5656 :
5657 : /*
5658 : * Finally we get to the payoff: estimate selectivity using the
5659 : * knowledge that each referencing row will match exactly one row in
5660 : * the referenced table.
5661 : *
5662 : * XXX that's not true in the presence of nulls in the referencing
5663 : * column(s), so in principle we should derate the estimate for those.
5664 : * However (1) if there are any strict restriction clauses for the
5665 : * referencing column(s) elsewhere in the query, derating here would
5666 : * be double-counting the null fraction, and (2) it's not very clear
5667 : * how to combine null fractions for multiple referencing columns. So
5668 : * we do nothing for now about correcting for nulls.
5669 : *
5670 : * XXX another point here is that if either side of an FK constraint
5671 : * is an inheritance parent, we estimate as though the constraint
5672 : * covers all its children as well. This is not an unreasonable
5673 : * assumption for a referencing table, ie the user probably applied
5674 : * identical constraints to all child tables (though perhaps we ought
5675 : * to check that). But it's not possible to have done that for a
5676 : * referenced table. Fortunately, precisely because that doesn't
5677 : * work, it is uncommon in practice to have an FK referencing a parent
5678 : * table. So, at least for now, disregard inheritance here.
5679 : */
5680 1194 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
5681 746 : {
5682 : /*
5683 : * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5684 : * referenced table is exactly the inside of the join. The join
5685 : * selectivity is defined as the fraction of LHS rows that have
5686 : * matches. The FK implies that every LHS row has a match *in the
5687 : * referenced table*; but any restriction clauses on it will
5688 : * reduce the number of matches. Hence we take the join
5689 : * selectivity as equal to the selectivity of the table's
5690 : * restriction clauses, which is rows / tuples; but we must guard
5691 : * against tuples == 0.
5692 : */
5693 746 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5694 746 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5695 :
5696 746 : fkselec *= ref_rel->rows / ref_tuples;
5697 : }
5698 : else
5699 : {
5700 : /*
5701 : * Otherwise, selectivity is exactly 1/referenced-table-size; but
5702 : * guard against tuples == 0. Note we should use the raw table
5703 : * tuple count, not any estimate of its filtered or joined size.
5704 : */
5705 448 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5706 448 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5707 :
5708 448 : fkselec *= 1.0 / ref_tuples;
5709 : }
5710 :
5711 : /*
5712 : * If any of the FK columns participated in ec_has_const ECs, then
5713 : * equivclass.c will have generated "var = const" restrictions for
5714 : * each side of the join, thus reducing the sizes of both input
5715 : * relations. Taking the fkselec at face value would amount to
5716 : * double-counting the selectivity of the constant restriction for the
5717 : * referencing Var. Hence, look for the restriction clause(s) that
5718 : * were applied to the referencing Var(s), and divide out their
5719 : * selectivity to correct for this.
5720 : */
5721 1194 : if (fkinfo->nconst_ec > 0)
5722 : {
5723 24 : for (int i = 0; i < fkinfo->nkeys; i++)
5724 : {
5725 18 : EquivalenceClass *ec = fkinfo->eclass[i];
5726 :
5727 18 : if (ec && ec->ec_has_const)
5728 : {
5729 6 : EquivalenceMember *em = fkinfo->fk_eclass_member[i];
5730 6 : RestrictInfo *rinfo = find_derived_clause_for_ec_member(ec,
5731 : em);
5732 :
5733 6 : if (rinfo)
5734 : {
5735 : Selectivity s0;
5736 :
5737 6 : s0 = clause_selectivity(root,
5738 : (Node *) rinfo,
5739 : 0,
5740 : jointype,
5741 : sjinfo);
5742 6 : if (s0 > 0)
5743 6 : fkselec /= s0;
5744 : }
5745 : }
5746 : }
5747 : }
5748 : }
5749 :
5750 178370 : *restrictlist = worklist;
5751 178370 : CLAMP_PROBABILITY(fkselec);
5752 178370 : return fkselec;
5753 : }
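/*
 * Illustrative numbers (not from the source): suppose an FK whose
 * referenced table has reltuples = 10000, and whose restriction clauses
 * are expected to pass 1000 rows.  For a semi/anti join with the
 * referenced table on the inside, the first branch above yields
 *
 *     fkselec = 1000 / 10000 = 0.1
 *
 * i.e. 10% of LHS rows are expected to find a surviving match.  For an
 * ordinary inner or outer join, the other branch yields
 *
 *     fkselec = 1.0 / 10000 = 0.0001
 *
 * reflecting that each referencing row matches exactly one of the 10000
 * referenced rows.
 */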
5754 :
5755 : /*
5756 : * set_subquery_size_estimates
5757 : * Set the size estimates for a base relation that is a subquery.
5758 : *
5759 : * The rel's targetlist and restrictinfo list must have been constructed
5760 : * already, and the Paths for the subquery must have been completed.
5761 : * We look at the subquery's PlannerInfo to extract data.
5762 : *
5763 : * We set the same fields as set_baserel_size_estimates.
5764 : */
5765 : void
5766 19796 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5767 : {
5768 19796 : PlannerInfo *subroot = rel->subroot;
5769 : RelOptInfo *sub_final_rel;
5770 : ListCell *lc;
5771 :
5772 : /* Should only be applied to base relations that are subqueries */
5773 : Assert(rel->relid > 0);
5774 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
5775 :
5776 : /*
5777 : * Copy raw number of output rows from subquery. All of its paths should
5778 : * have the same output rowcount, so just look at cheapest-total.
5779 : */
5780 19796 : sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
5781 19796 : rel->tuples = sub_final_rel->cheapest_total_path->rows;
5782 :
5783 : /*
5784 : * Compute per-output-column width estimates by examining the subquery's
5785 : * targetlist. For any output that is a plain Var, get the width estimate
5786 : * that was made while planning the subquery. Otherwise, we leave it to
5787 : * set_rel_width to fill in a datatype-based default estimate.
5788 : */
5789 79940 : foreach(lc, subroot->parse->targetList)
5790 : {
5791 60144 : TargetEntry *te = lfirst_node(TargetEntry, lc);
5792 60144 : Node *texpr = (Node *) te->expr;
5793 60144 : int32 item_width = 0;
5794 :
5795 : /* junk columns aren't visible to upper query */
5796 60144 : if (te->resjunk)
5797 620 : continue;
5798 :
5799 : /*
5800 : * The subquery could be an expansion of a view that's had columns
5801 : * added to it since the current query was parsed, so that there are
5802 : * non-junk tlist columns in it that don't correspond to any column
5803 : * visible at our query level. Ignore such columns.
5804 : */
5805 59524 : if (te->resno < rel->min_attr || te->resno > rel->max_attr)
5806 0 : continue;
5807 :
5808 : /*
5809 : * XXX This currently doesn't work for subqueries containing set
5810 : * operations, because the Vars in their tlists are bogus references
5811 : * to the first leaf subquery, which wouldn't give the right answer
5812 : * even if we could still get to its PlannerInfo.
5813 : *
5814 : * Also, the subquery could be an appendrel for which all branches are
5815 : * known empty due to constraint exclusion, in which case
5816 : * set_append_rel_pathlist will have left the attr_widths set to zero.
5817 : *
5818 : * In either case, we just leave the width estimate zero until
5819 : * set_rel_width fixes it.
5820 : */
5821 59524 : if (IsA(texpr, Var) &&
5822 27150 : subroot->parse->setOperations == NULL)
5823 : {
5824 25676 : Var *var = (Var *) texpr;
5825 25676 : RelOptInfo *subrel = find_base_rel(subroot, var->varno);
5826 :
5827 25676 : item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
5828 : }
5829 59524 : rel->attr_widths[te->resno - rel->min_attr] = item_width;
5830 : }
5831 :
5832 : /* Now estimate number of output rows, etc */
5833 19796 : set_baserel_size_estimates(root, rel);
5834 19796 : }
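/*
 * Example (hypothetical subquery, for illustration): if the subquery is
 * "SELECT a, a + 1 FROM t", output column 1 is a plain Var, so its width
 * is copied from t's cached attr_widths entry; column 2 is an expression
 * and stays at zero here, to be filled in with a get_typavgwidth()-based
 * default when set_baserel_size_estimates() reaches set_rel_width().
 */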
5835 :
5836 : /*
5837 : * set_function_size_estimates
5838 : * Set the size estimates for a base relation that is a function call.
5839 : *
5840 : * The rel's targetlist and restrictinfo list must have been constructed
5841 : * already.
5842 : *
5843 : * We set the same fields as set_baserel_size_estimates.
5844 : */
5845 : void
5846 39568 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5847 : {
5848 : RangeTblEntry *rte;
5849 : ListCell *lc;
5850 :
5851 : /* Should only be applied to base relations that are functions */
5852 : Assert(rel->relid > 0);
5853 39568 : rte = planner_rt_fetch(rel->relid, root);
5854 : Assert(rte->rtekind == RTE_FUNCTION);
5855 :
5856 : /*
5857 : * Estimate number of rows the functions will return. The rowcount of the
5858 : * node is that of the largest function result.
5859 : */
5860 39568 : rel->tuples = 0;
5861 79448 : foreach(lc, rte->functions)
5862 : {
5863 39880 : RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
5864 39880 : double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
5865 :
5866 39880 : if (ntup > rel->tuples)
5867 39592 : rel->tuples = ntup;
5868 : }
5869 :
5870 : /* Now estimate number of output rows, etc */
5871 39568 : set_baserel_size_estimates(root, rel);
5872 39568 : }
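/*
 * Example (hypothetical functions): for "ROWS FROM (f(), g())" where f()
 * is declared with ROWS 10 and g() with ROWS 100,
 * expression_returns_set_rows() reports 10 and 100 respectively, and the
 * loop above keeps the maximum, so rel->tuples = 100; at execution the
 * shorter result is null-padded, so the largest function drives the
 * row count.
 */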
5873 :
5874 : /*
5875 : * set_tablefunc_size_estimates
5876 : * Set the size estimates for a base relation that is a table function.
5877 : *
5878 : * The rel's targetlist and restrictinfo list must have been constructed
5879 : * already.
5880 : *
5881 : * We set the same fields as set_baserel_size_estimates.
5882 : */
5883 : void
5884 548 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5885 : {
5886 : /* Should only be applied to base relations that are table functions */
5887 : Assert(rel->relid > 0);
5888 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
5889 :
5890 548 : rel->tuples = 100;
5891 :
5892 : /* Now estimate number of output rows, etc */
5893 548 : set_baserel_size_estimates(root, rel);
5894 548 : }
5895 :
5896 : /*
5897 : * set_values_size_estimates
5898 : * Set the size estimates for a base relation that is a values list.
5899 : *
5900 : * The rel's targetlist and restrictinfo list must have been constructed
5901 : * already.
5902 : *
5903 : * We set the same fields as set_baserel_size_estimates.
5904 : */
5905 : void
5906 7660 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5907 : {
5908 : RangeTblEntry *rte;
5909 :
5910 : /* Should only be applied to base relations that are values lists */
5911 : Assert(rel->relid > 0);
5912 7660 : rte = planner_rt_fetch(rel->relid, root);
5913 : Assert(rte->rtekind == RTE_VALUES);
5914 :
5915 : /*
5916 : * Estimate number of rows the values list will return. We know this
5917 : * precisely based on the list length (well, barring set-returning
5918 : * functions in list items, but that's a refinement not catered for
5919 : * anywhere else either).
5920 : */
5921 7660 : rel->tuples = list_length(rte->values_lists);
5922 :
5923 : /* Now estimate number of output rows, etc */
5924 7660 : set_baserel_size_estimates(root, rel);
5925 7660 : }
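/*
 * Example: "VALUES (1), (2), (3)" has a three-element values_lists, so
 * rel->tuples = 3 exactly (modulo the set-returning-function caveat
 * noted above).
 */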
5926 :
5927 : /*
5928 : * set_cte_size_estimates
5929 : * Set the size estimates for a base relation that is a CTE reference.
5930 : *
5931 : * The rel's targetlist and restrictinfo list must have been constructed
5932 : * already, and we need an estimate of the number of rows returned by the CTE
5933 : * (if a regular CTE) or the non-recursive term (if a self-reference).
5934 : *
5935 : * We set the same fields as set_baserel_size_estimates.
5936 : */
5937 : void
5938 4046 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
5939 : {
5940 : RangeTblEntry *rte;
5941 :
5942 : /* Should only be applied to base relations that are CTE references */
5943 : Assert(rel->relid > 0);
5944 4046 : rte = planner_rt_fetch(rel->relid, root);
5945 : Assert(rte->rtekind == RTE_CTE);
5946 :
5947 4046 : if (rte->self_reference)
5948 : {
5949 : /*
5950 : * In a self-reference, we assume the average worktable size is a
5951 : * multiple of the nonrecursive term's size. The best multiplier will
5952 : * vary depending on query "fan-out", so make its value adjustable.
5953 : */
5954 810 : rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
5955 : }
5956 : else
5957 : {
5958 : /* Otherwise just believe the CTE's rowcount estimate */
5959 3236 : rel->tuples = cte_rows;
5960 : }
5961 :
5962 : /* Now estimate number of output rows, etc */
5963 4046 : set_baserel_size_estimates(root, rel);
5964 4046 : }
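/*
 * Worked example: recursive_worktable_factor is a user-settable GUC
 * (default 10.0), so a self-reference whose non-recursive term is
 * estimated at 100 rows gets
 *
 *     rel->tuples = clamp_row_est(10.0 * 100) = 1000
 *
 * while an ordinary CTE reference simply believes cte_rows.
 */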
5965 :
5966 : /*
5967 : * set_namedtuplestore_size_estimates
5968 : * Set the size estimates for a base relation that is a tuplestore reference.
5969 : *
5970 : * The rel's targetlist and restrictinfo list must have been constructed
5971 : * already.
5972 : *
5973 : * We set the same fields as set_baserel_size_estimates.
5974 : */
5975 : void
5976 442 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5977 : {
5978 : RangeTblEntry *rte;
5979 :
5980 : /* Should only be applied to base relations that are tuplestore references */
5981 : Assert(rel->relid > 0);
5982 442 : rte = planner_rt_fetch(rel->relid, root);
5983 : Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
5984 :
5985 : /*
5986 : * Use the estimate provided by the code which is generating the named
5987 : * tuplestore. In some cases, the actual number might be available; in
5988 : * others the same plan will be re-used, so a "typical" value might be
5989 : * estimated and used.
5990 : */
5991 442 : rel->tuples = rte->enrtuples;
5992 442 : if (rel->tuples < 0)
5993 0 : rel->tuples = 1000;
5994 :
5995 : /* Now estimate number of output rows, etc */
5996 442 : set_baserel_size_estimates(root, rel);
5997 442 : }
5998 :
5999 : /*
6000 : * set_result_size_estimates
6001 : * Set the size estimates for an RTE_RESULT base relation
6002 : *
6003 : * The rel's targetlist and restrictinfo list must have been constructed
6004 : * already.
6005 : *
6006 : * We set the same fields as set_baserel_size_estimates.
6007 : */
6008 : void
6009 1506 : set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6010 : {
6011 : /* Should only be applied to RTE_RESULT base relations */
6012 : Assert(rel->relid > 0);
6013 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
6014 :
6015 : /* RTE_RESULT always generates a single row, natively */
6016 1506 : rel->tuples = 1;
6017 :
6018 : /* Now estimate number of output rows, etc */
6019 1506 : set_baserel_size_estimates(root, rel);
6020 1506 : }
6021 :
6022 : /*
6023 : * set_foreign_size_estimates
6024 : * Set the size estimates for a base relation that is a foreign table.
6025 : *
6026 : * There is not a whole lot that we can do here; the foreign-data wrapper
6027 : * is responsible for producing useful estimates. We can do a decent job
6028 : * of estimating baserestrictcost, so we set that, and we also set up width
6029 : * using what will be purely datatype-driven estimates from the targetlist.
6030 : * There is no way to do anything sane with the rows value, so we just put
6031 : * a default estimate and hope that the wrapper can improve on it. The
6032 : * wrapper's GetForeignRelSize function will be called momentarily.
6033 : *
6034 : * The rel's targetlist and restrictinfo list must have been constructed
6035 : * already.
6036 : */
6037 : void
6038 2358 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6039 : {
6040 : /* Should only be applied to base relations */
6041 : Assert(rel->relid > 0);
6042 :
6043 2358 : rel->rows = 1000; /* entirely bogus default estimate */
6044 :
6045 2358 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
6046 :
6047 2358 : set_rel_width(root, rel);
6048 2358 : }
6049 :
6050 :
6051 : /*
6052 : * set_rel_width
6053 : * Set the estimated output width of a base relation.
6054 : *
6055 : * The estimated output width is the sum of the per-attribute width estimates
6056 : * for the actually-referenced columns, plus any PHVs or other expressions
6057 : * that have to be calculated at this relation. This is the amount of data
6058 : * we'd need to pass upwards in case of a sort, hash, etc.
6059 : *
6060 : * This function also sets reltarget->cost, so it's a bit misnamed now.
6061 : *
6062 : * NB: this works best on plain relations because it prefers to look at
6063 : * real Vars. For subqueries, set_subquery_size_estimates will already have
6064 : * copied up whatever per-column estimates were made within the subquery,
6065 : * and for other types of rels there isn't much we can do anyway. We fall
6066 : * back on (fairly stupid) datatype-based width estimates if we can't get
6067 : * any better number.
6068 : *
6069 : * The per-attribute width estimates are cached for possible re-use while
6070 : * building join relations or post-scan/join pathtargets.
6071 : */
6072 : static void
6073 419950 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
6074 : {
6075 419950 : Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
6076 419950 : int64 tuple_width = 0;
6077 419950 : bool have_wholerow_var = false;
6078 : ListCell *lc;
6079 :
6080 : /* Vars are assumed to have cost zero, but other exprs do not */
6081 419950 : rel->reltarget->cost.startup = 0;
6082 419950 : rel->reltarget->cost.per_tuple = 0;
6083 :
6084 1475028 : foreach(lc, rel->reltarget->exprs)
6085 : {
6086 1055078 : Node *node = (Node *) lfirst(lc);
6087 :
6088 : /*
6089 : * Ordinarily, a Var in a rel's targetlist must belong to that rel;
6090 : * but there are corner cases involving LATERAL references where that
6091 : * isn't so. If the Var has the wrong varno, fall through to the
6092 : * generic case (it doesn't seem worth the trouble to be any smarter).
6093 : */
6094 1055078 : if (IsA(node, Var) &&
6095 1036858 : ((Var *) node)->varno == rel->relid)
6096 275474 : {
6097 1036792 : Var *var = (Var *) node;
6098 : int ndx;
6099 : int32 item_width;
6100 :
6101 : Assert(var->varattno >= rel->min_attr);
6102 : Assert(var->varattno <= rel->max_attr);
6103 :
6104 1036792 : ndx = var->varattno - rel->min_attr;
6105 :
6106 : /*
6107 : * If it's a whole-row Var, we'll deal with it below after we have
6108 : * already cached as many attr widths as possible.
6109 : */
6110 1036792 : if (var->varattno == 0)
6111 : {
6112 2758 : have_wholerow_var = true;
6113 2758 : continue;
6114 : }
6115 :
6116 : /*
6117 : * The width may have been cached already (especially if it's a
6118 : * subquery), so don't duplicate effort.
6119 : */
6120 1034034 : if (rel->attr_widths[ndx] > 0)
6121 : {
6122 212962 : tuple_width += rel->attr_widths[ndx];
6123 212962 : continue;
6124 : }
6125 :
6126 : /* Try to get column width from statistics */
6127 821072 : if (reloid != InvalidOid && var->varattno > 0)
6128 : {
6129 639166 : item_width = get_attavgwidth(reloid, var->varattno);
6130 639166 : if (item_width > 0)
6131 : {
6132 545598 : rel->attr_widths[ndx] = item_width;
6133 545598 : tuple_width += item_width;
6134 545598 : continue;
6135 : }
6136 : }
6137 :
6138 : /*
6139 : * Not a plain relation, or can't find statistics for it. Estimate
6140 : * using just the type info.
6141 : */
6142 275474 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
6143 : Assert(item_width > 0);
6144 275474 : rel->attr_widths[ndx] = item_width;
6145 275474 : tuple_width += item_width;
6146 : }
6147 18286 : else if (IsA(node, PlaceHolderVar))
6148 : {
6149 : /*
6150 : * We will need to evaluate the PHV's contained expression while
6151 : * scanning this rel, so be sure to include it in reltarget->cost.
6152 : */
6153 1144 : PlaceHolderVar *phv = (PlaceHolderVar *) node;
6154 1144 : PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
6155 : QualCost cost;
6156 :
6157 1144 : tuple_width += phinfo->ph_width;
6158 1144 : cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
6159 1144 : rel->reltarget->cost.startup += cost.startup;
6160 1144 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6161 : }
6162 : else
6163 : {
6164 : /*
6165 : * We could be looking at an expression pulled up from a subquery,
6166 : * or a ROW() representing a whole-row child Var, etc. Do what we
6167 : * can using the expression type information.
6168 : */
6169 : int32 item_width;
6170 : QualCost cost;
6171 :
6172 17142 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
6173 : Assert(item_width > 0);
6174 17142 : tuple_width += item_width;
6175 : /* Not entirely clear if we need to account for cost, but do so */
6176 17142 : cost_qual_eval_node(&cost, node, root);
6177 17142 : rel->reltarget->cost.startup += cost.startup;
6178 17142 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6179 : }
6180 : }
6181 :
6182 : /*
6183 : * If we have a whole-row reference, estimate its width as the sum of
6184 : * per-column widths plus heap tuple header overhead.
6185 : */
6186 419950 : if (have_wholerow_var)
6187 : {
6188 2758 : int64 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
6189 :
6190 2758 : if (reloid != InvalidOid)
6191 : {
6192 : /* Real relation, so estimate true tuple width */
6193 2172 : wholerow_width += get_relation_data_width(reloid,
6194 2172 : rel->attr_widths - rel->min_attr);
6195 : }
6196 : else
6197 : {
6198 : /* Do what we can with info for a phony rel */
6199 : AttrNumber i;
6200 :
6201 1512 : for (i = 1; i <= rel->max_attr; i++)
6202 926 : wholerow_width += rel->attr_widths[i - rel->min_attr];
6203 : }
6204 :
6205 2758 : rel->attr_widths[0 - rel->min_attr] = clamp_width_est(wholerow_width);
6206 :
6207 : /*
6208 : * Include the whole-row Var as part of the output tuple. Yes, that
6209 : * really is what happens at runtime.
6210 : */
6211 2758 : tuple_width += wholerow_width;
6212 : }
6213 :
6214 419950 : rel->reltarget->width = clamp_width_est(tuple_width);
6215 419950 : }
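/*
 * Illustration of the lookup cascade above (hypothetical table): for
 * "SELECT id, note FROM t" after ANALYZE, "id" (int4) gets its average
 * width from pg_statistic via get_attavgwidth(), typically 4; if "note"
 * (text) has no statistics yet, get_attavgwidth() returns 0 and we fall
 * back to get_typavgwidth(), whose default for unbounded varlena types
 * is 32.  Either way the per-column result is cached in rel->attr_widths
 * for re-use by get_expr_width() and join planning.
 */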
6216 :
6217 : /*
6218 : * set_pathtarget_cost_width
6219 : * Set the estimated eval cost and output width of a PathTarget tlist.
6220 : *
6221 : * As a notational convenience, returns the same PathTarget pointer passed in.
6222 : *
6223 : * Most, though not quite all, uses of this function occur after we've run
6224 : * set_rel_width() for base relations; so we can usually obtain cached width
6225 : * estimates for Vars. If we can't, fall back on datatype-based width
6226 : * estimates. Present early-planning uses of PathTargets don't need accurate
6227 : * widths badly enough to justify going to the catalogs for better data.
6228 : */
6229 : PathTarget *
6230 551348 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6231 : {
6232 551348 : int64 tuple_width = 0;
6233 : ListCell *lc;
6234 :
6235 : /* Vars are assumed to have cost zero, but other exprs do not */
6236 551348 : target->cost.startup = 0;
6237 551348 : target->cost.per_tuple = 0;
6238 :
6239 1823962 : foreach(lc, target->exprs)
6240 : {
6241 1272614 : Node *node = (Node *) lfirst(lc);
6242 :
6243 1272614 : tuple_width += get_expr_width(root, node);
6244 :
6245 : /* For non-Vars, account for evaluation cost */
6246 1272614 : if (!IsA(node, Var))
6247 : {
6248 : QualCost cost;
6249 :
6250 566964 : cost_qual_eval_node(&cost, node, root);
6251 566964 : target->cost.startup += cost.startup;
6252 566964 : target->cost.per_tuple += cost.per_tuple;
6253 : }
6254 : }
6255 :
6256 551348 : target->width = clamp_width_est(tuple_width);
6257 :
6258 551348 : return target;
6259 : }
6260 :
6261 : /*
6262 : * get_expr_width
6263 : * Estimate the width of the given expr attempting to use the width
6264 : * cached in a Var's owning RelOptInfo, else fallback on the type's
6265 : * average width when unable to or when the given Node is not a Var.
6266 : */
6267 : static int32
6268 1515746 : get_expr_width(PlannerInfo *root, const Node *expr)
6269 : {
6270 : int32 width;
6271 :
6272 1515746 : if (IsA(expr, Var))
6273 : {
6274 941420 : const Var *var = (const Var *) expr;
6275 :
6276 : /* We should not see any upper-level Vars here */
6277 : Assert(var->varlevelsup == 0);
6278 :
6279 : /* Try to get data from RelOptInfo cache */
6280 941420 : if (!IS_SPECIAL_VARNO(var->varno) &&
6281 936474 : var->varno < root->simple_rel_array_size)
6282 : {
6283 936474 : RelOptInfo *rel = root->simple_rel_array[var->varno];
6284 :
6285 936474 : if (rel != NULL &&
6286 913528 : var->varattno >= rel->min_attr &&
6287 913528 : var->varattno <= rel->max_attr)
6288 : {
6289 913528 : int ndx = var->varattno - rel->min_attr;
6290 :
6291 913528 : if (rel->attr_widths[ndx] > 0)
6292 888794 : return rel->attr_widths[ndx];
6293 : }
6294 : }
6295 :
6296 : /*
6297 : * No cached data available, so estimate using just the type info.
6298 : */
6299 52626 : width = get_typavgwidth(var->vartype, var->vartypmod);
6300 : Assert(width > 0);
6301 :
6302 52626 : return width;
6303 : }
6304 :
6305 574326 : width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6306 : Assert(width > 0);
6307 574326 : return width;
6308 : }
6309 :
6310 : /*
6311 : * relation_byte_size
6312 : * Estimate the storage space in bytes for a given number of tuples
6313 : * of a given width (size in bytes).
6314 : */
6315 : static double
6316 3031418 : relation_byte_size(double tuples, int width)
6317 : {
6318 3031418 : return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
6319 : }
6320 :
6321 : /*
6322 : * page_size
6323 : * Returns an estimate of the number of pages covered by a given
6324 : * number of tuples of a given width (size in bytes).
6325 : */
6326 : static double
6327 8920 : page_size(double tuples, int width)
6328 : {
6329 8920 : return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6330 : }
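/*
 * A minimal standalone sketch of the arithmetic above, with assumed
 * constants (8192-byte BLCKSZ, 8-byte MAXALIGN, 23-byte heap tuple
 * header; the real values depend on the build):
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define BLCKSZ 8192
#define MAXALIGN(LEN) (((uintptr_t) (LEN) + 7) & ~(uintptr_t) 7)
#define SizeofHeapTupleHeader 23

int
main(void)
{
	/*
	 * 1e6 tuples of width 100: each occupies MAXALIGN(100) +
	 * MAXALIGN(23) = 104 + 24 = 128 bytes, giving 1.28e8 bytes and
	 * ceil(1.28e8 / 8192) = 15625 pages.
	 */
	double		bytes = 1e6 * (MAXALIGN(100) + MAXALIGN(SizeofHeapTupleHeader));

	printf("%.0f bytes = %.0f pages\n", bytes, ceil(bytes / BLCKSZ));
	return 0;
}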
6331 :
6332 : /*
6333 : * Estimate the fraction of the work that each worker will do given the
6334 : * number of workers budgeted for the path.
6335 : */
6336 : static double
6337 171508 : get_parallel_divisor(Path *path)
6338 : {
6339 171508 : double parallel_divisor = path->parallel_workers;
6340 :
6341 : /*
6342 : * Early experience with parallel query suggests that when there is only
6343 : * one worker, the leader often makes a very substantial contribution to
6344 : * executing the parallel portion of the plan, but as more workers are
6345 : * added, it does less and less, because it's busy reading tuples from the
6346 : * workers and doing whatever non-parallel post-processing is needed. By
6347 : * the time we reach 4 workers, the leader no longer makes a meaningful
6348 : * contribution. Thus, for now, estimate that the leader spends 30% of
6349 : * its time servicing each worker, and the remainder executing the
6350 : * parallel plan.
6351 : */
6352 171508 : if (parallel_leader_participation)
6353 : {
6354 : double leader_contribution;
6355 :
6356 170230 : leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6357 170230 : if (leader_contribution > 0)
6358 167878 : parallel_divisor += leader_contribution;
6359 : }
6360 :
6361 171508 : return parallel_divisor;
6362 : }
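/*
 * Worked example: with parallel_leader_participation on and 2 planned
 * workers, the leader is assumed to spend 2 * 30% of its time servicing
 * workers and contributes the remaining 0.4 of a process:
 *
 *     parallel_divisor = 2 + (1.0 - 0.3 * 2) = 2.4
 *
 * From 4 workers up, leader_contribution is <= 0, so the divisor is just
 * the worker count.
 */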
6363 :
6364 : /*
6365 : * compute_bitmap_pages
6366 : * Estimate number of pages fetched from heap in a bitmap heap scan.
6367 : *
6368 : * 'baserel' is the relation to be scanned
6369 : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
6370 : * 'loop_count' is the number of repetitions of the indexscan to factor into
6371 : * estimates of caching behavior
6372 : *
6373 : * If cost_p isn't NULL, the indexTotalCost estimate is returned in *cost_p.
6374 : * If tuples_p isn't NULL, the tuples_fetched estimate is returned in *tuples_p.
6375 : */
6376 : double
6377 567652 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
6378 : Path *bitmapqual, double loop_count,
6379 : Cost *cost_p, double *tuples_p)
6380 : {
6381 : Cost indexTotalCost;
6382 : Selectivity indexSelectivity;
6383 : double T;
6384 : double pages_fetched;
6385 : double tuples_fetched;
6386 : double heap_pages;
6387 : long maxentries;
6388 :
6389 : /*
6390 : * Fetch total cost of obtaining the bitmap, as well as its total
6391 : * selectivity.
6392 : */
6393 567652 : cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6394 :
6395 : /*
6396 : * Estimate number of main-table pages fetched.
6397 : */
6398 567652 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6399 :
6400 567652 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
6401 :
6402 : /*
6403 : * For a single scan, the number of heap pages that need to be fetched is
6404 : * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6405 : * re-reads needed).
6406 : */
6407 567652 : pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
6408 :
6409 : /*
6410 : * Calculate the number of pages fetched from the heap. Then based on
6411 : * current work_mem estimate get the estimated maxentries in the bitmap.
6412 : * (Note that we always do this calculation based on the number of pages
6413 : * that would be fetched in a single iteration, even if loop_count > 1.
6414 : * That's correct, because only that number of entries will be stored in
6415 : * the bitmap at one time.)
6416 : */
6417 567652 : heap_pages = Min(pages_fetched, baserel->pages);
6418 567652 : maxentries = tbm_calculate_entries(work_mem * 1024L);
6419 :
6420 567652 : if (loop_count > 1)
6421 : {
6422 : /*
6423 : * For repeated bitmap scans, scale up the number of tuples fetched in
6424 : * the Mackert and Lohman formula by the number of scans, so that we
6425 : * estimate the number of pages fetched by all the scans. Then
6426 : * pro-rate for one scan.
6427 : */
6428 109824 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6429 : baserel->pages,
6430 : get_indexpath_pages(bitmapqual),
6431 : root);
6432 109824 : pages_fetched /= loop_count;
6433 : }
6434 :
6435 567652 : if (pages_fetched >= T)
6436 49960 : pages_fetched = T;
6437 : else
6438 517692 : pages_fetched = ceil(pages_fetched);
6439 :
6440 567652 : if (maxentries < heap_pages)
6441 : {
6442 : double exact_pages;
6443 : double lossy_pages;
6444 :
6445 : /*
6446 : * Crude approximation of the number of lossy pages. Because of the
6447 : * way tbm_lossify() is coded, the number of lossy pages increases
6448 : * very sharply as soon as we run short of memory; this formula has
6449 : * that property and seems to perform adequately in testing, but it's
6450 : * possible we could do better somehow.
6451 : */
6452 18 : lossy_pages = Max(0, heap_pages - maxentries / 2);
6453 18 : exact_pages = heap_pages - lossy_pages;
6454 :
6455 : /*
6456 : * If there are lossy pages then recompute the number of tuples
6457 : * processed by the bitmap heap node. We assume here that the chance
6458 : * of a given tuple coming from an exact page is the same as the
6459 : * chance that a given page is exact. This might not be true, but
6460 : * it's not clear how we can do any better.
6461 : */
6462 18 : if (lossy_pages > 0)
6463 : tuples_fetched =
6464 18 : clamp_row_est(indexSelectivity *
6465 18 : (exact_pages / heap_pages) * baserel->tuples +
6466 18 : (lossy_pages / heap_pages) * baserel->tuples);
6467 : }
6468 :
6469 567652 : if (cost_p)
6470 440278 : *cost_p = indexTotalCost;
6471 567652 : if (tuples_p)
6472 440278 : *tuples_p = tuples_fetched;
6473 :
6474 567652 : return pages_fetched;
6475 : }
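/*
 * Worked example of the single-scan Mackert-Lohman estimate above
 * (illustrative numbers): with T = 1000 heap pages and tuples_fetched =
 * 500,
 *
 *     pages_fetched = (2.0 * 1000 * 500) / (2.0 * 1000 + 500) = 400
 *
 * i.e. some of the 500 matching tuples are expected to share pages.  If
 * work_mem then allowed fewer than heap_pages bitmap entries, the model
 * above would predict Max(0, heap_pages - maxentries / 2) lossy pages
 * and inflate tuples_fetched accordingly.
 */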
6476 :
6477 : /*
6478 : * compute_gather_rows
6479 : * Estimate number of rows for gather (merge) nodes.
6480 : *
6481 : * In a parallel plan, each worker's row estimate is determined by dividing the
6482 : * total number of rows by parallel_divisor, which accounts for the leader's
6483 : * contribution in addition to the number of workers. Accordingly, when
6484 : * estimating the number of rows for gather (merge) nodes, we multiply the rows
6485 : * per worker by the same parallel_divisor to undo the division.
6486 : */
6487 : double
6488 26012 : compute_gather_rows(Path *path)
6489 : {
6490 : Assert(path->parallel_workers > 0);
6491 :
6492 26012 : return clamp_row_est(path->rows * get_parallel_divisor(path));
6493 : }
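/*
 * Example, continuing the get_parallel_divisor() numbers above: a
 * partial path planned with 2 workers and 1000 estimated rows per
 * process yields
 *
 *     clamp_row_est(1000 * 2.4) = 2400
 *
 * total rows at the Gather, undoing the earlier per-process division.
 */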