LCOV - code coverage report
Current view: top level - src/backend/optimizer/path - costsize.c
Test: PostgreSQL 14devel
Date: 2020-12-05 17:06:23

                   Hit     Total    Coverage
    Lines:        1589      1625      97.8 %
    Functions:      68        68     100.0 %

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * costsize.c
       4             :  *    Routines to compute (and set) relation sizes and path costs
       5             :  *
       6             :  * Path costs are measured in arbitrary units established by these basic
       7             :  * parameters:
       8             :  *
       9             :  *  seq_page_cost       Cost of a sequential page fetch
      10             :  *  random_page_cost    Cost of a non-sequential page fetch
      11             :  *  cpu_tuple_cost      Cost of typical CPU time to process a tuple
      12             :  *  cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
      13             :  *  cpu_operator_cost   Cost of CPU time to execute an operator or function
      14             :  *  parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
      15             :  *  parallel_setup_cost Cost of setting up shared memory for parallelism
      16             :  *
      17             :  * We expect that the kernel will typically do some amount of read-ahead
      18             :  * optimization; this in conjunction with seek costs means that seq_page_cost
      19             :  * is normally considerably less than random_page_cost.  (However, if the
      20             :  * database is fully cached in RAM, it is reasonable to set them equal.)
      21             :  *
      22             :  * We also use a rough estimate "effective_cache_size" of the number of
      23             :  * disk pages in Postgres + OS-level disk cache.  (We can't simply use
      24             :  * NBuffers for this purpose because that would ignore the effects of
      25             :  * the kernel's disk cache.)
      26             :  *
      27             :  * Obviously, taking constants for these values is an oversimplification,
      28             :  * but it's tough enough to get any useful estimates even at this level of
      29             :  * detail.  Note that all of these parameters are user-settable, in case
      30             :  * the default values are drastically off for a particular platform.
      31             :  *
      32             :  * seq_page_cost and random_page_cost can also be overridden for an individual
      33             :  * tablespace, in case some data is on a fast disk and other data is on a slow
      34             :  * disk.  Per-tablespace overrides never apply to temporary work files such as
      35             :  * an external sort or a materialize node that overflows work_mem.
      36             :  *
      37             :  * We compute two separate costs for each path:
      38             :  *      total_cost: total estimated cost to fetch all tuples
      39             :  *      startup_cost: cost that is expended before first tuple is fetched
      40             :  * In some scenarios, such as when there is a LIMIT or we are implementing
      41             :  * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
      42             :  * path's result.  A caller can estimate the cost of fetching a partial
      43             :  * result by interpolating between startup_cost and total_cost.  In detail:
      44             :  *      actual_cost = startup_cost +
      45             :  *          (total_cost - startup_cost) * tuples_to_fetch / path->rows;
      46             :  * Note that a base relation's rows count (and, by extension, plan_rows for
      47             :  * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
      48             :  * that this equation works properly.  (Note: while path->rows is never zero
      49             :  * for ordinary relations, it is zero for paths for provably-empty relations,
      50             :  * so beware of division-by-zero.)  The LIMIT is applied as a top-level
      51             :  * plan node.
      52             :  *
      53             :  * For largely historical reasons, most of the routines in this module use
      54             :  * the passed result Path only to store their results (rows, startup_cost and
      55             :  * total_cost) into.  All the input data they need is passed as separate
      56             :  * parameters, even though much of it could be extracted from the Path.
      57             :  * An exception is made for the cost_XXXjoin() routines, which expect all
      58             :  * the other fields of the passed XXXPath to be filled in, and similarly
      59             :  * cost_index() assumes the passed IndexPath is valid except for its output
      60             :  * values.
      61             :  *
      62             :  *
      63             :  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
      64             :  * Portions Copyright (c) 1994, Regents of the University of California
      65             :  *
      66             :  * IDENTIFICATION
      67             :  *    src/backend/optimizer/path/costsize.c
      68             :  *
      69             :  *-------------------------------------------------------------------------
      70             :  */
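
The interpolation formula above can be applied mechanically.  A minimal
sketch, not part of the measured source (the helper name
fetch_fraction_cost is hypothetical; Cost, Path, and Min all come from
headers this file includes):

    /*
     * Estimate the cost of fetching only the first tuples_to_fetch rows
     * of a path, per the formula in the header comment.  Guard against
     * path->rows == 0, which occurs for provably-empty relations, and
     * clamp so the result never exceeds total_cost.
     */
    static Cost
    fetch_fraction_cost(Path *path, double tuples_to_fetch)
    {
        if (path->rows <= 0)
            return path->startup_cost;
        return path->startup_cost +
            (path->total_cost - path->startup_cost) *
            Min(tuples_to_fetch, path->rows) / path->rows;
    }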
      71             : 
      72             : #include "postgres.h"
      73             : 
      74             : #include <math.h>
      75             : 
      76             : #include "access/amapi.h"
      77             : #include "access/htup_details.h"
      78             : #include "access/tsmapi.h"
      79             : #include "executor/executor.h"
      80             : #include "executor/nodeAgg.h"
      81             : #include "executor/nodeHash.h"
      82             : #include "miscadmin.h"
      83             : #include "nodes/makefuncs.h"
      84             : #include "nodes/nodeFuncs.h"
      85             : #include "optimizer/clauses.h"
      86             : #include "optimizer/cost.h"
      87             : #include "optimizer/optimizer.h"
      88             : #include "optimizer/pathnode.h"
      89             : #include "optimizer/paths.h"
      90             : #include "optimizer/placeholder.h"
      91             : #include "optimizer/plancat.h"
      92             : #include "optimizer/planmain.h"
      93             : #include "optimizer/restrictinfo.h"
      94             : #include "parser/parsetree.h"
      95             : #include "utils/lsyscache.h"
      96             : #include "utils/selfuncs.h"
      97             : #include "utils/spccache.h"
      98             : #include "utils/tuplesort.h"
      99             : 
     100             : 
      101             : #define LOG2(x)  (log(x) / 0.693147180559945)    /* divide by ln(2) */
     102             : 
     103             : /*
     104             :  * Append and MergeAppend nodes are less expensive than some other operations
     105             :  * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
     106             :  * per-tuple cost as cpu_tuple_cost multiplied by this value.
     107             :  */
     108             : #define APPEND_CPU_COST_MULTIPLIER 0.5
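
Under the default cpu_tuple_cost of 0.01 (set below), this works out to
0.01 * 0.5 = 0.005 charged per row passing through an Append or
MergeAppend node.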
     109             : 
     110             : /*
     111             :  * Maximum value for row estimates.  We cap row estimates to this to help
     112             :  * ensure that costs based on these estimates remain within the range of what
     113             :  * double can represent.  add_path() wouldn't act sanely given infinite or NaN
     114             :  * cost values.
     115             :  */
     116             : #define MAXIMUM_ROWCOUNT 1e100
     117             : 
     118             : double      seq_page_cost = DEFAULT_SEQ_PAGE_COST;
     119             : double      random_page_cost = DEFAULT_RANDOM_PAGE_COST;
     120             : double      cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
     121             : double      cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
     122             : double      cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
     123             : double      parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
     124             : double      parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
     125             : 
     126             : int         effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
     127             : 
     128             : Cost        disable_cost = 1.0e10;
     129             : 
     130             : int         max_parallel_workers_per_gather = 2;
     131             : 
     132             : bool        enable_seqscan = true;
     133             : bool        enable_indexscan = true;
     134             : bool        enable_indexonlyscan = true;
     135             : bool        enable_bitmapscan = true;
     136             : bool        enable_tidscan = true;
     137             : bool        enable_sort = true;
     138             : bool        enable_incremental_sort = true;
     139             : bool        enable_hashagg = true;
     140             : bool        enable_nestloop = true;
     141             : bool        enable_material = true;
     142             : bool        enable_mergejoin = true;
     143             : bool        enable_hashjoin = true;
     144             : bool        enable_gathermerge = true;
     145             : bool        enable_partitionwise_join = false;
     146             : bool        enable_partitionwise_aggregate = false;
     147             : bool        enable_parallel_append = true;
     148             : bool        enable_parallel_hash = true;
     149             : bool        enable_partition_pruning = true;
     150             : 
     151             : typedef struct
     152             : {
     153             :     PlannerInfo *root;
     154             :     QualCost    total;
     155             : } cost_qual_eval_context;
     156             : 
     157             : static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
     158             : static MergeScanSelCache *cached_scansel(PlannerInfo *root,
     159             :                                          RestrictInfo *rinfo,
     160             :                                          PathKey *pathkey);
     161             : static void cost_rescan(PlannerInfo *root, Path *path,
     162             :                         Cost *rescan_startup_cost, Cost *rescan_total_cost);
     163             : static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
     164             : static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
     165             :                                       ParamPathInfo *param_info,
     166             :                                       QualCost *qpqual_cost);
     167             : static bool has_indexed_join_quals(NestPath *joinpath);
     168             : static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
     169             :                                  List *quals);
     170             : static double calc_joinrel_size_estimate(PlannerInfo *root,
     171             :                                          RelOptInfo *joinrel,
     172             :                                          RelOptInfo *outer_rel,
     173             :                                          RelOptInfo *inner_rel,
     174             :                                          double outer_rows,
     175             :                                          double inner_rows,
     176             :                                          SpecialJoinInfo *sjinfo,
     177             :                                          List *restrictlist);
     178             : static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
     179             :                                                     Relids outer_relids,
     180             :                                                     Relids inner_relids,
     181             :                                                     SpecialJoinInfo *sjinfo,
     182             :                                                     List **restrictlist);
     183             : static Cost append_nonpartial_cost(List *subpaths, int numpaths,
     184             :                                    int parallel_workers);
     185             : static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
     186             : static double relation_byte_size(double tuples, int width);
     187             : static double page_size(double tuples, int width);
     188             : static double get_parallel_divisor(Path *path);
     189             : 
     190             : 
     191             : /*
     192             :  * clamp_row_est
     193             :  *      Force a row-count estimate to a sane value.
     194             :  */
     195             : double
     196     3749034 : clamp_row_est(double nrows)
     197             : {
     198             :     /*
     199             :      * Avoid infinite and NaN row estimates.  Costs derived from such values
     200             :      * are going to be useless.  Also force the estimate to be at least one
     201             :      * row, to make explain output look better and to avoid possible
     202             :      * divide-by-zero when interpolating costs.  Make it an integer, too.
     203             :      */
     204     3749034 :     if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
     205           0 :         nrows = MAXIMUM_ROWCOUNT;
     206     3749034 :     else if (nrows <= 1.0)
     207     1667950 :         nrows = 1.0;
     208             :     else
     209     2081084 :         nrows = rint(nrows);
     210             : 
     211     3749034 :     return nrows;
     212             : }
     213             : 
     214             : 
     215             : /*
     216             :  * cost_seqscan
     217             :  *    Determines and returns the cost of scanning a relation sequentially.
     218             :  *
     219             :  * 'baserel' is the relation to be scanned
     220             :  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
     221             :  */
     222             : void
     223      259926 : cost_seqscan(Path *path, PlannerInfo *root,
     224             :              RelOptInfo *baserel, ParamPathInfo *param_info)
     225             : {
     226      259926 :     Cost        startup_cost = 0;
     227             :     Cost        cpu_run_cost;
     228             :     Cost        disk_run_cost;
     229             :     double      spc_seq_page_cost;
     230             :     QualCost    qpqual_cost;
     231             :     Cost        cpu_per_tuple;
     232             : 
     233             :     /* Should only be applied to base relations */
     234             :     Assert(baserel->relid > 0);
     235             :     Assert(baserel->rtekind == RTE_RELATION);
     236             : 
     237             :     /* Mark the path with the correct row estimate */
     238      259926 :     if (param_info)
     239         380 :         path->rows = param_info->ppi_rows;
     240             :     else
     241      259546 :         path->rows = baserel->rows;
     242             : 
     243      259926 :     if (!enable_seqscan)
     244        7818 :         startup_cost += disable_cost;
     245             : 
     246             :     /* fetch estimated page cost for tablespace containing table */
     247      259926 :     get_tablespace_page_costs(baserel->reltablespace,
     248             :                               NULL,
     249             :                               &spc_seq_page_cost);
     250             : 
     251             :     /*
     252             :      * disk costs
     253             :      */
     254      259926 :     disk_run_cost = spc_seq_page_cost * baserel->pages;
     255             : 
     256             :     /* CPU costs */
     257      259926 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
     258             : 
     259      259926 :     startup_cost += qpqual_cost.startup;
     260      259926 :     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
     261      259926 :     cpu_run_cost = cpu_per_tuple * baserel->tuples;
     262             :     /* tlist eval costs are paid per output row, not per tuple scanned */
     263      259926 :     startup_cost += path->pathtarget->cost.startup;
     264      259926 :     cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
     265             : 
     266             :     /* Adjust costing for parallelism, if used. */
     267      259926 :     if (path->parallel_workers > 0)
     268             :     {
     269       15866 :         double      parallel_divisor = get_parallel_divisor(path);
     270             : 
     271             :         /* The CPU cost is divided among all the workers. */
     272       15866 :         cpu_run_cost /= parallel_divisor;
     273             : 
     274             :         /*
     275             :          * It may be possible to amortize some of the I/O cost, but probably
     276             :          * not very much, because most operating systems already do aggressive
     277             :          * prefetching.  For now, we assume that the disk run cost can't be
     278             :          * amortized at all.
     279             :          */
     280             : 
     281             :         /*
     282             :          * In the case of a parallel plan, the row count needs to represent
     283             :          * the number of tuples processed per worker.
     284             :          */
     285       15866 :         path->rows = clamp_row_est(path->rows / parallel_divisor);
     286             :     }
     287             : 
     288      259926 :     path->startup_cost = startup_cost;
     289      259926 :     path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
     290      259926 : }
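
As a worked example under the default parameters declared above
(seq_page_cost = 1.0, cpu_tuple_cost = 0.01), a non-parallel sequential
scan of a 100-page, 10000-tuple relation with no quals and a trivial
tlist is costed as:

    disk_run_cost = 1.0  * 100     = 100.0
    cpu_run_cost  = 0.01 * 10000   = 100.0
    total_cost    = 100.0 + 100.0  = 200.0

with startup_cost = 0, since a sequential scan can return its first
tuple without any setup work.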
     291             : 
     292             : /*
     293             :  * cost_samplescan
     294             :  *    Determines and returns the cost of scanning a relation using sampling.
     295             :  *
     296             :  * 'baserel' is the relation to be scanned
     297             :  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
     298             :  */
     299             : void
     300         180 : cost_samplescan(Path *path, PlannerInfo *root,
     301             :                 RelOptInfo *baserel, ParamPathInfo *param_info)
     302             : {
     303         180 :     Cost        startup_cost = 0;
     304         180 :     Cost        run_cost = 0;
     305             :     RangeTblEntry *rte;
     306             :     TableSampleClause *tsc;
     307             :     TsmRoutine *tsm;
     308             :     double      spc_seq_page_cost,
     309             :                 spc_random_page_cost,
     310             :                 spc_page_cost;
     311             :     QualCost    qpqual_cost;
     312             :     Cost        cpu_per_tuple;
     313             : 
     314             :     /* Should only be applied to base relations with tablesample clauses */
     315             :     Assert(baserel->relid > 0);
     316         180 :     rte = planner_rt_fetch(baserel->relid, root);
     317             :     Assert(rte->rtekind == RTE_RELATION);
     318         180 :     tsc = rte->tablesample;
     319             :     Assert(tsc != NULL);
     320         180 :     tsm = GetTsmRoutine(tsc->tsmhandler);
     321             : 
     322             :     /* Mark the path with the correct row estimate */
     323         180 :     if (param_info)
     324          12 :         path->rows = param_info->ppi_rows;
     325             :     else
     326         168 :         path->rows = baserel->rows;
     327             : 
     328             :     /* fetch estimated page cost for tablespace containing table */
     329         180 :     get_tablespace_page_costs(baserel->reltablespace,
     330             :                               &spc_random_page_cost,
     331             :                               &spc_seq_page_cost);
     332             : 
     333             :     /* if NextSampleBlock is used, assume random access, else sequential */
     334         360 :     spc_page_cost = (tsm->NextSampleBlock != NULL) ?
     335         180 :         spc_random_page_cost : spc_seq_page_cost;
     336             : 
     337             :     /*
     338             :      * disk costs (recall that baserel->pages has already been set to the
     339             :      * number of pages the sampling method will visit)
     340             :      */
     341         180 :     run_cost += spc_page_cost * baserel->pages;
     342             : 
     343             :     /*
     344             :      * CPU costs (recall that baserel->tuples has already been set to the
     345             :      * number of tuples the sampling method will select).  Note that we ignore
     346             :      * execution cost of the TABLESAMPLE parameter expressions; they will be
     347             :      * evaluated only once per scan, and in most usages they'll likely be
     348             :      * simple constants anyway.  We also don't charge anything for the
     349             :      * calculations the sampling method might do internally.
     350             :      */
     351         180 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
     352             : 
     353         180 :     startup_cost += qpqual_cost.startup;
     354         180 :     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
     355         180 :     run_cost += cpu_per_tuple * baserel->tuples;
     356             :     /* tlist eval costs are paid per output row, not per tuple scanned */
     357         180 :     startup_cost += path->pathtarget->cost.startup;
     358         180 :     run_cost += path->pathtarget->cost.per_tuple * path->rows;
     359             : 
     360         180 :     path->startup_cost = startup_cost;
     361         180 :     path->total_cost = startup_cost + run_cost;
     362         180 : }
     363             : 
     364             : /*
     365             :  * cost_gather
      366             :  *    Determines and returns the cost of a Gather path.
     367             :  *
     368             :  * 'rel' is the relation to be operated upon
     369             :  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
     370             :  * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
     371             :  * both 'rel' and 'param_info'.  This is useful when the path doesn't exactly
     372             :  * correspond to any particular RelOptInfo.
     373             :  */
     374             : void
     375       10932 : cost_gather(GatherPath *path, PlannerInfo *root,
     376             :             RelOptInfo *rel, ParamPathInfo *param_info,
     377             :             double *rows)
     378             : {
     379       10932 :     Cost        startup_cost = 0;
     380       10932 :     Cost        run_cost = 0;
     381             : 
     382             :     /* Mark the path with the correct row estimate */
     383       10932 :     if (rows)
     384         972 :         path->path.rows = *rows;
     385        9960 :     else if (param_info)
     386           0 :         path->path.rows = param_info->ppi_rows;
     387             :     else
     388        9960 :         path->path.rows = rel->rows;
     389             : 
     390       10932 :     startup_cost = path->subpath->startup_cost;
     391             : 
     392       10932 :     run_cost = path->subpath->total_cost - path->subpath->startup_cost;
     393             : 
     394             :     /* Parallel setup and communication cost. */
     395       10932 :     startup_cost += parallel_setup_cost;
     396       10932 :     run_cost += parallel_tuple_cost * path->path.rows;
     397             : 
     398       10932 :     path->path.startup_cost = startup_cost;
     399       10932 :     path->path.total_cost = (startup_cost + run_cost);
     400       10932 : }
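
For example, with the defaults parallel_setup_cost = 1000.0 and
parallel_tuple_cost = 0.1, a Gather expected to return 10000 rows adds
1000.0 to the subpath's startup cost and 0.1 * 10000 = 1000.0 to its run
cost, which is one reason cheap queries rarely choose parallel plans.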
     401             : 
     402             : /*
     403             :  * cost_gather_merge
      404             :  *    Determines and returns the cost of a Gather Merge path.
     405             :  *
     406             :  * GatherMerge merges several pre-sorted input streams, using a heap that at
     407             :  * any given instant holds the next tuple from each stream. If there are N
     408             :  * streams, we need about N*log2(N) tuple comparisons to construct the heap at
     409             :  * startup, and then for each output tuple, about log2(N) comparisons to
     410             :  * replace the top heap entry with the next tuple from the same stream.
     411             :  */
     412             : void
     413        4000 : cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
     414             :                   RelOptInfo *rel, ParamPathInfo *param_info,
     415             :                   Cost input_startup_cost, Cost input_total_cost,
     416             :                   double *rows)
     417             : {
     418        4000 :     Cost        startup_cost = 0;
     419        4000 :     Cost        run_cost = 0;
     420             :     Cost        comparison_cost;
     421             :     double      N;
     422             :     double      logN;
     423             : 
     424             :     /* Mark the path with the correct row estimate */
     425        4000 :     if (rows)
     426        3374 :         path->path.rows = *rows;
     427         626 :     else if (param_info)
     428           0 :         path->path.rows = param_info->ppi_rows;
     429             :     else
     430         626 :         path->path.rows = rel->rows;
     431             : 
     432        4000 :     if (!enable_gathermerge)
     433           0 :         startup_cost += disable_cost;
     434             : 
     435             :     /*
     436             :      * Add one to the number of workers to account for the leader.  This might
     437             :      * be overgenerous since the leader will do less work than other workers
     438             :      * in typical cases, but we'll go with it for now.
     439             :      */
     440             :     Assert(path->num_workers > 0);
     441        4000 :     N = (double) path->num_workers + 1;
     442        4000 :     logN = LOG2(N);
     443             : 
     444             :     /* Assumed cost per tuple comparison */
     445        4000 :     comparison_cost = 2.0 * cpu_operator_cost;
     446             : 
     447             :     /* Heap creation cost */
     448        4000 :     startup_cost += comparison_cost * N * logN;
     449             : 
     450             :     /* Per-tuple heap maintenance cost */
     451        4000 :     run_cost += path->path.rows * comparison_cost * logN;
     452             : 
     453             :     /* small cost for heap management, like cost_merge_append */
     454        4000 :     run_cost += cpu_operator_cost * path->path.rows;
     455             : 
     456             :     /*
     457             :      * Parallel setup and communication cost.  Since Gather Merge, unlike
     458             :      * Gather, requires us to block until a tuple is available from every
     459             :      * worker, we bump the IPC cost up a little bit as compared with Gather.
     460             :      * For lack of a better idea, charge an extra 5%.
     461             :      */
     462        4000 :     startup_cost += parallel_setup_cost;
     463        4000 :     run_cost += parallel_tuple_cost * path->path.rows * 1.05;
     464             : 
     465        4000 :     path->path.startup_cost = startup_cost + input_startup_cost;
     466        4000 :     path->path.total_cost = (startup_cost + run_cost + input_total_cost);
     467        4000 : }
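
Numerically, with the default max_parallel_workers_per_gather = 2 (so
N = 3) and cpu_operator_cost = 0.0025:

    comparison_cost  = 2 * 0.0025                     = 0.005
    heap creation    = 0.005 * 3 * log2(3)            ≈ 0.024  (startup)
    per output tuple = 0.005 * log2(3) + 0.0025
                       + 0.1 * 1.05                   ≈ 0.115  (run)

The heap bookkeeping itself is cheap; the dominant per-tuple charge is
the 5%-inflated parallel_tuple_cost.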
     468             : 
     469             : /*
     470             :  * cost_index
     471             :  *    Determines and returns the cost of scanning a relation using an index.
     472             :  *
     473             :  * 'path' describes the indexscan under consideration, and is complete
     474             :  *      except for the fields to be set by this routine
     475             :  * 'loop_count' is the number of repetitions of the indexscan to factor into
     476             :  *      estimates of caching behavior
     477             :  *
     478             :  * In addition to rows, startup_cost and total_cost, cost_index() sets the
     479             :  * path's indextotalcost and indexselectivity fields.  These values will be
     480             :  * needed if the IndexPath is used in a BitmapIndexScan.
     481             :  *
      482             :  * NOTE: path->indexclauses must contain only clauses usable as index
     483             :  * restrictions.  Any additional quals evaluated as qpquals may reduce the
     484             :  * number of returned tuples, but they won't reduce the number of tuples
     485             :  * we have to fetch from the table, so they don't reduce the scan cost.
     486             :  */
     487             : void
     488      426856 : cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
     489             :            bool partial_path)
     490             : {
     491      426856 :     IndexOptInfo *index = path->indexinfo;
     492      426856 :     RelOptInfo *baserel = index->rel;
     493      426856 :     bool        indexonly = (path->path.pathtype == T_IndexOnlyScan);
     494             :     amcostestimate_function amcostestimate;
     495             :     List       *qpquals;
     496      426856 :     Cost        startup_cost = 0;
     497      426856 :     Cost        run_cost = 0;
     498      426856 :     Cost        cpu_run_cost = 0;
     499             :     Cost        indexStartupCost;
     500             :     Cost        indexTotalCost;
     501             :     Selectivity indexSelectivity;
     502             :     double      indexCorrelation,
     503             :                 csquared;
     504             :     double      spc_seq_page_cost,
     505             :                 spc_random_page_cost;
     506             :     Cost        min_IO_cost,
     507             :                 max_IO_cost;
     508             :     QualCost    qpqual_cost;
     509             :     Cost        cpu_per_tuple;
     510             :     double      tuples_fetched;
     511             :     double      pages_fetched;
     512             :     double      rand_heap_pages;
     513             :     double      index_pages;
     514             : 
     515             :     /* Should only be applied to base relations */
     516             :     Assert(IsA(baserel, RelOptInfo) &&
     517             :            IsA(index, IndexOptInfo));
     518             :     Assert(baserel->relid > 0);
     519             :     Assert(baserel->rtekind == RTE_RELATION);
     520             : 
     521             :     /*
     522             :      * Mark the path with the correct row estimate, and identify which quals
     523             :      * will need to be enforced as qpquals.  We need not check any quals that
     524             :      * are implied by the index's predicate, so we can use indrestrictinfo not
     525             :      * baserestrictinfo as the list of relevant restriction clauses for the
     526             :      * rel.
     527             :      */
     528      426856 :     if (path->path.param_info)
     529             :     {
     530       71952 :         path->path.rows = path->path.param_info->ppi_rows;
     531             :         /* qpquals come from the rel's restriction clauses and ppi_clauses */
     532       71952 :         qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
     533             :                                                           path->indexclauses),
     534       71952 :                               extract_nonindex_conditions(path->path.param_info->ppi_clauses,
     535             :                                                           path->indexclauses));
     536             :     }
     537             :     else
     538             :     {
     539      354904 :         path->path.rows = baserel->rows;
     540             :         /* qpquals come from just the rel's restriction clauses */
     541      354904 :         qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
     542             :                                               path->indexclauses);
     543             :     }
     544             : 
     545      426856 :     if (!enable_indexscan)
     546        2330 :         startup_cost += disable_cost;
     547             :     /* we don't need to check enable_indexonlyscan; indxpath.c does that */
     548             : 
     549             :     /*
     550             :      * Call index-access-method-specific code to estimate the processing cost
     551             :      * for scanning the index, as well as the selectivity of the index (ie,
     552             :      * the fraction of main-table tuples we will have to retrieve) and its
     553             :      * correlation to the main-table tuple order.  We need a cast here because
     554             :      * pathnodes.h uses a weak function type to avoid including amapi.h.
     555             :      */
     556      426856 :     amcostestimate = (amcostestimate_function) index->amcostestimate;
     557      426856 :     amcostestimate(root, path, loop_count,
     558             :                    &indexStartupCost, &indexTotalCost,
     559             :                    &indexSelectivity, &indexCorrelation,
     560             :                    &index_pages);
     561             : 
     562             :     /*
     563             :      * Save amcostestimate's results for possible use in bitmap scan planning.
     564             :      * We don't bother to save indexStartupCost or indexCorrelation, because a
     565             :      * bitmap scan doesn't care about either.
     566             :      */
     567      426856 :     path->indextotalcost = indexTotalCost;
     568      426856 :     path->indexselectivity = indexSelectivity;
     569             : 
     570             :     /* all costs for touching index itself included here */
     571      426856 :     startup_cost += indexStartupCost;
     572      426856 :     run_cost += indexTotalCost - indexStartupCost;
     573             : 
     574             :     /* estimate number of main-table tuples fetched */
     575      426856 :     tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
     576             : 
     577             :     /* fetch estimated page costs for tablespace containing table */
     578      426856 :     get_tablespace_page_costs(baserel->reltablespace,
     579             :                               &spc_random_page_cost,
     580             :                               &spc_seq_page_cost);
     581             : 
     582             :     /*----------
     583             :      * Estimate number of main-table pages fetched, and compute I/O cost.
     584             :      *
     585             :      * When the index ordering is uncorrelated with the table ordering,
     586             :      * we use an approximation proposed by Mackert and Lohman (see
     587             :      * index_pages_fetched() for details) to compute the number of pages
     588             :      * fetched, and then charge spc_random_page_cost per page fetched.
     589             :      *
     590             :      * When the index ordering is exactly correlated with the table ordering
     591             :      * (just after a CLUSTER, for example), the number of pages fetched should
     592             :      * be exactly selectivity * table_size.  What's more, all but the first
     593             :      * will be sequential fetches, not the random fetches that occur in the
     594             :      * uncorrelated case.  So if the number of pages is more than 1, we
     595             :      * ought to charge
     596             :      *      spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
     597             :      * For partially-correlated indexes, we ought to charge somewhere between
     598             :      * these two estimates.  We currently interpolate linearly between the
     599             :      * estimates based on the correlation squared (XXX is that appropriate?).
     600             :      *
     601             :      * If it's an index-only scan, then we will not need to fetch any heap
     602             :      * pages for which the visibility map shows all tuples are visible.
     603             :      * Hence, reduce the estimated number of heap fetches accordingly.
     604             :      * We use the measured fraction of the entire heap that is all-visible,
     605             :      * which might not be particularly relevant to the subset of the heap
     606             :      * that this query will fetch; but it's not clear how to do better.
     607             :      *----------
     608             :      */
     609      426856 :     if (loop_count > 1)
     610             :     {
     611             :         /*
     612             :          * For repeated indexscans, the appropriate estimate for the
     613             :          * uncorrelated case is to scale up the number of tuples fetched in
     614             :          * the Mackert and Lohman formula by the number of scans, so that we
     615             :          * estimate the number of pages fetched by all the scans; then
     616             :          * pro-rate the costs for one scan.  In this case we assume all the
     617             :          * fetches are random accesses.
     618             :          */
     619       41770 :         pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
     620             :                                             baserel->pages,
     621       41770 :                                             (double) index->pages,
     622             :                                             root);
     623             : 
     624       41770 :         if (indexonly)
     625        5110 :             pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
     626             : 
     627       41770 :         rand_heap_pages = pages_fetched;
     628             : 
     629       41770 :         max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
     630             : 
     631             :         /*
     632             :          * In the perfectly correlated case, the number of pages touched by
     633             :          * each scan is selectivity * table_size, and we can use the Mackert
     634             :          * and Lohman formula at the page level to estimate how much work is
     635             :          * saved by caching across scans.  We still assume all the fetches are
     636             :          * random, though, which is an overestimate that's hard to correct for
     637             :          * without double-counting the cache effects.  (But in most cases
     638             :          * where such a plan is actually interesting, only one page would get
     639             :          * fetched per scan anyway, so it shouldn't matter much.)
     640             :          */
     641       41770 :         pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
     642             : 
     643       41770 :         pages_fetched = index_pages_fetched(pages_fetched * loop_count,
     644             :                                             baserel->pages,
     645       41770 :                                             (double) index->pages,
     646             :                                             root);
     647             : 
     648       41770 :         if (indexonly)
     649        5110 :             pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
     650             : 
     651       41770 :         min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
     652             :     }
     653             :     else
     654             :     {
     655             :         /*
     656             :          * Normal case: apply the Mackert and Lohman formula, and then
     657             :          * interpolate between that and the correlation-derived result.
     658             :          */
     659      385086 :         pages_fetched = index_pages_fetched(tuples_fetched,
     660             :                                             baserel->pages,
     661      385086 :                                             (double) index->pages,
     662             :                                             root);
     663             : 
     664      385086 :         if (indexonly)
     665       29938 :             pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
     666             : 
     667      385086 :         rand_heap_pages = pages_fetched;
     668             : 
     669             :         /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
     670      385086 :         max_IO_cost = pages_fetched * spc_random_page_cost;
     671             : 
     672             :         /* min_IO_cost is for the perfectly correlated case (csquared=1) */
     673      385086 :         pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
     674             : 
     675      385086 :         if (indexonly)
     676       29938 :             pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));
     677             : 
     678      385086 :         if (pages_fetched > 0)
     679             :         {
     680      348252 :             min_IO_cost = spc_random_page_cost;
     681      348252 :             if (pages_fetched > 1)
     682       82000 :                 min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
     683             :         }
     684             :         else
     685       36834 :             min_IO_cost = 0;
     686             :     }
     687             : 
     688      426856 :     if (partial_path)
     689             :     {
     690             :         /*
     691             :          * For index only scans compute workers based on number of index pages
     692             :          * fetched; the number of heap pages we fetch might be so small as to
     693             :          * effectively rule out parallelism, which we don't want to do.
     694             :          */
     695      140174 :         if (indexonly)
     696       11270 :             rand_heap_pages = -1;
     697             : 
     698             :         /*
     699             :          * Estimate the number of parallel workers required to scan index. Use
     700             :          * the number of heap pages computed considering heap fetches won't be
     701             :          * sequential as for parallel scans the pages are accessed in random
     702             :          * order.
     703             :          */
     704      140174 :         path->path.parallel_workers = compute_parallel_worker(baserel,
     705             :                                                               rand_heap_pages,
     706             :                                                               index_pages,
     707             :                                                               max_parallel_workers_per_gather);
     708             : 
     709             :         /*
     710             :          * Fall out if workers can't be assigned for parallel scan, because in
     711             :          * such a case this path will be rejected.  So there is no benefit in
     712             :          * doing extra computation.
     713             :          */
     714      140174 :         if (path->path.parallel_workers <= 0)
     715      133332 :             return;
     716             : 
     717        6842 :         path->path.parallel_aware = true;
     718             :     }
     719             : 
     720             :     /*
     721             :      * Now interpolate based on estimated index order correlation to get total
     722             :      * disk I/O cost for main table accesses.
     723             :      */
     724      293524 :     csquared = indexCorrelation * indexCorrelation;
     725             : 
     726      293524 :     run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
     727             : 
     728             :     /*
     729             :      * Estimate CPU costs per tuple.
     730             :      *
     731             :      * What we want here is cpu_tuple_cost plus the evaluation costs of any
     732             :      * qual clauses that we have to evaluate as qpquals.
     733             :      */
     734      293524 :     cost_qual_eval(&qpqual_cost, qpquals, root);
     735             : 
     736      293524 :     startup_cost += qpqual_cost.startup;
     737      293524 :     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
     738             : 
     739      293524 :     cpu_run_cost += cpu_per_tuple * tuples_fetched;
     740             : 
     741             :     /* tlist eval costs are paid per output row, not per tuple scanned */
     742      293524 :     startup_cost += path->path.pathtarget->cost.startup;
     743      293524 :     cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
     744             : 
     745             :     /* Adjust costing for parallelism, if used. */
     746      293524 :     if (path->path.parallel_workers > 0)
     747             :     {
     748        6842 :         double      parallel_divisor = get_parallel_divisor(&path->path);
     749             : 
     750        6842 :         path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);
     751             : 
     752             :         /* The CPU cost is divided among all the workers. */
     753        6842 :         cpu_run_cost /= parallel_divisor;
     754             :     }
     755             : 
     756      293524 :     run_cost += cpu_run_cost;
     757             : 
     758      293524 :     path->path.startup_cost = startup_cost;
     759      293524 :     path->path.total_cost = startup_cost + run_cost;
     760             : }
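
Note that the interpolation step is linear in the squared correlation:
an indexCorrelation of 0.5 gives csquared = 0.25, so the I/O charge is
0.75 * max_IO_cost + 0.25 * min_IO_cost -- still three-quarters of the
way toward the fully-random estimate even though the index is "half"
correlated with the table order.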
     761             : 
     762             : /*
     763             :  * extract_nonindex_conditions
     764             :  *
     765             :  * Given a list of quals to be enforced in an indexscan, extract the ones that
     766             :  * will have to be applied as qpquals (ie, the index machinery won't handle
     767             :  * them).  Here we detect only whether a qual clause is directly redundant
     768             :  * with some indexclause.  If the index path is chosen for use, createplan.c
     769             :  * will try a bit harder to get rid of redundant qual conditions; specifically
     770             :  * it will see if quals can be proven to be implied by the indexquals.  But
     771             :  * it does not seem worth the cycles to try to factor that in at this stage,
     772             :  * since we're only trying to estimate qual eval costs.  Otherwise this must
     773             :  * match the logic in create_indexscan_plan().
     774             :  *
     775             :  * qual_clauses, and the result, are lists of RestrictInfos.
     776             :  * indexclauses is a list of IndexClauses.
     777             :  */
     778             : static List *
     779      498808 : extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
     780             : {
     781      498808 :     List       *result = NIL;
     782             :     ListCell   *lc;
     783             : 
     784     1040580 :     foreach(lc, qual_clauses)
     785             :     {
     786      541772 :         RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
     787             : 
     788      541772 :         if (rinfo->pseudoconstant)
     789        1616 :             continue;           /* we may drop pseudoconstants here */
     790      540156 :         if (is_redundant_with_indexclauses(rinfo, indexclauses))
     791      352882 :             continue;           /* dup or derived from same EquivalenceClass */
     792             :         /* ... skip the predicate proof attempt createplan.c will try ... */
     793      187274 :         result = lappend(result, rinfo);
     794             :     }
     795      498808 :     return result;
     796             : }
     797             : 
     798             : /*
     799             :  * index_pages_fetched
     800             :  *    Estimate the number of pages actually fetched after accounting for
     801             :  *    cache effects.
     802             :  *
     803             :  * We use an approximation proposed by Mackert and Lohman, "Index Scans
     804             :  * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
     805             :  * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
     806             :  * The Mackert and Lohman approximation is that the number of pages
     807             :  * fetched is
     808             :  *  PF =
     809             :  *      min(2TNs/(2T+Ns), T)            when T <= b
     810             :  *      2TNs/(2T+Ns)                    when T > b and Ns <= 2Tb/(2T-b)
     811             :  *      b + (Ns - 2Tb/(2T-b))*(T-b)/T   when T > b and Ns > 2Tb/(2T-b)
     812             :  * where
     813             :  *      T = # pages in table
     814             :  *      N = # tuples in table
     815             :  *      s = selectivity = fraction of table to be scanned
     816             :  *      b = # buffer pages available (we include kernel space here)
     817             :  *
     818             :  * We assume that effective_cache_size is the total number of buffer pages
     819             :  * available for the whole query, and pro-rate that space across all the
     820             :  * tables in the query and the index currently under consideration.  (This
     821             :  * ignores space needed for other indexes used by the query, but since we
     822             :  * don't know which indexes will get used, we can't estimate that very well;
     823             :  * and in any case counting all the tables may well be an overestimate, since
     824             :  * depending on the join plan not all the tables may be scanned concurrently.)
     825             :  *
     826             :  * The product Ns is the number of tuples fetched; we pass in that
     827             :  * product rather than calculating it here.  "pages" is the number of pages
     828             :  * in the object under consideration (either an index or a table).
     829             :  * "index_pages" is the amount to add to the total table space, which was
     830             :  * computed for us by make_one_rel.
     831             :  *
     832             :  * Caller is expected to have ensured that tuples_fetched is greater than zero
     833             :  * and rounded to integer (see clamp_row_est).  The result will likewise be
     834             :  * greater than zero and integral.
     835             :  */
     836             : double
     837      587102 : index_pages_fetched(double tuples_fetched, BlockNumber pages,
     838             :                     double index_pages, PlannerInfo *root)
     839             : {
     840             :     double      pages_fetched;
     841             :     double      total_pages;
     842             :     double      T,
     843             :                 b;
     844             : 
     845             :     /* T is # pages in table, but don't allow it to be zero */
     846      587102 :     T = (pages > 1) ? (double) pages : 1.0;
     847             : 
     848             :     /* Compute number of pages assumed to be competing for cache space */
     849      587102 :     total_pages = root->total_table_pages + index_pages;
     850      587102 :     total_pages = Max(total_pages, 1.0);
     851             :     Assert(T <= total_pages);
     852             : 
     853             :     /* b is pro-rated share of effective_cache_size */
     854      587102 :     b = (double) effective_cache_size * T / total_pages;
     855             : 
     856             :     /* force it positive and integral */
     857      587102 :     if (b <= 1.0)
     858           0 :         b = 1.0;
     859             :     else
     860      587102 :         b = ceil(b);
     861             : 
     862             :     /* This part is the Mackert and Lohman formula */
     863      587102 :     if (T <= b)
     864             :     {
     865      587102 :         pages_fetched =
     866      587102 :             (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
     867      587102 :         if (pages_fetched >= T)
     868      290252 :             pages_fetched = T;
     869             :         else
     870      296850 :             pages_fetched = ceil(pages_fetched);
     871             :     }
     872             :     else
     873             :     {
     874             :         double      lim;
     875             : 
     876           0 :         lim = (2.0 * T * b) / (2.0 * T - b);
     877           0 :         if (tuples_fetched <= lim)
     878             :         {
     879           0 :             pages_fetched =
     880           0 :                 (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
     881             :         }
     882             :         else
     883             :         {
     884           0 :             pages_fetched =
     885           0 :                 b + (tuples_fetched - lim) * (T - b) / T;
     886             :         }
     887           0 :         pages_fetched = ceil(pages_fetched);
     888             :     }
     889      587102 :     return pages_fetched;
     890             : }
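
Two worked cases for the common T <= b branch: with T = 1000 pages and
Ns = 500 tuples fetched, PF = (2 * 1000 * 500) / (2 * 1000 + 500) = 400
pages; with Ns = 5000 the formula yields about 1429, which is capped at
T = 1000 -- at that selectivity every page is expected to be visited.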
     891             : 
     892             : /*
     893             :  * get_indexpath_pages
     894             :  *      Determine the total size of the indexes used in a bitmap index path.
     895             :  *
     896             :  * Note: if the same index is used more than once in a bitmap tree, we will
     897             :  * count it multiple times, which perhaps is the wrong thing ... but it's
     898             :  * not completely clear, and detecting duplicates is difficult, so ignore it
     899             :  * for now.
     900             :  */
     901             : static double
     902       85922 : get_indexpath_pages(Path *bitmapqual)
     903             : {
     904       85922 :     double      result = 0;
     905             :     ListCell   *l;
     906             : 
     907       85922 :     if (IsA(bitmapqual, BitmapAndPath))
     908             :     {
     909        8956 :         BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
     910             : 
     911       26868 :         foreach(l, apath->bitmapquals)
     912             :         {
     913       17912 :             result += get_indexpath_pages((Path *) lfirst(l));
     914             :         }
     915             :     }
     916       76966 :     else if (IsA(bitmapqual, BitmapOrPath))
     917             :     {
     918          34 :         BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
     919             : 
     920         102 :         foreach(l, opath->bitmapquals)
     921             :         {
     922          68 :             result += get_indexpath_pages((Path *) lfirst(l));
     923             :         }
     924             :     }
     925       76932 :     else if (IsA(bitmapqual, IndexPath))
     926             :     {
     927       76932 :         IndexPath  *ipath = (IndexPath *) bitmapqual;
     928             : 
     929       76932 :         result = (double) ipath->indexinfo->pages;
     930             :     }
     931             :     else
     932           0 :         elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
     933             : 
     934       85922 :     return result;
     935             : }
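A quick illustration with hypothetical index sizes: a BitmapAndPath over two
IndexPaths whose indexes occupy 100 and 40 pages yields 140; if the same
100-page index appeared under both arms of the AND, it would be counted twice
and yield 200, per the note above about duplicate indexes.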
     936             : 
     937             : /*
     938             :  * cost_bitmap_heap_scan
     939             :  *    Determines and returns the cost of scanning a relation using a bitmap
     940             :  *    index-then-heap plan.
     941             :  *
     942             :  * 'baserel' is the relation to be scanned
     943             :  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
     944             :  * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
     945             :  * 'loop_count' is the number of repetitions of the indexscan to factor into
     946             :  *      estimates of caching behavior
     947             :  *
     948             :  * Note: the component IndexPaths in bitmapqual should have been costed
     949             :  * using the same loop_count.
     950             :  */
     951             : void
     952      303152 : cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
     953             :                       ParamPathInfo *param_info,
     954             :                       Path *bitmapqual, double loop_count)
     955             : {
     956      303152 :     Cost        startup_cost = 0;
     957      303152 :     Cost        run_cost = 0;
     958             :     Cost        indexTotalCost;
     959             :     QualCost    qpqual_cost;
     960             :     Cost        cpu_per_tuple;
     961             :     Cost        cost_per_page;
     962             :     Cost        cpu_run_cost;
     963             :     double      tuples_fetched;
     964             :     double      pages_fetched;
     965             :     double      spc_seq_page_cost,
     966             :                 spc_random_page_cost;
     967             :     double      T;
     968             : 
     969             :     /* Should only be applied to base relations */
     970             :     Assert(IsA(baserel, RelOptInfo));
     971             :     Assert(baserel->relid > 0);
     972             :     Assert(baserel->rtekind == RTE_RELATION);
     973             : 
     974             :     /* Mark the path with the correct row estimate */
     975      303152 :     if (param_info)
     976      101152 :         path->rows = param_info->ppi_rows;
     977             :     else
     978      202000 :         path->rows = baserel->rows;
     979             : 
     980      303152 :     if (!enable_bitmapscan)
     981        4020 :         startup_cost += disable_cost;
     982             : 
     983      303152 :     pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
     984             :                                          loop_count, &indexTotalCost,
     985             :                                          &tuples_fetched);
     986             : 
     987      303152 :     startup_cost += indexTotalCost;
     988      303152 :     T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
     989             : 
     990             :     /* Fetch estimated page costs for tablespace containing table. */
     991      303152 :     get_tablespace_page_costs(baserel->reltablespace,
     992             :                               &spc_random_page_cost,
     993             :                               &spc_seq_page_cost);
     994             : 
     995             :     /*
     996             :      * For small numbers of pages we should charge spc_random_page_cost
     997             :      * apiece, while if nearly all the table's pages are being read, it's more
     998             :      * appropriate to charge spc_seq_page_cost apiece.  The effect is
     999             :      * nonlinear, too. For lack of a better idea, interpolate like this to
    1000             :      * determine the cost per page.
    1001             :      */
    1002      303152 :     if (pages_fetched >= 2.0)
    1003      140340 :         cost_per_page = spc_random_page_cost -
    1004       70170 :             (spc_random_page_cost - spc_seq_page_cost)
    1005       70170 :             * sqrt(pages_fetched / T);
    1006             :     else
    1007      232982 :         cost_per_page = spc_random_page_cost;
    1008             : 
    1009      303152 :     run_cost += pages_fetched * cost_per_page;
    1010             : 
    1011             :     /*
    1012             :      * Estimate CPU costs per tuple.
    1013             :      *
    1014             :      * Often the indexquals don't need to be rechecked at each tuple ... but
    1015             :      * not always, especially not if there are enough tuples involved that the
    1016             :      * bitmaps become lossy.  For the moment, just assume they will be
    1017             :      * rechecked always.  This means we charge the full freight for all the
    1018             :      * scan clauses.
    1019             :      */
    1020      303152 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
    1021             : 
    1022      303152 :     startup_cost += qpqual_cost.startup;
    1023      303152 :     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    1024      303152 :     cpu_run_cost = cpu_per_tuple * tuples_fetched;
    1025             : 
    1026             :     /* Adjust costing for parallelism, if used. */
    1027      303152 :     if (path->parallel_workers > 0)
    1028             :     {
    1029        3320 :         double      parallel_divisor = get_parallel_divisor(path);
    1030             : 
    1031             :         /* The CPU cost is divided among all the workers. */
    1032        3320 :         cpu_run_cost /= parallel_divisor;
    1033             : 
    1034        3320 :         path->rows = clamp_row_est(path->rows / parallel_divisor);
    1035             :     }
    1036             : 
    1037             : 
    1038      303152 :     run_cost += cpu_run_cost;
    1039             : 
    1040             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    1041      303152 :     startup_cost += path->pathtarget->cost.startup;
    1042      303152 :     run_cost += path->pathtarget->cost.per_tuple * path->rows;
    1043             : 
    1044      303152 :     path->startup_cost = startup_cost;
    1045      303152 :     path->total_cost = startup_cost + run_cost;
    1046      303152 : }
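To make the interpolation concrete, an illustrative calculation with assumed
inputs: with spc_random_page_cost = 4.0, spc_seq_page_cost = 1.0, T = 10000
heap pages, and pages_fetched = 2500,

    cost_per_page = 4.0 - (4.0 - 1.0) * sqrt(2500 / 10000) = 2.5

so the heap-access part of run_cost is 2500 * 2.5 = 6250.  As pages_fetched
approaches T the per-page charge approaches spc_seq_page_cost, while a scan
fetching only a page or two is charged spc_random_page_cost apiece.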
    1047             : 
    1048             : /*
    1049             :  * cost_bitmap_tree_node
    1050             :  *      Extract cost and selectivity from a bitmap tree node (index/and/or)
    1051             :  */
    1052             : void
    1053      530784 : cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
    1054             : {
    1055      530784 :     if (IsA(path, IndexPath))
    1056             :     {
    1057      510000 :         *cost = ((IndexPath *) path)->indextotalcost;
    1058      510000 :         *selec = ((IndexPath *) path)->indexselectivity;
    1059             : 
    1060             :         /*
    1061             :          * Charge a small amount per retrieved tuple to reflect the costs of
    1062             :          * manipulating the bitmap.  This is mostly to make sure that a bitmap
    1063             :          * scan doesn't look to be the same cost as an indexscan to retrieve a
    1064             :          * single tuple.
    1065             :          */
    1066      510000 :         *cost += 0.1 * cpu_operator_cost * path->rows;
    1067             :     }
    1068       20784 :     else if (IsA(path, BitmapAndPath))
    1069             :     {
    1070       19480 :         *cost = path->total_cost;
    1071       19480 :         *selec = ((BitmapAndPath *) path)->bitmapselectivity;
    1072             :     }
    1073        1304 :     else if (IsA(path, BitmapOrPath))
    1074             :     {
    1075        1304 :         *cost = path->total_cost;
    1076        1304 :         *selec = ((BitmapOrPath *) path)->bitmapselectivity;
    1077             :     }
    1078             :     else
    1079             :     {
    1080           0 :         elog(ERROR, "unrecognized node type: %d", nodeTag(path));
    1081             :         *cost = *selec = 0;     /* keep compiler quiet */
    1082             :     }
    1083      530784 : }
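For scale (assumed row count): an IndexPath expected to yield 1000 rows picks
up a bitmap-manipulation surcharge of 0.1 * cpu_operator_cost * 1000 = 0.25
at the default cpu_operator_cost of 0.0025, just enough to keep a bitmap heap
scan from costing exactly the same as a plain indexscan fetching one tuple.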
    1084             : 
    1085             : /*
    1086             :  * cost_bitmap_and_node
    1087             :  *      Estimate the cost of a BitmapAnd node
    1088             :  *
    1089             :  * Note that this considers only the costs of index scanning and bitmap
    1090             :  * creation, not the eventual heap access.  In that sense the object isn't
    1091             :  * truly a Path, but it has enough path-like properties (costs in particular)
    1092             :  * to warrant treating it as one.  We don't bother to set the path rows field,
    1093             :  * however.
    1094             :  */
    1095             : void
    1096       19440 : cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
    1097             : {
    1098             :     Cost        totalCost;
    1099             :     Selectivity selec;
    1100             :     ListCell   *l;
    1101             : 
    1102             :     /*
    1103             :      * We estimate AND selectivity on the assumption that the inputs are
    1104             :      * independent.  This is probably often wrong, but we don't have the info
    1105             :      * to do better.
    1106             :      *
    1107             :      * The runtime cost of the BitmapAnd itself is estimated at 100x
    1108             :      * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
    1109             :      * definitely too simplistic?
    1110             :      */
    1111       19440 :     totalCost = 0.0;
    1112       19440 :     selec = 1.0;
    1113       58320 :     foreach(l, path->bitmapquals)
    1114             :     {
    1115       38880 :         Path       *subpath = (Path *) lfirst(l);
    1116             :         Cost        subCost;
    1117             :         Selectivity subselec;
    1118             : 
    1119       38880 :         cost_bitmap_tree_node(subpath, &subCost, &subselec);
    1120             : 
    1121       38880 :         selec *= subselec;
    1122             : 
    1123       38880 :         totalCost += subCost;
    1124       38880 :         if (l != list_head(path->bitmapquals))
    1125       19440 :             totalCost += 100.0 * cpu_operator_cost;
    1126             :     }
    1127       19440 :     path->bitmapselectivity = selec;
    1128       19440 :     path->path.rows = 0;     /* per above, not used */
    1129       19440 :     path->path.startup_cost = totalCost;
    1130       19440 :     path->path.total_cost = totalCost;
    1131       19440 : }
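Illustrative numbers (assumed, not taken from this report): ANDing two inputs
with selectivities 0.1 and 0.05 gives bitmapselectivity = 0.005 under the
independence assumption; if the inputs cost 100 and 80 units, then
totalCost = 100 + 80 + 100 * cpu_operator_cost, i.e. a tbm_intersect
surcharge of 0.25 at the default cpu_operator_cost of 0.0025, charged once
for each input after the first.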
    1132             : 
    1133             : /*
    1134             :  * cost_bitmap_or_node
    1135             :  *      Estimate the cost of a BitmapOr node
    1136             :  *
    1137             :  * See comments for cost_bitmap_and_node.
    1138             :  */
    1139             : void
    1140         418 : cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
    1141             : {
    1142             :     Cost        totalCost;
    1143             :     Selectivity selec;
    1144             :     ListCell   *l;
    1145             : 
    1146             :     /*
    1147             :      * We estimate OR selectivity on the assumption that the inputs are
    1148             :      * non-overlapping, since that's often the case in "x IN (list)" type
    1149             :      * situations.  Of course, we clamp to 1.0 at the end.
    1150             :      *
    1151             :      * The runtime cost of the BitmapOr itself is estimated at 100x
    1152             :      * cpu_operator_cost for each tbm_union needed.  Probably too small,
    1153             :      * definitely too simplistic?  We are aware that the tbm_unions are
    1154             :      * optimized out when the inputs are BitmapIndexScans.
    1155             :      */
    1156         418 :     totalCost = 0.0;
    1157         418 :     selec = 0.0;
    1158        1294 :     foreach(l, path->bitmapquals)
    1159             :     {
    1160         876 :         Path       *subpath = (Path *) lfirst(l);
    1161             :         Cost        subCost;
    1162             :         Selectivity subselec;
    1163             : 
    1164         876 :         cost_bitmap_tree_node(subpath, &subCost, &subselec);
    1165             : 
    1166         876 :         selec += subselec;
    1167             : 
    1168         876 :         totalCost += subCost;
    1169         876 :         if (l != list_head(path->bitmapquals) &&
    1170         458 :             !IsA(subpath, IndexPath))
    1171          20 :             totalCost += 100.0 * cpu_operator_cost;
    1172             :     }
    1173         418 :     path->bitmapselectivity = Min(selec, 1.0);
    1174         418 :     path->path.rows = 0;     /* per above, not used */
    1175         418 :     path->path.startup_cost = totalCost;
    1176         418 :     path->path.total_cost = totalCost;
    1177         418 : }
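By the same token, ORing inputs with assumed selectivities 0.6 and 0.7 sums
to 1.3 and is clamped to bitmapselectivity = 1.0.  Unlike the AND case, the
per-tbm_union surcharge is skipped when the later input is a plain IndexPath,
since those unions are optimized out as the comment above notes.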
    1178             : 
    1179             : /*
    1180             :  * cost_tidscan
    1181             :  *    Determines and returns the cost of scanning a relation using TIDs.
    1182             :  *
    1183             :  * 'baserel' is the relation to be scanned
    1184             :  * 'tidquals' is the list of TID-checkable quals
    1185             :  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
    1186             :  */
    1187             : void
    1188         620 : cost_tidscan(Path *path, PlannerInfo *root,
    1189             :              RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
    1190             : {
    1191         620 :     Cost        startup_cost = 0;
    1192         620 :     Cost        run_cost = 0;
    1193         620 :     bool        isCurrentOf = false;
    1194             :     QualCost    qpqual_cost;
    1195             :     Cost        cpu_per_tuple;
    1196             :     QualCost    tid_qual_cost;
    1197             :     int         ntuples;
    1198             :     ListCell   *l;
    1199             :     double      spc_random_page_cost;
    1200             : 
    1201             :     /* Should only be applied to base relations */
    1202             :     Assert(baserel->relid > 0);
    1203             :     Assert(baserel->rtekind == RTE_RELATION);
    1204             : 
    1205             :     /* Mark the path with the correct row estimate */
    1206         620 :     if (param_info)
    1207          78 :         path->rows = param_info->ppi_rows;
    1208             :     else
    1209         542 :         path->rows = baserel->rows;
    1210             : 
    1211             :     /* Count how many tuples we expect to retrieve */
    1212         620 :     ntuples = 0;
    1213        1256 :     foreach(l, tidquals)
    1214             :     {
    1215         636 :         RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
    1216         636 :         Expr       *qual = rinfo->clause;
    1217             : 
    1218         636 :         if (IsA(qual, ScalarArrayOpExpr))
    1219             :         {
    1220             :             /* Each element of the array yields 1 tuple */
    1221          20 :             ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
    1222          20 :             Node       *arraynode = (Node *) lsecond(saop->args);
    1223             : 
    1224          20 :             ntuples += estimate_array_length(arraynode);
    1225             :         }
    1226         616 :         else if (IsA(qual, CurrentOfExpr))
    1227             :         {
    1228             :             /* CURRENT OF yields 1 tuple */
    1229         380 :             isCurrentOf = true;
    1230         380 :             ntuples++;
    1231             :         }
    1232             :         else
    1233             :         {
    1234             :             /* It's just CTID = something, count 1 tuple */
    1235         236 :             ntuples++;
    1236             :         }
    1237             :     }
    1238             : 
    1239             :     /*
    1240             :      * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
    1241             :      * understands how to do it correctly.  Therefore, honor enable_tidscan
    1242             :      * only when CURRENT OF isn't present.  Also note that cost_qual_eval
    1243             :      * counts a CurrentOfExpr as having startup cost disable_cost, which we
    1244             :      * subtract off here; that's to prevent other plan types such as seqscan
    1245             :      * from winning.
    1246             :      */
    1247         620 :     if (isCurrentOf)
    1248             :     {
    1249             :         Assert(baserel->baserestrictcost.startup >= disable_cost);
    1250         380 :         startup_cost -= disable_cost;
    1251             :     }
    1252         240 :     else if (!enable_tidscan)
    1253           0 :         startup_cost += disable_cost;
    1254             : 
    1255             :     /*
    1256             :      * The TID qual expressions will be computed once, any other baserestrict
    1257             :      * quals once per retrieved tuple.
    1258             :      */
    1259         620 :     cost_qual_eval(&tid_qual_cost, tidquals, root);
    1260             : 
    1261             :     /* fetch estimated page cost for tablespace containing table */
    1262         620 :     get_tablespace_page_costs(baserel->reltablespace,
    1263             :                               &spc_random_page_cost,
    1264             :                               NULL);
    1265             : 
    1266             :     /* disk costs --- assume each tuple on a different page */
    1267         620 :     run_cost += spc_random_page_cost * ntuples;
    1268             : 
    1269             :     /* Add scanning CPU costs */
    1270         620 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
    1271             : 
    1272             :     /* XXX currently we assume TID quals are a subset of qpquals */
    1273         620 :     startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
    1274        1240 :     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
    1275         620 :         tid_qual_cost.per_tuple;
    1276         620 :     run_cost += cpu_per_tuple * ntuples;
    1277             : 
    1278             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    1279         620 :     startup_cost += path->pathtarget->cost.startup;
    1280         620 :     run_cost += path->pathtarget->cost.per_tuple * path->rows;
    1281             : 
    1282         620 :     path->startup_cost = startup_cost;
    1283         620 :     path->total_cost = startup_cost + run_cost;
    1284         620 : }
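A sketch of the tuple count and disk charge, using hypothetical quals: for a
tidquals list holding one "ctid = ANY (array)" clause whose array
estimate_array_length() pegs at 3 elements, plus one plain "ctid = constant"
clause, ntuples = 4; since each tuple is assumed to lie on a different page,
the disk component of run_cost is 4 * spc_random_page_cost, or 16.0 at the
default random_page_cost of 4.0.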
    1285             : 
    1286             : /*
    1287             :  * cost_subqueryscan
    1288             :  *    Determines and returns the cost of scanning a subquery RTE.
    1289             :  *
    1290             :  * 'baserel' is the relation to be scanned
    1291             :  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
    1292             :  */
    1293             : void
    1294       10170 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
    1295             :                   RelOptInfo *baserel, ParamPathInfo *param_info)
    1296             : {
    1297             :     Cost        startup_cost;
    1298             :     Cost        run_cost;
    1299             :     QualCost    qpqual_cost;
    1300             :     Cost        cpu_per_tuple;
    1301             : 
    1302             :     /* Should only be applied to base relations that are subqueries */
    1303             :     Assert(baserel->relid > 0);
    1304             :     Assert(baserel->rtekind == RTE_SUBQUERY);
    1305             : 
    1306             :     /* Mark the path with the correct row estimate */
    1307       10170 :     if (param_info)
    1308         264 :         path->path.rows = param_info->ppi_rows;
    1309             :     else
    1310        9906 :         path->path.rows = baserel->rows;
    1311             : 
    1312             :     /*
    1313             :      * Cost of path is cost of evaluating the subplan, plus cost of evaluating
    1314             :      * any restriction clauses and tlist that will be attached to the
    1315             :      * SubqueryScan node, plus cpu_tuple_cost to account for selection and
    1316             :      * projection overhead.
    1317             :      */
    1318       10170 :     path->path.startup_cost = path->subpath->startup_cost;
    1319       10170 :     path->path.total_cost = path->subpath->total_cost;
    1320             : 
    1321       10170 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
    1322             : 
    1323       10170 :     startup_cost = qpqual_cost.startup;
    1324       10170 :     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    1325       10170 :     run_cost = cpu_per_tuple * baserel->tuples;
    1326             : 
    1327             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    1328       10170 :     startup_cost += path->path.pathtarget->cost.startup;
    1329       10170 :     run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
    1330             : 
    1331       10170 :     path->path.startup_cost += startup_cost;
    1332       10170 :     path->path.total_cost += startup_cost + run_cost;
    1333       10170 : }
    1334             : 
    1335             : /*
    1336             :  * cost_functionscan
    1337             :  *    Determines and returns the cost of scanning a function RTE.
    1338             :  *
    1339             :  * 'baserel' is the relation to be scanned
    1340             :  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
    1341             :  */
    1342             : void
    1343       32350 : cost_functionscan(Path *path, PlannerInfo *root,
    1344             :                   RelOptInfo *baserel, ParamPathInfo *param_info)
    1345             : {
    1346       32350 :     Cost        startup_cost = 0;
    1347       32350 :     Cost        run_cost = 0;
    1348             :     QualCost    qpqual_cost;
    1349             :     Cost        cpu_per_tuple;
    1350             :     RangeTblEntry *rte;
    1351             :     QualCost    exprcost;
    1352             : 
    1353             :     /* Should only be applied to base relations that are functions */
    1354             :     Assert(baserel->relid > 0);
    1355       32350 :     rte = planner_rt_fetch(baserel->relid, root);
    1356             :     Assert(rte->rtekind == RTE_FUNCTION);
    1357             : 
    1358             :     /* Mark the path with the correct row estimate */
    1359       32350 :     if (param_info)
    1360         396 :         path->rows = param_info->ppi_rows;
    1361             :     else
    1362       31954 :         path->rows = baserel->rows;
    1363             : 
    1364             :     /*
    1365             :      * Estimate costs of executing the function expression(s).
    1366             :      *
    1367             :      * Currently, nodeFunctionscan.c always executes the functions to
    1368             :      * completion before returning any rows, and caches the results in a
    1369             :      * tuplestore.  So the function eval cost is all startup cost, and per-row
    1370             :      * costs are minimal.
    1371             :      *
    1372             :      * XXX in principle we ought to charge tuplestore spill costs if the
    1373             :      * number of rows is large.  However, given how phony our rowcount
    1374             :      * estimates for functions tend to be, there's not a lot of point in that
    1375             :      * refinement right now.
    1376             :      */
    1377       32350 :     cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
    1378             : 
    1379       32350 :     startup_cost += exprcost.startup + exprcost.per_tuple;
    1380             : 
    1381             :     /* Add scanning CPU costs */
    1382       32350 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
    1383             : 
    1384       32350 :     startup_cost += qpqual_cost.startup;
    1385       32350 :     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    1386       32350 :     run_cost += cpu_per_tuple * baserel->tuples;
    1387             : 
    1388             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    1389       32350 :     startup_cost += path->pathtarget->cost.startup;
    1390       32350 :     run_cost += path->pathtarget->cost.per_tuple * path->rows;
    1391             : 
    1392       32350 :     path->startup_cost = startup_cost;
    1393       32350 :     path->total_cost = startup_cost + run_cost;
    1394       32350 : }
    1395             : 
    1396             : /*
    1397             :  * cost_tablefuncscan
    1398             :  *    Determines and returns the cost of scanning a table function.
    1399             :  *
    1400             :  * 'baserel' is the relation to be scanned
    1401             :  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
    1402             :  */
    1403             : void
    1404         144 : cost_tablefuncscan(Path *path, PlannerInfo *root,
    1405             :                    RelOptInfo *baserel, ParamPathInfo *param_info)
    1406             : {
    1407         144 :     Cost        startup_cost = 0;
    1408         144 :     Cost        run_cost = 0;
    1409             :     QualCost    qpqual_cost;
    1410             :     Cost        cpu_per_tuple;
    1411             :     RangeTblEntry *rte;
    1412             :     QualCost    exprcost;
    1413             : 
    1414             :     /* Should only be applied to base relations that are functions */
    1415             :     Assert(baserel->relid > 0);
    1416         144 :     rte = planner_rt_fetch(baserel->relid, root);
    1417             :     Assert(rte->rtekind == RTE_TABLEFUNC);
    1418             : 
    1419             :     /* Mark the path with the correct row estimate */
    1420         144 :     if (param_info)
    1421          96 :         path->rows = param_info->ppi_rows;
    1422             :     else
    1423          48 :         path->rows = baserel->rows;
    1424             : 
    1425             :     /*
    1426             :      * Estimate costs of executing the table func expression(s).
    1427             :      *
    1428             :      * XXX in principle we ought to charge tuplestore spill costs if the
    1429             :      * number of rows is large.  However, given how phony our rowcount
    1430             :      * estimates for tablefuncs tend to be, there's not a lot of point in that
    1431             :      * refinement right now.
    1432             :      */
    1433         144 :     cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
    1434             : 
    1435         144 :     startup_cost += exprcost.startup + exprcost.per_tuple;
    1436             : 
    1437             :     /* Add scanning CPU costs */
    1438         144 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
    1439             : 
    1440         144 :     startup_cost += qpqual_cost.startup;
    1441         144 :     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    1442         144 :     run_cost += cpu_per_tuple * baserel->tuples;
    1443             : 
    1444             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    1445         144 :     startup_cost += path->pathtarget->cost.startup;
    1446         144 :     run_cost += path->pathtarget->cost.per_tuple * path->rows;
    1447             : 
    1448         144 :     path->startup_cost = startup_cost;
    1449         144 :     path->total_cost = startup_cost + run_cost;
    1450         144 : }
    1451             : 
    1452             : /*
    1453             :  * cost_valuesscan
    1454             :  *    Determines and returns the cost of scanning a VALUES RTE.
    1455             :  *
    1456             :  * 'baserel' is the relation to be scanned
    1457             :  * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
    1458             :  */
    1459             : void
    1460        4362 : cost_valuesscan(Path *path, PlannerInfo *root,
    1461             :                 RelOptInfo *baserel, ParamPathInfo *param_info)
    1462             : {
    1463        4362 :     Cost        startup_cost = 0;
    1464        4362 :     Cost        run_cost = 0;
    1465             :     QualCost    qpqual_cost;
    1466             :     Cost        cpu_per_tuple;
    1467             : 
    1468             :     /* Should only be applied to base relations that are values lists */
    1469             :     Assert(baserel->relid > 0);
    1470             :     Assert(baserel->rtekind == RTE_VALUES);
    1471             : 
    1472             :     /* Mark the path with the correct row estimate */
    1473        4362 :     if (param_info)
    1474          28 :         path->rows = param_info->ppi_rows;
    1475             :     else
    1476        4334 :         path->rows = baserel->rows;
    1477             : 
    1478             :     /*
    1479             :      * For now, estimate list evaluation cost at one operator eval per list
    1480             :      * (probably pretty bogus, but is it worth being smarter?)
    1481             :      */
    1482        4362 :     cpu_per_tuple = cpu_operator_cost;
    1483             : 
    1484             :     /* Add scanning CPU costs */
    1485        4362 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
    1486             : 
    1487        4362 :     startup_cost += qpqual_cost.startup;
    1488        4362 :     cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
    1489        4362 :     run_cost += cpu_per_tuple * baserel->tuples;
    1490             : 
    1491             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    1492        4362 :     startup_cost += path->pathtarget->cost.startup;
    1493        4362 :     run_cost += path->pathtarget->cost.per_tuple * path->rows;
    1494             : 
    1495        4362 :     path->startup_cost = startup_cost;
    1496        4362 :     path->total_cost = startup_cost + run_cost;
    1497        4362 : }
    1498             : 
    1499             : /*
    1500             :  * cost_ctescan
    1501             :  *    Determines and returns the cost of scanning a CTE RTE.
    1502             :  *
    1503             :  * Note: this is used for both self-reference and regular CTEs; the
    1504             :  * possible cost differences are below the threshold of what we could
    1505             :  * estimate accurately anyway.  Note that the costs of evaluating the
    1506             :  * referenced CTE query are added into the final plan as initplan costs,
    1507             :  * and should NOT be counted here.
    1508             :  */
    1509             : void
    1510        1304 : cost_ctescan(Path *path, PlannerInfo *root,
    1511             :              RelOptInfo *baserel, ParamPathInfo *param_info)
    1512             : {
    1513        1304 :     Cost        startup_cost = 0;
    1514        1304 :     Cost        run_cost = 0;
    1515             :     QualCost    qpqual_cost;
    1516             :     Cost        cpu_per_tuple;
    1517             : 
    1518             :     /* Should only be applied to base relations that are CTEs */
    1519             :     Assert(baserel->relid > 0);
    1520             :     Assert(baserel->rtekind == RTE_CTE);
    1521             : 
    1522             :     /* Mark the path with the correct row estimate */
    1523        1304 :     if (param_info)
    1524           0 :         path->rows = param_info->ppi_rows;
    1525             :     else
    1526        1304 :         path->rows = baserel->rows;
    1527             : 
    1528             :     /* Charge one CPU tuple cost per row for tuplestore manipulation */
    1529        1304 :     cpu_per_tuple = cpu_tuple_cost;
    1530             : 
    1531             :     /* Add scanning CPU costs */
    1532        1304 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
    1533             : 
    1534        1304 :     startup_cost += qpqual_cost.startup;
    1535        1304 :     cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
    1536        1304 :     run_cost += cpu_per_tuple * baserel->tuples;
    1537             : 
    1538             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    1539        1304 :     startup_cost += path->pathtarget->cost.startup;
    1540        1304 :     run_cost += path->pathtarget->cost.per_tuple * path->rows;
    1541             : 
    1542        1304 :     path->startup_cost = startup_cost;
    1543        1304 :     path->total_cost = startup_cost + run_cost;
    1544        1304 : }
    1545             : 
    1546             : /*
    1547             :  * cost_namedtuplestorescan
    1548             :  *    Determines and returns the cost of scanning a named tuplestore.
    1549             :  */
    1550             : void
    1551         260 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
    1552             :                          RelOptInfo *baserel, ParamPathInfo *param_info)
    1553             : {
    1554         260 :     Cost        startup_cost = 0;
    1555         260 :     Cost        run_cost = 0;
    1556             :     QualCost    qpqual_cost;
    1557             :     Cost        cpu_per_tuple;
    1558             : 
    1559             :     /* Should only be applied to base relations that are Tuplestores */
    1560             :     Assert(baserel->relid > 0);
    1561             :     Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
    1562             : 
    1563             :     /* Mark the path with the correct row estimate */
    1564         260 :     if (param_info)
    1565           0 :         path->rows = param_info->ppi_rows;
    1566             :     else
    1567         260 :         path->rows = baserel->rows;
    1568             : 
    1569             :     /* Charge one CPU tuple cost per row for tuplestore manipulation */
    1570         260 :     cpu_per_tuple = cpu_tuple_cost;
    1571             : 
    1572             :     /* Add scanning CPU costs */
    1573         260 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
    1574             : 
    1575         260 :     startup_cost += qpqual_cost.startup;
    1576         260 :     cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
    1577         260 :     run_cost += cpu_per_tuple * baserel->tuples;
    1578             : 
    1579         260 :     path->startup_cost = startup_cost;
    1580         260 :     path->total_cost = startup_cost + run_cost;
    1581         260 : }
    1582             : 
    1583             : /*
    1584             :  * cost_resultscan
    1585             :  *    Determines and returns the cost of scanning an RTE_RESULT relation.
    1586             :  */
    1587             : void
    1588         722 : cost_resultscan(Path *path, PlannerInfo *root,
    1589             :                 RelOptInfo *baserel, ParamPathInfo *param_info)
    1590             : {
    1591         722 :     Cost        startup_cost = 0;
    1592         722 :     Cost        run_cost = 0;
    1593             :     QualCost    qpqual_cost;
    1594             :     Cost        cpu_per_tuple;
    1595             : 
    1596             :     /* Should only be applied to RTE_RESULT base relations */
    1597             :     Assert(baserel->relid > 0);
    1598             :     Assert(baserel->rtekind == RTE_RESULT);
    1599             : 
    1600             :     /* Mark the path with the correct row estimate */
    1601         722 :     if (param_info)
    1602          68 :         path->rows = param_info->ppi_rows;
    1603             :     else
    1604         654 :         path->rows = baserel->rows;
    1605             : 
    1606             :     /* We charge qual cost plus cpu_tuple_cost */
    1607         722 :     get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
    1608             : 
    1609         722 :     startup_cost += qpqual_cost.startup;
    1610         722 :     cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    1611         722 :     run_cost += cpu_per_tuple * baserel->tuples;
    1612             : 
    1613         722 :     path->startup_cost = startup_cost;
    1614         722 :     path->total_cost = startup_cost + run_cost;
    1615         722 : }
    1616             : 
    1617             : /*
    1618             :  * cost_recursive_union
    1619             :  *    Determines and returns the cost of performing a recursive union,
    1620             :  *    and also the estimated output size.
    1621             :  *
    1622             :  * We are given Paths for the nonrecursive and recursive terms.
    1623             :  */
    1624             : void
    1625         344 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
    1626             : {
    1627             :     Cost        startup_cost;
    1628             :     Cost        total_cost;
    1629             :     double      total_rows;
    1630             : 
    1631             :     /* We probably have decent estimates for the non-recursive term */
    1632         344 :     startup_cost = nrterm->startup_cost;
    1633         344 :     total_cost = nrterm->total_cost;
    1634         344 :     total_rows = nrterm->rows;
    1635             : 
    1636             :     /*
    1637             :      * We arbitrarily assume that about 10 recursive iterations will be
    1638             :      * needed, and that we've managed to get a good fix on the cost and output
    1639             :      * size of each one of them.  These are mighty shaky assumptions but it's
    1640             :      * hard to see how to do better.
    1641             :      */
    1642         344 :     total_cost += 10 * rterm->total_cost;
    1643         344 :     total_rows += 10 * rterm->rows;
    1644             : 
    1645             :     /*
    1646             :      * Also charge cpu_tuple_cost per row to account for the costs of
    1647             :      * manipulating the tuplestores.  (We don't worry about possible
    1648             :      * spill-to-disk costs.)
    1649             :      */
    1650         344 :     total_cost += cpu_tuple_cost * total_rows;
    1651             : 
    1652         344 :     runion->startup_cost = startup_cost;
    1653         344 :     runion->total_cost = total_cost;
    1654         344 :     runion->rows = total_rows;
    1655         344 :     runion->pathtarget->width = Max(nrterm->pathtarget->width,
    1656             :                                     rterm->pathtarget->width);
    1657         344 : }
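A worked example with assumed term costs: if the non-recursive term costs 100
and yields 50 rows while the recursive term costs 20 and yields 10 rows, then
total_cost = 100 + 10 * 20 = 300 and total_rows = 50 + 10 * 10 = 150; the
tuplestore charge adds cpu_tuple_cost * 150 = 1.5 at the default
cpu_tuple_cost of 0.01, for a final total_cost of 301.5.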
    1658             : 
    1659             : /*
    1660             :  * cost_tuplesort
    1661             :  *    Determines and returns the cost of sorting a relation using tuplesort,
    1662             :  *    not including the cost of reading the input data.
    1663             :  *
    1664             :  * If the total volume of data to sort is less than sort_mem, we will do
    1665             :  * an in-memory sort, which requires no I/O and about t*log2(t) tuple
    1666             :  * comparisons for t tuples.
    1667             :  *
    1668             :  * If the total volume exceeds sort_mem, we switch to a tape-style merge
    1669             :  * algorithm.  There will still be about t*log2(t) tuple comparisons in
    1670             :  * total, but we will also need to write and read each tuple once per
    1671             :  * merge pass.  We expect about ceil(logM(r)) merge passes where r is the
    1672             :  * number of initial runs formed and M is the merge order used by tuplesort.c.
    1673             :  * Since the average initial run should be about sort_mem, we have
     1674             :  *      disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
    1675             :  *      cpu = comparison_cost * t * log2(t)
    1676             :  *
    1677             :  * If the sort is bounded (i.e., only the first k result tuples are needed)
    1678             :  * and k tuples can fit into sort_mem, we use a heap method that keeps only
    1679             :  * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
    1680             :  *
    1681             :  * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
    1682             :  * accesses (XXX can't we refine that guess?)
    1683             :  *
    1684             :  * By default, we charge two operator evals per tuple comparison, which should
    1685             :  * be in the right ballpark in most cases.  The caller can tweak this by
    1686             :  * specifying nonzero comparison_cost; typically that's used for any extra
    1687             :  * work that has to be done to prepare the inputs to the comparison operators.
    1688             :  *
    1689             :  * 'tuples' is the number of tuples in the relation
    1690             :  * 'width' is the average tuple width in bytes
    1691             :  * 'comparison_cost' is the extra cost per comparison, if any
    1692             :  * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
    1693             :  * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
    1694             :  */
    1695             : static void
    1696      736576 : cost_tuplesort(Cost *startup_cost, Cost *run_cost,
    1697             :                double tuples, int width,
    1698             :                Cost comparison_cost, int sort_mem,
    1699             :                double limit_tuples)
    1700             : {
    1701      736576 :     double      input_bytes = relation_byte_size(tuples, width);
    1702             :     double      output_bytes;
    1703             :     double      output_tuples;
    1704      736576 :     long        sort_mem_bytes = sort_mem * 1024L;
    1705             : 
    1706             :     /*
    1707             :      * We want to be sure the cost of a sort is never estimated as zero, even
    1708             :      * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
    1709             :      */
    1710      736576 :     if (tuples < 2.0)
    1711      273116 :         tuples = 2.0;
    1712             : 
    1713             :     /* Include the default cost-per-comparison */
    1714      736576 :     comparison_cost += 2.0 * cpu_operator_cost;
    1715             : 
    1716             :     /* Do we have a useful LIMIT? */
    1717      736576 :     if (limit_tuples > 0 && limit_tuples < tuples)
    1718             :     {
    1719        1594 :         output_tuples = limit_tuples;
    1720        1594 :         output_bytes = relation_byte_size(output_tuples, width);
    1721             :     }
    1722             :     else
    1723             :     {
    1724      734982 :         output_tuples = tuples;
    1725      734982 :         output_bytes = input_bytes;
    1726             :     }
    1727             : 
    1728      736576 :     if (output_bytes > sort_mem_bytes)
    1729             :     {
    1730             :         /*
    1731             :          * We'll have to use a disk-based sort of all the tuples
    1732             :          */
    1733        7006 :         double      npages = ceil(input_bytes / BLCKSZ);
    1734        7006 :         double      nruns = input_bytes / sort_mem_bytes;
    1735        7006 :         double      mergeorder = tuplesort_merge_order(sort_mem_bytes);
    1736             :         double      log_runs;
    1737             :         double      npageaccesses;
    1738             : 
    1739             :         /*
    1740             :          * CPU costs
    1741             :          *
    1742             :          * Assume about N log2 N comparisons
    1743             :          */
    1744        7006 :         *startup_cost = comparison_cost * tuples * LOG2(tuples);
    1745             : 
    1746             :         /* Disk costs */
    1747             : 
    1748             :         /* Compute logM(r) as log(r) / log(M) */
    1749        7006 :         if (nruns > mergeorder)
    1750        3542 :             log_runs = ceil(log(nruns) / log(mergeorder));
    1751             :         else
    1752        3464 :             log_runs = 1.0;
    1753        7006 :         npageaccesses = 2.0 * npages * log_runs;
    1754             :         /* Assume 3/4ths of accesses are sequential, 1/4th are not */
    1755       14012 :         *startup_cost += npageaccesses *
    1756        7006 :             (seq_page_cost * 0.75 + random_page_cost * 0.25);
    1757             :     }
    1758      729570 :     else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
    1759             :     {
    1760             :         /*
    1761             :          * We'll use a bounded heap-sort keeping just K tuples in memory, for
    1762             :          * a total number of tuple comparisons of N log2 K; but the constant
    1763             :          * factor is a bit higher than for quicksort.  Tweak it so that the
    1764             :          * cost curve is continuous at the crossover point.
    1765             :          */
    1766        1094 :         *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
    1767             :     }
    1768             :     else
    1769             :     {
    1770             :         /* We'll use plain quicksort on all the input tuples */
    1771      728476 :         *startup_cost = comparison_cost * tuples * LOG2(tuples);
    1772             :     }
    1773             : 
    1774             :     /*
    1775             :      * Also charge a small amount (arbitrarily set equal to operator cost) per
    1776             :      * extracted tuple.  We don't charge cpu_tuple_cost because a Sort node
    1777             :      * doesn't do qual-checking or projection, so it has less overhead than
    1778             :      * most plan nodes.  Note it's correct to use tuples not output_tuples
    1779             :      * here --- the upper LIMIT will pro-rate the run cost so we'd be double
    1780             :      * counting the LIMIT otherwise.
    1781             :      */
    1782      736576 :     *run_cost = cpu_operator_cost * tuples;
    1783      736576 : }
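A worked disk-sort example (assumed inputs; the merge order comes from
tuplesort.c internals and is only approximated here): sorting t = 1,000,000
tuples whose input_bytes total 800 MB with sort_mem = 64 MB gives
npages = ceil(800 MB / 8 KB) = 102400 and nruns = 800 / 64 = 12.5.  Since
tuplesort_merge_order() for a 64 MB work_mem is far above 12.5 (on the order
of a few hundred), log_runs = 1.0, so npageaccesses = 2 * 102400 = 204800 and
the disk cost is 204800 * (1.0 * 0.75 + 4.0 * 0.25) = 358400 at the default
page costs.  The CPU term is 0.005 * 1e6 * log2(1e6), roughly 99,660, for a
startup cost of roughly 458,000.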
    1784             : 
    1785             : /*
    1786             :  * cost_incremental_sort
    1787             :  *  Determines and returns the cost of sorting a relation incrementally, when
    1788             :  *  the input path is presorted by a prefix of the pathkeys.
    1789             :  *
    1790             :  * 'presorted_keys' is the number of leading pathkeys by which the input path
    1791             :  * is sorted.
    1792             :  *
    1793             :  * We estimate the number of groups into which the relation is divided by the
    1794             :  * leading pathkeys, and then calculate the cost of sorting a single group
    1795             :  * with tuplesort using cost_tuplesort().
    1796             :  */
    1797             : void
    1798        1622 : cost_incremental_sort(Path *path,
    1799             :                       PlannerInfo *root, List *pathkeys, int presorted_keys,
    1800             :                       Cost input_startup_cost, Cost input_total_cost,
    1801             :                       double input_tuples, int width, Cost comparison_cost, int sort_mem,
    1802             :                       double limit_tuples)
    1803             : {
    1804        1622 :     Cost        startup_cost = 0,
    1805        1622 :                 run_cost = 0,
    1806        1622 :                 input_run_cost = input_total_cost - input_startup_cost;
    1807             :     double      group_tuples,
    1808             :                 input_groups;
    1809             :     Cost        group_startup_cost,
    1810             :                 group_run_cost,
    1811             :                 group_input_run_cost;
    1812        1622 :     List       *presortedExprs = NIL;
    1813             :     ListCell   *l;
    1814        1622 :     int         i = 0;
    1815        1622 :     bool        unknown_varno = false;
    1816             : 
    1817             :     Assert(presorted_keys != 0);
    1818             : 
    1819             :     /*
    1820             :      * We want to be sure the cost of a sort is never estimated as zero, even
    1821             :      * if passed-in tuple count is zero.  Besides, mustn't do log(0)...
    1822             :      */
    1823        1622 :     if (input_tuples < 2.0)
    1824         560 :         input_tuples = 2.0;
    1825             : 
    1826             :     /* Default estimate of number of groups, capped to one group per row. */
    1827        1622 :     input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
    1828             : 
    1829             :     /*
    1830             :      * Extract presorted keys as list of expressions.
    1831             :      *
    1832             :      * We need to be careful about Vars containing "varno 0" which might have
    1833             :      * been introduced by generate_append_tlist, which would confuse
    1834             :      * estimate_num_groups (in fact it'd fail for such expressions). See
    1835             :      * recurse_set_operations which has to deal with the same issue.
    1836             :      *
    1837             :      * Unlike recurse_set_operations we can't access the original target list
    1838             :      * here, and even if we could it's not very clear how useful would that be
     1839             :      * here, and even if we could it's not very clear how useful that would be
    1840             :      * there are any expressions with "varno 0" and use the default
    1841             :      * DEFAULT_NUM_DISTINCT in that case.
    1842             :      *
    1843             :      * We might also use either 1.0 (a single group) or input_tuples (each row
    1844             :      * being a separate group), pretty much the worst and best case for
    1845             :      * incremental sort. But those are extreme cases and using something in
    1846             :      * between seems reasonable. Furthermore, generate_append_tlist is used
    1847             :      * for set operations, which are likely to produce mostly unique output
    1848             :      * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
    1849             :      * while maintaining lower startup cost.
    1850             :      */
    1851        1650 :     foreach(l, pathkeys)
    1852             :     {
    1853        1650 :         PathKey    *key = (PathKey *) lfirst(l);
    1854        1650 :         EquivalenceMember *member = (EquivalenceMember *)
    1855        1650 :         linitial(key->pk_eclass->ec_members);
    1856             : 
    1857             :         /*
    1858             :          * Check if the expression contains Var with "varno 0" so that we
    1859             :          * don't call estimate_num_groups in that case.
    1860             :          */
    1861        1650 :         if (bms_is_member(0, pull_varnos((Node *) member->em_expr)))
    1862             :         {
    1863           4 :             unknown_varno = true;
    1864           4 :             break;
    1865             :         }
    1866             : 
    1867             :         /* expression not containing any Vars with "varno 0" */
    1868        1646 :         presortedExprs = lappend(presortedExprs, member->em_expr);
    1869             : 
    1870        1646 :         i++;
    1871        1646 :         if (i >= presorted_keys)
    1872        1618 :             break;
    1873             :     }
    1874             : 
    1875             :     /* Estimate number of groups with equal presorted keys. */
    1876        1622 :     if (!unknown_varno)
    1877        1618 :         input_groups = estimate_num_groups(root, presortedExprs, input_tuples, NULL);
    1878             : 
    1879        1622 :     group_tuples = input_tuples / input_groups;
    1880        1622 :     group_input_run_cost = input_run_cost / input_groups;
    1881             : 
    1882             :     /*
    1883             :      * Estimate average cost of sorting of one group where presorted keys are
    1884             :      * equal.  Incremental sort is sensitive to distribution of tuples to the
    1885             :      * groups, where we're relying on quite rough assumptions.  Thus, we're
    1886             :      * pessimistic about incremental sort performance and increase its average
    1887             :      * group size by half.
    1888             :      */
    1889        1622 :     cost_tuplesort(&group_startup_cost, &group_run_cost,
    1890             :                    1.5 * group_tuples, width, comparison_cost, sort_mem,
    1891             :                    limit_tuples);
    1892             : 
    1893             :     /*
    1894             :      * Startup cost of incremental sort is the startup cost of its first group
    1895             :      * plus the cost of its input.
    1896             :      */
    1897        1622 :     startup_cost += group_startup_cost
    1898        1622 :         + input_startup_cost + group_input_run_cost;
    1899             : 
    1900             :     /*
    1901             :      * After we started producing tuples from the first group, the cost of
    1902             :      * producing all the tuples is given by the cost to finish processing this
    1903             :      * group, plus the total cost to process the remaining groups, plus the
    1904             :      * remaining cost of input.
    1905             :      */
    1906        1622 :     run_cost += group_run_cost
    1907        1622 :         + (group_run_cost + group_startup_cost) * (input_groups - 1)
    1908        1622 :         + group_input_run_cost * (input_groups - 1);
    1909             : 
    1910             :     /*
    1911             :      * Incremental sort adds some overhead by itself. Firstly, it has to
    1912             :      * detect the sort groups. This is roughly equal to one extra copy and
    1913             :      * comparison per tuple. Secondly, it has to reset the tuplesort context
    1914             :      * for every group.
    1915             :      */
    1916        1622 :     run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
    1917        1622 :     run_cost += 2.0 * cpu_tuple_cost * input_groups;
    1918             : 
    1919        1622 :     path->rows = input_tuples;
    1920        1622 :     path->startup_cost = startup_cost;
    1921        1622 :     path->total_cost = startup_cost + run_cost;
    1922        1622 : }
    1923             : 
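A minimal standalone sketch of the cost composition above, with invented
inputs standing in for the planner's; group_startup_cost and group_run_cost
play the role of a cost_tuplesort() result for one 1.5x-sized group, and the
GUC defaults cpu_tuple_cost = 0.01 and cpu_operator_cost = 0.0025 are assumed:

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical inputs, not taken from any real plan */
        double input_startup_cost = 10.0, input_run_cost = 1000.0;
        double input_tuples = 10000.0, input_groups = 100.0;
        double group_startup_cost = 5.0, group_run_cost = 12.0;
        double cpu_tuple_cost = 0.01;
        double comparison_cost = 2.0 * 0.0025;   /* 2 ops per comparison */

        double group_input_run_cost = input_run_cost / input_groups;

        /* startup: read and sort the first group only */
        double startup = group_startup_cost + input_startup_cost
            + group_input_run_cost;

        /* run: finish this group, then the remaining groups and their input */
        double run = group_run_cost
            + (group_run_cost + group_startup_cost) * (input_groups - 1)
            + group_input_run_cost * (input_groups - 1);

        /* group detection per tuple, tuplesort reset per group */
        run += (cpu_tuple_cost + comparison_cost) * input_tuples;
        run += 2.0 * cpu_tuple_cost * input_groups;

        printf("startup=%.2f total=%.2f\n", startup, startup + run);
        return 0;
    }
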
    1924             : /*
    1925             :  * cost_sort
    1926             :  *    Determines and returns the cost of sorting a relation, including
    1927             :  *    the cost of reading the input data.
    1928             :  *
    1929             :  * NOTE: some callers currently pass NIL for pathkeys because they
    1930             :  * can't conveniently supply the sort keys.  Since this routine doesn't
    1931             :  * currently do anything with pathkeys anyway, that doesn't matter...
    1932             :  * but if it ever does, it should react gracefully to lack of key data.
    1933             :  * (Actually, the thing we'd most likely be interested in is just the number
    1934             :  * of sort keys, which all callers *could* supply.)
    1935             :  */
    1936             : void
    1937      734954 : cost_sort(Path *path, PlannerInfo *root,
    1938             :           List *pathkeys, Cost input_cost, double tuples, int width,
    1939             :           Cost comparison_cost, int sort_mem,
    1940             :           double limit_tuples)
    1941             : 
    1942             : {
    1943             :     Cost        startup_cost;
    1944             :     Cost        run_cost;
    1945             : 
    1946      734954 :     cost_tuplesort(&startup_cost, &run_cost,
    1947             :                    tuples, width,
    1948             :                    comparison_cost, sort_mem,
    1949             :                    limit_tuples);
    1950             : 
    1951      734954 :     if (!enable_sort)
    1952         646 :         startup_cost += disable_cost;
    1953             : 
    1954      734954 :     startup_cost += input_cost;
    1955             : 
    1956      734954 :     path->rows = tuples;
    1957      734954 :     path->startup_cost = startup_cost;
    1958      734954 :     path->total_cost = startup_cost + run_cost;
    1959      734954 : }
    1960             : 
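The enable_sort test above shows the pattern used by all the enable_* GUCs in
this file: a disabled node type is penalized, not removed, so a valid plan
still exists even when no alternative does.  A toy illustration (costs
invented):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        const double disable_cost = 1.0e10;    /* same value as costsize.c */
        bool enable_sort = false;              /* as if SET enable_sort = off */
        double startup_cost = 25.0, run_cost = 75.0;

        if (!enable_sort)
            startup_cost += disable_cost;      /* discourage, don't forbid */

        printf("total=%.2f\n", startup_cost + run_cost);
        return 0;
    }
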
    1961             : /*
    1962             :  * append_nonpartial_cost
    1963             :  *    Estimate the cost of the non-partial paths in a Parallel Append.
    1964             :  *    The non-partial paths are assumed to be the first "numpaths" paths
    1965             :  *    from the subpaths list, and to be in order of decreasing cost.
    1966             :  */
    1967             : static Cost
    1968        9660 : append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
    1969             : {
    1970             :     Cost       *costarr;
    1971             :     int         arrlen;
    1972             :     ListCell   *l;
    1973             :     ListCell   *cell;
    1974             :     int         i;
    1975             :     int         path_index;
    1976             :     int         min_index;
    1977             :     int         max_index;
    1978             : 
    1979        9660 :     if (numpaths == 0)
    1980        7534 :         return 0;
    1981             : 
    1982             :     /*
    1983             :      * Array length is number of workers or number of relevant paths,
    1984             :      * whichever is less.
    1985             :      */
    1986        2126 :     arrlen = Min(parallel_workers, numpaths);
    1987        2126 :     costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
    1988             : 
    1989             :     /* The first few paths will each be claimed by a different worker. */
    1990        2126 :     path_index = 0;
    1991        6090 :     foreach(cell, subpaths)
    1992             :     {
    1993        4704 :         Path       *subpath = (Path *) lfirst(cell);
    1994             : 
    1995        4704 :         if (path_index == arrlen)
    1996         740 :             break;
    1997        3964 :         costarr[path_index++] = subpath->total_cost;
    1998             :     }
    1999             : 
    2000             :     /*
    2001             :      * Since subpaths are sorted by decreasing cost, the last one will have
    2002             :      * the minimum cost.
    2003             :      */
    2004        2126 :     min_index = arrlen - 1;
    2005             : 
    2006             :     /*
    2007             :      * For each of the remaining subpaths, add its cost to the array element
    2008             :      * with minimum cost.
    2009             :      */
    2010        2648 :     for_each_cell(l, subpaths, cell)
    2011             :     {
    2012         866 :         Path       *subpath = (Path *) lfirst(l);
    2013             :         int         i;
    2014             : 
    2015             :         /* Consider only the non-partial paths */
    2016         866 :         if (path_index++ == numpaths)
    2017         344 :             break;
    2018             : 
    2019         522 :         costarr[min_index] += subpath->total_cost;
    2020             : 
    2021             :         /* Recompute which array element now has the minimum cost */
    2022        1590 :         for (min_index = i = 0; i < arrlen; i++)
    2023             :         {
    2024        1068 :             if (costarr[i] < costarr[min_index])
    2025         130 :                 min_index = i;
    2026             :         }
    2027             :     }
    2028             : 
    2029             :     /* Return the highest cost from the array */
    2030        6090 :     for (max_index = i = 0; i < arrlen; i++)
    2031             :     {
    2032        3964 :         if (costarr[i] > costarr[max_index])
    2033         312 :             max_index = i;
    2034             :     }
    2035             : 
    2036        2126 :     return costarr[max_index];
    2037             : }
    2038             : 
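The greedy assignment above can be exercised on its own: the first arrlen
paths each claim a worker, every later non-partial path is piled onto the
currently cheapest slot, and the answer is the fullest slot.  Sample costs
below are invented; with two workers the slots end at {55, 55}, so 55 is
returned:

    #include <stdio.h>

    int main(void)
    {
        /* non-partial subpath total_costs, sorted by decreasing cost */
        double costs[] = {40.0, 30.0, 20.0, 15.0, 5.0};
        int numpaths = 5, parallel_workers = 2;

        int arrlen = numpaths < parallel_workers ? numpaths : parallel_workers;
        double costarr[8];
        int i, j, min_index, max_index;

        for (i = 0; i < arrlen; i++)          /* one path per worker at first */
            costarr[i] = costs[i];

        min_index = arrlen - 1;               /* cheapest is last (sorted) */
        for (i = arrlen; i < numpaths; i++)
        {
            costarr[min_index] += costs[i];   /* add to cheapest slot */
            for (min_index = j = 0; j < arrlen; j++)
                if (costarr[j] < costarr[min_index])
                    min_index = j;
        }

        for (max_index = i = 0; i < arrlen; i++)
            if (costarr[i] > costarr[max_index])
                max_index = i;

        printf("append_nonpartial_cost = %.1f\n", costarr[max_index]);
        return 0;
    }
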
    2039             : /*
    2040             :  * cost_append
    2041             :  *    Determines and returns the cost of an Append node.
    2042             :  */
    2043             : void
    2044       27362 : cost_append(AppendPath *apath)
    2045             : {
    2046             :     ListCell   *l;
    2047             : 
    2048       27362 :     apath->path.startup_cost = 0;
    2049       27362 :     apath->path.total_cost = 0;
    2050       27362 :     apath->path.rows = 0;
    2051             : 
    2052       27362 :     if (apath->subpaths == NIL)
    2053         784 :         return;
    2054             : 
    2055       26578 :     if (!apath->path.parallel_aware)
    2056             :     {
    2057       16918 :         List       *pathkeys = apath->path.pathkeys;
    2058             : 
    2059       16918 :         if (pathkeys == NIL)
    2060             :         {
    2061       15778 :             Path       *subpath = (Path *) linitial(apath->subpaths);
    2062             : 
    2063             :             /*
    2064             :              * For an unordered, non-parallel-aware Append we take the startup
    2065             :              * cost as the startup cost of the first subpath.
    2066             :              */
    2067       15778 :             apath->path.startup_cost = subpath->startup_cost;
    2068             : 
    2069             :             /* Compute rows and costs as sums of subplan rows and costs. */
    2070       61600 :             foreach(l, apath->subpaths)
    2071             :             {
    2072       45822 :                 Path       *subpath = (Path *) lfirst(l);
    2073             : 
    2074       45822 :                 apath->path.rows += subpath->rows;
    2075       45822 :                 apath->path.total_cost += subpath->total_cost;
    2076             :             }
    2077             :         }
    2078             :         else
    2079             :         {
    2080             :             /*
    2081             :              * For an ordered, non-parallel-aware Append we take the startup
    2082             :              * cost as the sum of the subpath startup costs.  This ensures
    2083             :              * that we don't underestimate the startup cost when a query's
    2084             :              * LIMIT is such that several of the children have to be run to
    2085             :              * satisfy it.  This might be overkill --- another plausible hack
    2086             :              * would be to take the Append's startup cost as the maximum of
    2087             :              * the child startup costs.  But we don't want to risk believing
    2088             :              * that an ORDER BY LIMIT query can be satisfied at small cost
    2089             :              * when the first child has small startup cost but later ones
    2090             :              * don't.  (If we had the ability to deal with nonlinear cost
    2091             :              * interpolation for partial retrievals, we would not need to be
    2092             :              * so conservative about this.)
    2093             :              *
    2094             :              * This case is also different from the above in that we have to
    2095             :              * account for possibly injecting sorts into subpaths that aren't
    2096             :              * natively ordered.
    2097             :              */
    2098        4480 :             foreach(l, apath->subpaths)
    2099             :             {
    2100        3340 :                 Path       *subpath = (Path *) lfirst(l);
    2101             :                 Path        sort_path;  /* dummy for result of cost_sort */
    2102             : 
    2103        3340 :                 if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
    2104             :                 {
    2105             :                     /*
    2106             :                      * We'll need to insert a Sort node, so include costs for
    2107             :                      * that.  We can use the parent's LIMIT if any, since we
    2108             :                      * certainly won't pull more than that many tuples from
    2109             :                      * any child.
    2110             :                      */
    2111          40 :                     cost_sort(&sort_path,
    2112             :                               NULL, /* doesn't currently need root */
    2113             :                               pathkeys,
    2114             :                               subpath->total_cost,
    2115             :                               subpath->rows,
    2116          20 :                               subpath->pathtarget->width,
    2117             :                               0.0,
    2118             :                               work_mem,
    2119             :                               apath->limit_tuples);
    2120          20 :                     subpath = &sort_path;
    2121             :                 }
    2122             : 
    2123        3340 :                 apath->path.rows += subpath->rows;
    2124        3340 :                 apath->path.startup_cost += subpath->startup_cost;
    2125        3340 :                 apath->path.total_cost += subpath->total_cost;
    2126             :             }
    2127             :         }
    2128             :     }
    2129             :     else                        /* parallel-aware */
    2130             :     {
    2131        9660 :         int         i = 0;
    2132        9660 :         double      parallel_divisor = get_parallel_divisor(&apath->path);
    2133             : 
    2134             :         /* Parallel-aware Append never produces ordered output. */
    2135             :         Assert(apath->path.pathkeys == NIL);
    2136             : 
    2137             :         /* Calculate startup cost. */
    2138       39214 :         foreach(l, apath->subpaths)
    2139             :         {
    2140       29554 :             Path       *subpath = (Path *) lfirst(l);
    2141             : 
    2142             :             /*
    2143             :              * Append will start returning tuples when the child node with the
    2144             :              * lowest startup cost is done setting up. We consider only the
    2145             :              * first few subplans that immediately get a worker assigned.
    2146             :              */
    2147       29554 :             if (i == 0)
    2148        9660 :                 apath->path.startup_cost = subpath->startup_cost;
    2149       19894 :             else if (i < apath->path.parallel_workers)
    2150        9604 :                 apath->path.startup_cost = Min(apath->path.startup_cost,
    2151             :                                                subpath->startup_cost);
    2152             : 
    2153             :             /*
    2154             :              * Apply parallel divisor to subpaths.  Scale the number of rows
    2155             :              * for each partial subpath based on the ratio of the parallel
    2156             :              * divisor originally used for the subpath to the one we adopted.
    2157             :              * Also add the cost of partial paths to the total cost, but
    2158             :              * ignore non-partial paths for now.
    2159             :              */
    2160       29554 :             if (i < apath->first_partial_path)
    2161        4486 :                 apath->path.rows += subpath->rows / parallel_divisor;
    2162             :             else
    2163             :             {
    2164             :                 double      subpath_parallel_divisor;
    2165             : 
    2166       25068 :                 subpath_parallel_divisor = get_parallel_divisor(subpath);
    2167       25068 :                 apath->path.rows += subpath->rows * (subpath_parallel_divisor /
    2168             :                                                      parallel_divisor);
    2169       25068 :                 apath->path.total_cost += subpath->total_cost;
    2170             :             }
    2171             : 
    2172       29554 :             apath->path.rows = clamp_row_est(apath->path.rows);
    2173             : 
    2174       29554 :             i++;
    2175             :         }
    2176             : 
    2177             :         /* Add cost for non-partial subpaths. */
    2178        9660 :         apath->path.total_cost +=
    2179        9660 :             append_nonpartial_cost(apath->subpaths,
    2180             :                                    apath->first_partial_path,
    2181             :                                    apath->path.parallel_workers);
    2182             :     }
    2183             : 
    2184             :     /*
    2185             :      * Although Append does not do any selection or projection, it's not free;
    2186             :      * add a small per-tuple overhead.
    2187             :      */
    2188       53156 :     apath->path.total_cost +=
    2189       26578 :         cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
    2190             : }
    2191             : 
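A compact sketch contrasting the two non-parallel startup-cost rules above
(unordered: the first child's startup; ordered: the sum of all children's
startups), with invented child costs:

    #include <stdio.h>

    int main(void)
    {
        double child_startup[] = {1.0, 50.0, 2.0};
        int nchildren = 3, i;
        double ordered = 0.0;

        /* unordered Append can emit as soon as its first child can */
        double unordered = child_startup[0];

        /* ordered Append conservatively pays every child's startup */
        for (i = 0; i < nchildren; i++)
            ordered += child_startup[i];

        printf("unordered=%.1f ordered=%.1f\n", unordered, ordered);
        return 0;
    }
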
    2192             : /*
    2193             :  * cost_merge_append
    2194             :  *    Determines and returns the cost of a MergeAppend node.
    2195             :  *
    2196             :  * MergeAppend merges several pre-sorted input streams, using a heap that
    2197             :  * at any given instant holds the next tuple from each stream.  If there
    2198             :  * are N streams, we need about N*log2(N) tuple comparisons to construct
    2199             :  * the heap at startup, and then for each output tuple, about log2(N)
    2200             :  * comparisons to replace the top entry.
    2201             :  *
    2202             :  * (The effective value of N will drop once some of the input streams are
    2203             :  * exhausted, but it seems unlikely to be worth trying to account for that.)
    2204             :  *
    2205             :  * The heap is never spilled to disk, since we assume N is not very large.
    2206             :  * So this is much simpler than cost_sort.
    2207             :  *
    2208             :  * As in cost_sort, we charge two operator evals per tuple comparison.
    2209             :  *
    2210             :  * 'pathkeys' is a list of sort keys
    2211             :  * 'n_streams' is the number of input streams
    2212             :  * 'input_startup_cost' is the sum of the input streams' startup costs
    2213             :  * 'input_total_cost' is the sum of the input streams' total costs
    2214             :  * 'tuples' is the number of tuples in all the streams
    2215             :  */
    2216             : void
    2217        2248 : cost_merge_append(Path *path, PlannerInfo *root,
    2218             :                   List *pathkeys, int n_streams,
    2219             :                   Cost input_startup_cost, Cost input_total_cost,
    2220             :                   double tuples)
    2221             : {
    2222        2248 :     Cost        startup_cost = 0;
    2223        2248 :     Cost        run_cost = 0;
    2224             :     Cost        comparison_cost;
    2225             :     double      N;
    2226             :     double      logN;
    2227             : 
    2228             :     /*
    2229             :      * Avoid log(0)...
    2230             :      */
    2231        2248 :     N = (n_streams < 2) ? 2.0 : (double) n_streams;
    2232        2248 :     logN = LOG2(N);
    2233             : 
    2234             :     /* Assumed cost per tuple comparison */
    2235        2248 :     comparison_cost = 2.0 * cpu_operator_cost;
    2236             : 
    2237             :     /* Heap creation cost */
    2238        2248 :     startup_cost += comparison_cost * N * logN;
    2239             : 
    2240             :     /* Per-tuple heap maintenance cost */
    2241        2248 :     run_cost += tuples * comparison_cost * logN;
    2242             : 
    2243             :     /*
    2244             :      * Although MergeAppend does not do any selection or projection, it's not
    2245             :      * free; add a small per-tuple overhead.
    2246             :      */
    2247        2248 :     run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
    2248             : 
    2249        2248 :     path->startup_cost = startup_cost + input_startup_cost;
    2250        2248 :     path->total_cost = startup_cost + run_cost + input_total_cost;
    2251        2248 : }
    2252             : 
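Worked numbers for the N*log2(N) heap model above, using the default
cpu_operator_cost of 0.0025 (so comparison_cost = 0.005); with N = 4 streams,
logN = 2:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double cpu_operator_cost = 0.0025;
        double N = 4.0, tuples = 100000.0;
        double logN = log2(N);                        /* 2.0 */
        double comparison_cost = 2.0 * cpu_operator_cost;

        double startup = comparison_cost * N * logN;  /* 0.04 */
        double run = tuples * comparison_cost * logN; /* 1000.0 */

        printf("heap startup=%.2f run=%.2f\n", startup, run);
        return 0;
    }
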
    2253             : /*
    2254             :  * cost_material
    2255             :  *    Determines and returns the cost of materializing a relation, including
    2256             :  *    the cost of reading the input data.
    2257             :  *
    2258             :  * If the total volume of data to materialize exceeds work_mem, we will need
    2259             :  * to write it to disk, so the cost is much higher in that case.
    2260             :  *
    2261             :  * Note that here we are estimating the costs for the first scan of the
    2262             :  * relation, so the materialization is all overhead --- any savings will
    2263             :  * occur only on rescan, which is estimated in cost_rescan.
    2264             :  */
    2265             : void
    2266      244244 : cost_material(Path *path,
    2267             :               Cost input_startup_cost, Cost input_total_cost,
    2268             :               double tuples, int width)
    2269             : {
    2270      244244 :     Cost        startup_cost = input_startup_cost;
    2271      244244 :     Cost        run_cost = input_total_cost - input_startup_cost;
    2272      244244 :     double      nbytes = relation_byte_size(tuples, width);
    2273      244244 :     long        work_mem_bytes = work_mem * 1024L;
    2274             : 
    2275      244244 :     path->rows = tuples;
    2276             : 
    2277             :     /*
    2278             :      * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
    2279             :      * reflect bookkeeping overhead.  (This rate must be more than what
    2280             :      * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
    2281             :      * if it is exactly the same then there will be a cost tie between
    2282             :      * nestloop with A outer, materialized B inner and nestloop with B outer,
    2283             :      * materialized A inner.  The extra cost ensures we'll prefer
    2284             :      * materializing the smaller rel.)  Note that this is normally a good deal
    2285             :      * less than cpu_tuple_cost; which is OK because a Material plan node
    2286             :      * doesn't do qual-checking or projection, so it's got less overhead than
    2287             :      * most plan nodes.
    2288             :      */
    2289      244244 :     run_cost += 2 * cpu_operator_cost * tuples;
    2290             : 
    2291             :     /*
    2292             :      * If we will spill to disk, charge at the rate of seq_page_cost per page.
    2293             :      * This cost is assumed to be evenly spread through the plan run phase,
    2294             :      * which isn't exactly accurate but our cost model doesn't allow for
    2295             :      * nonuniform costs within the run phase.
    2296             :      */
    2297      244244 :     if (nbytes > work_mem_bytes)
    2298             :     {
    2299        1956 :         double      npages = ceil(nbytes / BLCKSZ);
    2300             : 
    2301        1956 :         run_cost += seq_page_cost * npages;
    2302             :     }
    2303             : 
    2304      244244 :     path->startup_cost = startup_cost;
    2305      244244 :     path->total_cost = startup_cost + run_cost;
    2306      244244 : }
    2307             : 
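The spill test above, reduced to arithmetic.  relation_byte_size() is
approximated here as tuples * (width + 24), where 24 is an assumed per-tuple
header overhead; the defaults seq_page_cost = 1.0 and BLCKSZ = 8192 are used:

    #include <math.h>
    #include <stdio.h>

    #define BLCKSZ 8192

    int main(void)
    {
        double tuples = 1e6, width = 64.0;
        double seq_page_cost = 1.0;
        long   work_mem_bytes = 4096 * 1024L;       /* work_mem = 4MB */

        /* crude stand-in for relation_byte_size(tuples, width) */
        double nbytes = tuples * (width + 24.0);
        double spill_cost = 0.0;

        if (nbytes > work_mem_bytes)
            spill_cost = seq_page_cost * ceil(nbytes / BLCKSZ);

        printf("nbytes=%.0f spill_cost=%.0f\n", nbytes, spill_cost);
        return 0;
    }
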
    2308             : /*
    2309             :  * cost_agg
    2310             :  *      Determines and returns the cost of performing an Agg plan node,
    2311             :  *      including the cost of its input.
    2312             :  *
    2313             :  * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
    2314             :  * we are using a hashed Agg node just to do grouping).
    2315             :  *
    2316             :  * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
    2317             :  * are for appropriately-sorted input.
    2318             :  */
    2319             : void
    2320       42106 : cost_agg(Path *path, PlannerInfo *root,
    2321             :          AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
    2322             :          int numGroupCols, double numGroups,
    2323             :          List *quals,
    2324             :          Cost input_startup_cost, Cost input_total_cost,
    2325             :          double input_tuples, double input_width)
    2326             : {
    2327             :     double      output_tuples;
    2328             :     Cost        startup_cost;
    2329             :     Cost        total_cost;
    2330             :     AggClauseCosts dummy_aggcosts;
    2331             : 
    2332             :     /* Use all-zero per-aggregate costs if NULL is passed */
    2333       42106 :     if (aggcosts == NULL)
    2334             :     {
    2335             :         Assert(aggstrategy == AGG_HASHED);
    2336       20640 :         MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
    2337        3440 :         aggcosts = &dummy_aggcosts;
    2338             :     }
    2339             : 
    2340             :     /*
    2341             :      * The transCost.per_tuple component of aggcosts should be charged once
    2342             :      * per input tuple, corresponding to the costs of evaluating the aggregate
    2343             :      * transfns and their input expressions. The finalCost.per_tuple component
    2344             :      * is charged once per output tuple, corresponding to the costs of
    2345             :      * evaluating the finalfns.  Startup costs are of course charged but once.
    2346             :      *
    2347             :      * If we are grouping, we charge an additional cpu_operator_cost per
    2348             :      * grouping column per input tuple for grouping comparisons.
    2349             :      *
    2350             :      * We will produce a single output tuple if not grouping, and a tuple per
    2351             :      * group otherwise.  We charge cpu_tuple_cost for each output tuple.
    2352             :      *
    2353             :      * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
    2354             :      * same total CPU cost, but AGG_SORTED has lower startup cost.  If the
    2355             :      * input path is already sorted appropriately, AGG_SORTED should be
    2356             :      * preferred (since it has no risk of memory overflow).  This will happen
    2357             :      * as long as the computed total costs are indeed exactly equal --- but if
    2358             :      * there's roundoff error we might do the wrong thing.  So be sure that
    2359             :      * the computations below form the same intermediate values in the same
    2360             :      * order.
    2361             :      */
    2362       42106 :     if (aggstrategy == AGG_PLAIN)
    2363             :     {
    2364       28028 :         startup_cost = input_total_cost;
    2365       28028 :         startup_cost += aggcosts->transCost.startup;
    2366       28028 :         startup_cost += aggcosts->transCost.per_tuple * input_tuples;
    2367       28028 :         startup_cost += aggcosts->finalCost.startup;
    2368       28028 :         startup_cost += aggcosts->finalCost.per_tuple;
    2369             :         /* we aren't grouping */
    2370       28028 :         total_cost = startup_cost + cpu_tuple_cost;
    2371       28028 :         output_tuples = 1;
    2372             :     }
    2373       14078 :     else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
    2374             :     {
    2375             :         /* Here we are able to deliver output on-the-fly */
    2376        6134 :         startup_cost = input_startup_cost;
    2377        6134 :         total_cost = input_total_cost;
    2378        6134 :         if (aggstrategy == AGG_MIXED && !enable_hashagg)
    2379             :         {
    2380         300 :             startup_cost += disable_cost;
    2381         300 :             total_cost += disable_cost;
    2382             :         }
    2383             :         /* calcs phrased this way to match HASHED case, see note above */
    2384        6134 :         total_cost += aggcosts->transCost.startup;
    2385        6134 :         total_cost += aggcosts->transCost.per_tuple * input_tuples;
    2386        6134 :         total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
    2387        6134 :         total_cost += aggcosts->finalCost.startup;
    2388        6134 :         total_cost += aggcosts->finalCost.per_tuple * numGroups;
    2389        6134 :         total_cost += cpu_tuple_cost * numGroups;
    2390        6134 :         output_tuples = numGroups;
    2391             :     }
    2392             :     else
    2393             :     {
    2394             :         /* must be AGG_HASHED */
    2395        7944 :         startup_cost = input_total_cost;
    2396        7944 :         if (!enable_hashagg)
    2397         902 :             startup_cost += disable_cost;
    2398        7944 :         startup_cost += aggcosts->transCost.startup;
    2399        7944 :         startup_cost += aggcosts->transCost.per_tuple * input_tuples;
    2400             :         /* cost of computing hash value */
    2401        7944 :         startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
    2402        7944 :         startup_cost += aggcosts->finalCost.startup;
    2403             : 
    2404        7944 :         total_cost = startup_cost;
    2405        7944 :         total_cost += aggcosts->finalCost.per_tuple * numGroups;
    2406             :         /* cost of retrieving from hash table */
    2407        7944 :         total_cost += cpu_tuple_cost * numGroups;
    2408        7944 :         output_tuples = numGroups;
    2409             :     }
    2410             : 
    2411             :     /*
    2412             :      * Add the disk costs of hash aggregation that spills to disk.
    2413             :      *
    2414             :      * Groups that go into the hash table stay in memory until finalized, so
    2415             :      * spilling and reprocessing tuples doesn't incur additional invocations
    2416             :      * of transCost or finalCost. Furthermore, the computed hash value is
    2417             :      * stored with the spilled tuples, so we don't incur extra invocations of
    2418             :      * the hash function.
    2419             :      *
    2420             :      * Hash Agg begins returning tuples after the first batch is complete.
    2421             :      * Accrue writes (spilled tuples) to startup_cost and to total_cost;
    2422             :      * accrue reads only to total_cost.
    2423             :      */
    2424       42106 :     if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
    2425             :     {
    2426             :         double      pages;
    2427        8452 :         double      pages_written = 0.0;
    2428        8452 :         double      pages_read = 0.0;
    2429             :         double      spill_cost;
    2430             :         double      hashentrysize;
    2431             :         double      nbatches;
    2432             :         Size        mem_limit;
    2433             :         uint64      ngroups_limit;
    2434             :         int         num_partitions;
    2435             :         int         depth;
    2436             : 
    2437             :         /*
    2438             :          * Estimate number of batches based on the computed limits. If less
    2439             :          * than or equal to one, all groups are expected to fit in memory;
    2440             :          * otherwise we expect to spill.
    2441             :          */
    2442        8452 :         hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
    2443             :                                             input_width,
    2444             :                                             aggcosts->transitionSpace);
    2445        8452 :         hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
    2446             :                             &ngroups_limit, &num_partitions);
    2447             : 
    2448        8452 :         nbatches = Max((numGroups * hashentrysize) / mem_limit,
    2449             :                        numGroups / ngroups_limit);
    2450             : 
    2451        8452 :         nbatches = Max(ceil(nbatches), 1.0);
    2452        8452 :         num_partitions = Max(num_partitions, 2);
    2453             : 
    2454             :         /*
    2455             :          * The number of partitions can change at different levels of
    2456             :          * recursion; but for the purposes of this calculation assume it stays
    2457             :          * constant.
    2458             :          */
    2459        8452 :         depth = ceil(log(nbatches) / log(num_partitions));
    2460             : 
    2461             :         /*
    2462             :          * Estimate number of pages read and written. For each level of
    2463             :          * recursion, a tuple must be written and then later read.
    2464             :          */
    2465        8452 :         pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
    2466        8452 :         pages_written = pages_read = pages * depth;
    2467             : 
    2468             :         /*
    2469             :          * HashAgg has somewhat worse IO behavior than Sort on typical
    2470             :          * hardware/OS combinations. Account for this with a generic penalty.
    2471             :          */
    2472        8452 :         pages_read *= 2.0;
    2473        8452 :         pages_written *= 2.0;
    2474             : 
    2475        8452 :         startup_cost += pages_written * random_page_cost;
    2476        8452 :         total_cost += pages_written * random_page_cost;
    2477        8452 :         total_cost += pages_read * seq_page_cost;
    2478             : 
    2479             :         /* account for CPU cost of spilling a tuple and reading it back */
    2480        8452 :         spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
    2481        8452 :         startup_cost += spill_cost;
    2482        8452 :         total_cost += spill_cost;
    2483             :     }
    2484             : 
    2485             :     /*
    2486             :      * If there are quals (HAVING quals), account for their cost and
    2487             :      * selectivity.
    2488             :      */
    2489       42106 :     if (quals)
    2490             :     {
    2491             :         QualCost    qual_cost;
    2492             : 
    2493        2390 :         cost_qual_eval(&qual_cost, quals, root);
    2494        2390 :         startup_cost += qual_cost.startup;
    2495        2390 :         total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
    2496             : 
    2497        2390 :         output_tuples = clamp_row_est(output_tuples *
    2498        2390 :                                       clauselist_selectivity(root,
    2499             :                                                              quals,
    2500             :                                                              0,
    2501             :                                                              JOIN_INNER,
    2502             :                                                              NULL));
    2503             :     }
    2504             : 
    2505       42106 :     path->rows = output_tuples;
    2506       42106 :     path->startup_cost = startup_cost;
    2507       42106 :     path->total_cost = total_cost;
    2508       42106 : }
    2509             : 
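The hash-agg spill arithmetic above in isolation, with invented limits
standing in for hash_agg_set_limits() output; only the formulas are taken
from the code.  With these numbers, 16 batches and a recursion depth of 1:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double numGroups = 1e6, hashentrysize = 64.0;
        double mem_limit = 4.0 * 1024 * 1024;   /* assumed memory limit */
        double ngroups_limit = 65536.0;         /* assumed group-count limit */
        double num_partitions = 32.0;           /* assumed spill fan-out */

        double nbatches = fmax((numGroups * hashentrysize) / mem_limit,
                               numGroups / ngroups_limit);

        nbatches = fmax(ceil(nbatches), 1.0);
        num_partitions = fmax(num_partitions, 2.0);

        /* each level of recursion writes and later re-reads every tuple */
        double depth = ceil(log(nbatches) / log(num_partitions));

        printf("nbatches=%.0f depth=%.0f\n", nbatches, depth);
        return 0;
    }
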
    2510             : /*
    2511             :  * cost_windowagg
    2512             :  *      Determines and returns the cost of performing a WindowAgg plan node,
    2513             :  *      including the cost of its input.
    2514             :  *
    2515             :  * Input is assumed already properly sorted.
    2516             :  */
    2517             : void
    2518        1268 : cost_windowagg(Path *path, PlannerInfo *root,
    2519             :                List *windowFuncs, int numPartCols, int numOrderCols,
    2520             :                Cost input_startup_cost, Cost input_total_cost,
    2521             :                double input_tuples)
    2522             : {
    2523             :     Cost        startup_cost;
    2524             :     Cost        total_cost;
    2525             :     ListCell   *lc;
    2526             : 
    2527        1268 :     startup_cost = input_startup_cost;
    2528        1268 :     total_cost = input_total_cost;
    2529             : 
    2530             :     /*
    2531             :      * Window functions are assumed to cost their stated execution cost, plus
    2532             :      * the cost of evaluating their input expressions, per tuple.  Since they
    2533             :      * may in fact evaluate their inputs at multiple rows during each cycle,
    2534             :      * this could be a drastic underestimate; but without a way to know how
    2535             :      * many rows the window function will fetch, it's hard to do better.  In
    2536             :      * any case, it's a good estimate for all the built-in window functions,
    2537             :      * so we'll just do this for now.
    2538             :      */
    2539        2816 :     foreach(lc, windowFuncs)
    2540             :     {
    2541        1548 :         WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
    2542             :         Cost        wfunccost;
    2543             :         QualCost    argcosts;
    2544             : 
    2545        1548 :         argcosts.startup = argcosts.per_tuple = 0;
    2546        1548 :         add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
    2547             :                           &argcosts);
    2548        1548 :         startup_cost += argcosts.startup;
    2549        1548 :         wfunccost = argcosts.per_tuple;
    2550             : 
    2551             :         /* also add the input expressions' cost to per-input-row costs */
    2552        1548 :         cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
    2553        1548 :         startup_cost += argcosts.startup;
    2554        1548 :         wfunccost += argcosts.per_tuple;
    2555             : 
    2556             :         /*
    2557             :          * Add the filter's cost to per-input-row costs.  XXX We should reduce
    2558             :          * input expression costs according to filter selectivity.
    2559             :          */
    2560        1548 :         cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
    2561        1548 :         startup_cost += argcosts.startup;
    2562        1548 :         wfunccost += argcosts.per_tuple;
    2563             : 
    2564        1548 :         total_cost += wfunccost * input_tuples;
    2565             :     }
    2566             : 
    2567             :     /*
    2568             :      * We also charge cpu_operator_cost per grouping column per tuple for
    2569             :      * grouping comparisons, plus cpu_tuple_cost per tuple for general
    2570             :      * overhead.
    2571             :      *
    2572             :      * XXX this neglects costs of spooling the data to disk when it overflows
    2573             :      * work_mem.  Sooner or later that should get accounted for.
    2574             :      */
    2575        1268 :     total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
    2576        1268 :     total_cost += cpu_tuple_cost * input_tuples;
    2577             : 
    2578        1268 :     path->rows = input_tuples;
    2579        1268 :     path->startup_cost = startup_cost;
    2580        1268 :     path->total_cost = total_cost;
    2581        1268 : }
    2582             : 
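The per-function accumulation above as plain arithmetic: each window function
charges its own execution cost plus its argument and filter expression costs
per input row, after which the grouping-comparison and per-tuple overheads are
added.  All numbers below are invented except the GUC defaults:

    #include <stdio.h>

    int main(void)
    {
        double input_tuples = 10000.0;
        double cpu_operator_cost = 0.0025, cpu_tuple_cost = 0.01;
        int numPartCols = 1, numOrderCols = 1;

        /* one window function: assumed per-row function + argument cost */
        double wfunccost = 0.0025 + 0.0025;
        double total = wfunccost * input_tuples;               /* 50 */

        total += cpu_operator_cost * (numPartCols + numOrderCols)
            * input_tuples;                                    /* +50 */
        total += cpu_tuple_cost * input_tuples;                /* +100 */

        printf("windowagg run cost = %.1f\n", total);          /* 200.0 */
        return 0;
    }
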
    2583             : /*
    2584             :  * cost_group
    2585             :  *      Determines and returns the cost of performing a Group plan node,
    2586             :  *      including the cost of its input.
    2587             :  *
    2588             :  * Note: caller must ensure that input costs are for appropriately-sorted
    2589             :  * input.
    2590             :  */
    2591             : void
    2592        1092 : cost_group(Path *path, PlannerInfo *root,
    2593             :            int numGroupCols, double numGroups,
    2594             :            List *quals,
    2595             :            Cost input_startup_cost, Cost input_total_cost,
    2596             :            double input_tuples)
    2597             : {
    2598             :     double      output_tuples;
    2599             :     Cost        startup_cost;
    2600             :     Cost        total_cost;
    2601             : 
    2602        1092 :     output_tuples = numGroups;
    2603        1092 :     startup_cost = input_startup_cost;
    2604        1092 :     total_cost = input_total_cost;
    2605             : 
    2606             :     /*
    2607             :      * Charge one cpu_operator_cost per comparison per input tuple. We assume
    2608             :      * all columns get compared for most of the tuples.
    2609             :      */
    2610        1092 :     total_cost += cpu_operator_cost * input_tuples * numGroupCols;
    2611             : 
    2612             :     /*
    2613             :      * If there are quals (HAVING quals), account for their cost and
    2614             :      * selectivity.
    2615             :      */
    2616        1092 :     if (quals)
    2617             :     {
    2618             :         QualCost    qual_cost;
    2619             : 
    2620           0 :         cost_qual_eval(&qual_cost, quals, root);
    2621           0 :         startup_cost += qual_cost.startup;
    2622           0 :         total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
    2623             : 
    2624           0 :         output_tuples = clamp_row_est(output_tuples *
    2625           0 :                                       clauselist_selectivity(root,
    2626             :                                                              quals,
    2627             :                                                              0,
    2628             :                                                              JOIN_INNER,
    2629             :                                                              NULL));
    2630             :     }
    2631             : 
    2632        1092 :     path->rows = output_tuples;
    2633        1092 :     path->startup_cost = startup_cost;
    2634        1092 :     path->total_cost = total_cost;
    2635        1092 : }
    2636             : 
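As a quick sanity check of the comparison charge above: with the default
cpu_operator_cost of 0.0025, three grouping columns, and 1,000,000 input
tuples, cost_group adds 0.0025 * 3 * 1000000 = 7500 to total_cost, on top of
the cost of its (already sorted) input.
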
    2637             : /*
    2638             :  * initial_cost_nestloop
    2639             :  *    Preliminary estimate of the cost of a nestloop join path.
    2640             :  *
    2641             :  * This must quickly produce lower-bound estimates of the path's startup and
    2642             :  * total costs.  If we are unable to eliminate the proposed path from
    2643             :  * consideration using the lower bounds, final_cost_nestloop will be called
    2644             :  * to obtain the final estimates.
    2645             :  *
    2646             :  * The exact division of labor between this function and final_cost_nestloop
    2647             :  * is private to them, and represents a tradeoff between speed of the initial
    2648             :  * estimate and getting a tight lower bound.  We choose to not examine the
    2649             :  * join quals here, since that's by far the most expensive part of the
    2650             :  * calculations.  The end result is that CPU-cost considerations must be
    2651             :  * left for the second phase; and for SEMI/ANTI joins, we must also postpone
    2652             :  * incorporation of the inner path's run cost.
    2653             :  *
    2654             :  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
    2655             :  *      other data to be used by final_cost_nestloop
    2656             :  * 'jointype' is the type of join to be performed
    2657             :  * 'outer_path' is the outer input to the join
    2658             :  * 'inner_path' is the inner input to the join
    2659             :  * 'extra' contains miscellaneous information about the join
    2660             :  */
    2661             : void
    2662     1167254 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
    2663             :                       JoinType jointype,
    2664             :                       Path *outer_path, Path *inner_path,
    2665             :                       JoinPathExtraData *extra)
    2666             : {
    2667     1167254 :     Cost        startup_cost = 0;
    2668     1167254 :     Cost        run_cost = 0;
    2669     1167254 :     double      outer_path_rows = outer_path->rows;
    2670             :     Cost        inner_rescan_start_cost;
    2671             :     Cost        inner_rescan_total_cost;
    2672             :     Cost        inner_run_cost;
    2673             :     Cost        inner_rescan_run_cost;
    2674             : 
    2675             :     /* estimate costs to rescan the inner relation */
    2676     1167254 :     cost_rescan(root, inner_path,
    2677             :                 &inner_rescan_start_cost,
    2678             :                 &inner_rescan_total_cost);
    2679             : 
    2680             :     /* cost of source data */
    2681             : 
    2682             :     /*
    2683             :      * NOTE: clearly, we must pay both outer and inner paths' startup_cost
    2684             :      * before we can start returning tuples, so the join's startup cost is
    2685             :      * their sum.  We'll also pay the inner path's rescan startup cost
    2686             :      * multiple times.
    2687             :      */
    2688     1167254 :     startup_cost += outer_path->startup_cost + inner_path->startup_cost;
    2689     1167254 :     run_cost += outer_path->total_cost - outer_path->startup_cost;
    2690     1167254 :     if (outer_path_rows > 1)
    2691      621894 :         run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
    2692             : 
    2693     1167254 :     inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
    2694     1167254 :     inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
    2695             : 
    2696     1167254 :     if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
    2697     1127242 :         extra->inner_unique)
    2698             :     {
    2699             :         /*
    2700             :          * With a SEMI or ANTI join, or if the innerrel is known unique, the
    2701             :          * executor will stop after the first match.
    2702             :          *
    2703             :          * Getting decent estimates requires inspection of the join quals,
    2704             :          * which we choose to postpone to final_cost_nestloop.
    2705             :          */
    2706             : 
    2707             :         /* Save private data for final_cost_nestloop */
    2708      537190 :         workspace->inner_run_cost = inner_run_cost;
    2709      537190 :         workspace->inner_rescan_run_cost = inner_rescan_run_cost;
    2710             :     }
    2711             :     else
    2712             :     {
    2713             :         /* Normal case; we'll scan whole input rel for each outer row */
    2714      630064 :         run_cost += inner_run_cost;
    2715      630064 :         if (outer_path_rows > 1)
    2716      354346 :             run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
    2717             :     }
    2718             : 
    2719             :     /* CPU costs left for later */
    2720             : 
    2721             :     /* Public result fields */
    2722     1167254 :     workspace->startup_cost = startup_cost;
    2723     1167254 :     workspace->total_cost = startup_cost + run_cost;
    2724             :     /* Save private data for final_cost_nestloop */
    2725     1167254 :     workspace->run_cost = run_cost;
    2726     1167254 : }
    2727             : 
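The normal-case composition above (no SEMI/ANTI, inner not known unique) as a
standalone sketch; the rescan numbers are invented stand-ins for what
cost_rescan() would report:

    #include <stdio.h>

    int main(void)
    {
        double outer_startup = 0.0, outer_total = 100.0, outer_rows = 50.0;
        double inner_startup = 5.0, inner_total = 25.0;
        double rescan_start = 0.0, rescan_total = 15.0;  /* assumed */

        double startup = outer_startup + inner_startup;
        double run = outer_total - outer_startup;

        if (outer_rows > 1)
            run += (outer_rows - 1) * rescan_start;

        /* first inner scan at full cost, later ones at rescan cost */
        run += inner_total - inner_startup;
        if (outer_rows > 1)
            run += (outer_rows - 1) * (rescan_total - rescan_start);

        printf("startup=%.1f total=%.1f\n", startup, startup + run);
        return 0;
    }
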
    2728             : /*
    2729             :  * final_cost_nestloop
    2730             :  *    Final estimate of the cost and result size of a nestloop join path.
    2731             :  *
    2732             :  * 'path' is already filled in except for the rows and cost fields
    2733             :  * 'workspace' is the result from initial_cost_nestloop
    2734             :  * 'extra' contains miscellaneous information about the join
    2735             :  */
    2736             : void
    2737      634786 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
    2738             :                     JoinCostWorkspace *workspace,
    2739             :                     JoinPathExtraData *extra)
    2740             : {
    2741      634786 :     Path       *outer_path = path->outerjoinpath;
    2742      634786 :     Path       *inner_path = path->innerjoinpath;
    2743      634786 :     double      outer_path_rows = outer_path->rows;
    2744      634786 :     double      inner_path_rows = inner_path->rows;
    2745      634786 :     Cost        startup_cost = workspace->startup_cost;
    2746      634786 :     Cost        run_cost = workspace->run_cost;
    2747             :     Cost        cpu_per_tuple;
    2748             :     QualCost    restrict_qual_cost;
    2749             :     double      ntuples;
    2750             : 
    2751             :     /* Protect some assumptions below that rowcounts aren't zero */
    2752      634786 :     if (outer_path_rows <= 0)
    2753           0 :         outer_path_rows = 1;
    2754      634786 :     if (inner_path_rows <= 0)
    2755          88 :         inner_path_rows = 1;
    2756             :     /* Mark the path with the correct row estimate */
    2757      634786 :     if (path->path.param_info)
    2758        4934 :         path->path.rows = path->path.param_info->ppi_rows;
    2759             :     else
    2760      629852 :         path->path.rows = path->path.parent->rows;
    2761             : 
    2762             :     /* For partial paths, scale row estimate. */
    2763      634786 :     if (path->path.parallel_workers > 0)
    2764             :     {
    2765        3820 :         double      parallel_divisor = get_parallel_divisor(&path->path);
    2766             : 
    2767        3820 :         path->path.rows =
    2768        3820 :             clamp_row_est(path->path.rows / parallel_divisor);
    2769             :     }
    2770             : 
    2771             :     /*
    2772             :      * We could include disable_cost in the preliminary estimate, but that
    2773             :      * would amount to optimizing for the case where the join method is
    2774             :      * disabled, which doesn't seem like the way to bet.
    2775             :      */
    2776      634786 :     if (!enable_nestloop)
    2777        1812 :         startup_cost += disable_cost;
    2778             : 
    2779             :     /* cost of inner-relation source data (we already dealt with outer rel) */
    2780             : 
    2781      634786 :     if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI ||
    2782      609086 :         extra->inner_unique)
    2783      393504 :     {
    2784             :         /*
    2785             :          * With a SEMI or ANTI join, or if the innerrel is known unique, the
    2786             :          * executor will stop after the first match.
    2787             :          */
    2788      393504 :         Cost        inner_run_cost = workspace->inner_run_cost;
    2789      393504 :         Cost        inner_rescan_run_cost = workspace->inner_rescan_run_cost;
    2790             :         double      outer_matched_rows;
    2791             :         double      outer_unmatched_rows;
    2792             :         Selectivity inner_scan_frac;
    2793             : 
    2794             :         /*
    2795             :          * For an outer-rel row that has at least one match, we can expect the
    2796             :          * inner scan to stop after a fraction 1/(match_count+1) of the inner
    2797             :          * rows, if the matches are evenly distributed.  Since they probably
    2798             :          * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
    2799             :          * that fraction.  (If we used a larger fuzz factor, we'd have to
    2800             :          * clamp inner_scan_frac to at most 1.0; but since match_count is at
    2801             :          * least 1, no such clamp is needed now.)
    2802             :          */
    2803      393504 :         outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
    2804      393504 :         outer_unmatched_rows = outer_path_rows - outer_matched_rows;
    2805      393504 :         inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
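        /*
         * As a worked instance of the fraction just computed: with
         * match_count = 3, the naive stop point is 1/(3+1) = 25% of the
         * inner rows; the 2.0 fuzz factor doubles that, giving
         * inner_scan_frac = 0.5.  Because match_count is at least 1, the
         * fraction tops out at 2/(1+1) = 1.0, which is why no clamp is
         * needed.
         */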
    2806             : 
    2807             :         /*
    2808             :          * Compute number of tuples processed (not number emitted!).  First,
    2809             :          * account for successfully-matched outer rows.
    2810             :          */
    2811      393504 :         ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
    2812             : 
    2813             :         /*
    2814             :          * Now we need to estimate the actual costs of scanning the inner
    2815             :          * relation, which may be quite a bit less than N times inner_run_cost
    2816             :          * due to early scan stops.  We consider two cases.  If the inner path
    2817             :          * is an indexscan using all the joinquals as indexquals, then an
    2818             :          * unmatched outer row results in an indexscan returning no rows,
    2819             :          * which is probably quite cheap.  Otherwise, the executor will have
    2820             :          * to scan the whole inner rel for an unmatched row; not so cheap.
    2821             :          */
    2822      393504 :         if (has_indexed_join_quals(path))
    2823             :         {
    2824             :             /*
    2825             :              * Successfully-matched outer rows will only require scanning
    2826             :              * inner_scan_frac of the inner relation.  In this case, we don't
    2827             :              * need to charge the full inner_run_cost even when that's more
    2828             :              * than inner_rescan_run_cost, because we can assume that none of
    2829             :              * the inner scans ever scan the whole inner relation.  So it's
    2830             :              * okay to assume that all the inner scan executions can be
    2831             :              * fractions of the full cost, even if materialization is reducing
    2832             :              * the rescan cost.  At this writing, it's impossible to get here
    2833             :              * for a materialized inner scan, so inner_run_cost and
    2834             :              * inner_rescan_run_cost will be the same anyway; but just in
    2835             :              * case, use inner_run_cost for the first matched tuple and
    2836             :              * inner_rescan_run_cost for additional ones.
    2837             :              */
    2838       83168 :             run_cost += inner_run_cost * inner_scan_frac;
    2839       83168 :             if (outer_matched_rows > 1)
    2840        6046 :                 run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
    2841             : 
    2842             :             /*
    2843             :              * Add the cost of inner-scan executions for unmatched outer rows.
    2844             :              * We estimate this as the same cost as returning the first tuple
    2845             :              * of a nonempty scan.  We consider that these are all rescans,
    2846             :              * since we used inner_run_cost once already.
    2847             :              */
    2848      166336 :             run_cost += outer_unmatched_rows *
    2849       83168 :                 inner_rescan_run_cost / inner_path_rows;
    2850             : 
    2851             :             /*
    2852             :              * We won't be evaluating any quals at all for unmatched rows, so
    2853             :              * don't add them to ntuples.
    2854             :              */
    2855             :         }
    2856             :         else
    2857             :         {
    2858             :             /*
    2859             :              * Here, a complicating factor is that rescans may be cheaper than
    2860             :              * first scans.  If we never scan all the way to the end of the
    2861             :              * inner rel, it might be (depending on the plan type) that we'd
    2862             :              * never pay the whole inner first-scan run cost.  However it is
    2863             :              * difficult to estimate whether that will happen (and it could
    2864             :              * not happen if there are any unmatched outer rows!), so be
    2865             :              * conservative and always charge the whole first-scan cost once.
    2866             :              * We consider this charge to correspond to the first unmatched
    2867             :              * outer row, unless there isn't one in our estimate, in which
    2868             :              * case blame it on the first matched row.
    2869             :              */
    2870             : 
    2871             :             /* First, count all unmatched join tuples as being processed */
    2872      310336 :             ntuples += outer_unmatched_rows * inner_path_rows;
    2873             : 
    2874             :             /* Now add the forced full scan, and decrement appropriate count */
    2875      310336 :             run_cost += inner_run_cost;
    2876      310336 :             if (outer_unmatched_rows >= 1)
    2877      238920 :                 outer_unmatched_rows -= 1;
    2878             :             else
    2879       71416 :                 outer_matched_rows -= 1;
    2880             : 
    2881             :             /* Add inner run cost for additional outer tuples having matches */
    2882      310336 :             if (outer_matched_rows > 0)
    2883       76690 :                 run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
    2884             : 
    2885             :             /* Add inner run cost for additional unmatched outer tuples */
    2886      310336 :             if (outer_unmatched_rows > 0)
    2887      119724 :                 run_cost += outer_unmatched_rows * inner_rescan_run_cost;
    2888             :         }
    2889             :     }
    2890             :     else
    2891             :     {
    2892             :         /* Normal-case source costs were included in preliminary estimate */
    2893             : 
    2894             :         /* Compute number of tuples processed (not number emitted!) */
    2895      241282 :         ntuples = outer_path_rows * inner_path_rows;
    2896             :     }
    2897             : 
    2898             :     /* CPU costs */
    2899      634786 :     cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
    2900      634786 :     startup_cost += restrict_qual_cost.startup;
    2901      634786 :     cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
    2902      634786 :     run_cost += cpu_per_tuple * ntuples;
    2903             : 
    2904             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    2905      634786 :     startup_cost += path->path.pathtarget->cost.startup;
    2906      634786 :     run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
    2907             : 
    2908      634786 :     path->path.startup_cost = startup_cost;
    2909      634786 :     path->path.total_cost = startup_cost + run_cost;
    2910      634786 : }
    2911             : 
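                     : /*
                     :  * A standalone sketch (not part of costsize.c; all names hypothetical) of
                     :  * the non-indexed SEMI/ANTI cost accounting above: one full first scan is
                     :  * always charged, blamed on an unmatched outer row when we expect one, and
                     :  * the remaining scans are charged at the (possibly cheaper) rescan rate.
                     :  */
                     : static double
                     : semi_inner_scan_cost(double matched_rows, double unmatched_rows,
                     :                      double inner_run_cost, double inner_rescan_run_cost,
                     :                      double inner_scan_frac)
                     : {
                     :     double      run_cost = inner_run_cost;  /* forced full first scan */
                     : 
                     :     /* attribute the full scan to an unmatched outer row if possible */
                     :     if (unmatched_rows >= 1)
                     :         unmatched_rows -= 1;
                     :     else
                     :         matched_rows -= 1;
                     : 
                     :     /* additional matched outer rows stop early, on average */
                     :     if (matched_rows > 0)
                     :         run_cost += matched_rows * inner_rescan_run_cost * inner_scan_frac;
                     : 
                     :     /* additional unmatched outer rows scan the whole inner rel */
                     :     if (unmatched_rows > 0)
                     :         run_cost += unmatched_rows * inner_rescan_run_cost;
                     : 
                     :     return run_cost;
                     : }
                     : 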
    2912             : /*
    2913             :  * initial_cost_mergejoin
    2914             :  *    Preliminary estimate of the cost of a mergejoin path.
    2915             :  *
    2916             :  * This must quickly produce lower-bound estimates of the path's startup and
    2917             :  * total costs.  If we are unable to eliminate the proposed path from
    2918             :  * consideration using the lower bounds, final_cost_mergejoin will be called
    2919             :  * to obtain the final estimates.
    2920             :  *
    2921             :  * The exact division of labor between this function and final_cost_mergejoin
    2922             :  * is private to them, and represents a tradeoff between speed of the initial
    2923             :  * estimate and getting a tight lower bound.  We choose not to examine the
    2924             :  * join quals here, except for obtaining the scan selectivity estimate which
    2925             :  * is really essential (but fortunately, use of caching keeps the cost of
    2926             :  * getting that down to something reasonable).
    2927             :  * We also assume that cost_sort is cheap enough to use here.
    2928             :  *
    2929             :  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
    2930             :  *      other data to be used by final_cost_mergejoin
    2931             :  * 'jointype' is the type of join to be performed
    2932             :  * 'mergeclauses' is the list of joinclauses to be used as merge clauses
    2933             :  * 'outer_path' is the outer input to the join
    2934             :  * 'inner_path' is the inner input to the join
    2935             :  * 'outersortkeys' is the list of sort keys for the outer path
    2936             :  * 'innersortkeys' is the list of sort keys for the inner path
    2937             :  * 'extra' contains miscellaneous information about the join
    2938             :  *
    2939             :  * Note: outersortkeys and innersortkeys should be NIL if no explicit
    2940             :  * sort is needed because the respective source path is already ordered.
    2941             :  */
    2942             : void
    2943      564956 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
    2944             :                        JoinType jointype,
    2945             :                        List *mergeclauses,
    2946             :                        Path *outer_path, Path *inner_path,
    2947             :                        List *outersortkeys, List *innersortkeys,
    2948             :                        JoinPathExtraData *extra)
    2949             : {
    2950      564956 :     Cost        startup_cost = 0;
    2951      564956 :     Cost        run_cost = 0;
    2952      564956 :     double      outer_path_rows = outer_path->rows;
    2953      564956 :     double      inner_path_rows = inner_path->rows;
    2954             :     Cost        inner_run_cost;
    2955             :     double      outer_rows,
    2956             :                 inner_rows,
    2957             :                 outer_skip_rows,
    2958             :                 inner_skip_rows;
    2959             :     Selectivity outerstartsel,
    2960             :                 outerendsel,
    2961             :                 innerstartsel,
    2962             :                 innerendsel;
    2963             :     Path        sort_path;      /* dummy for result of cost_sort */
    2964             : 
    2965             :     /* Protect some assumptions below that rowcounts aren't zero */
    2966      564956 :     if (outer_path_rows <= 0)
    2967          32 :         outer_path_rows = 1;
    2968      564956 :     if (inner_path_rows <= 0)
    2969          76 :         inner_path_rows = 1;
    2970             : 
    2971             :     /*
    2972             :      * A merge join will stop as soon as it exhausts either input stream
    2973             :      * (unless it's an outer join, in which case the outer side has to be
    2974             :      * scanned all the way anyway).  Estimate fraction of the left and right
    2975             :      * inputs that will actually need to be scanned.  Likewise, we can
    2976             :      * estimate the number of rows that will be skipped before the first join
    2977             :      * pair is found, which should be factored into startup cost. We use only
    2978             :      * the first (most significant) merge clause for this purpose. Since
    2979             :      * mergejoinscansel() is a fairly expensive computation, we cache the
    2980             :      * results in the merge clause RestrictInfo.
    2981             :      */
    2982      564956 :     if (mergeclauses && jointype != JOIN_FULL)
    2983      560950 :     {
    2984      560950 :         RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
    2985             :         List       *opathkeys;
    2986             :         List       *ipathkeys;
    2987             :         PathKey    *opathkey;
    2988             :         PathKey    *ipathkey;
    2989             :         MergeScanSelCache *cache;
    2990             : 
    2991             :         /* Get the input pathkeys to determine the sort-order details */
    2992      560950 :         opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
    2993      560950 :         ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
    2994             :         Assert(opathkeys);
    2995             :         Assert(ipathkeys);
    2996      560950 :         opathkey = (PathKey *) linitial(opathkeys);
    2997      560950 :         ipathkey = (PathKey *) linitial(ipathkeys);
    2998             :         /* debugging check */
    2999      560950 :         if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
    3000      560950 :             opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
    3001      560950 :             opathkey->pk_strategy != ipathkey->pk_strategy ||
    3002      560950 :             opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
    3003           0 :             elog(ERROR, "left and right pathkeys do not match in mergejoin");
    3004             : 
    3005             :         /* Get the selectivity with caching */
    3006      560950 :         cache = cached_scansel(root, firstclause, opathkey);
    3007             : 
    3008      560950 :         if (bms_is_subset(firstclause->left_relids,
    3009      560950 :                           outer_path->parent->relids))
    3010             :         {
    3011             :             /* left side of clause is outer */
    3012      279260 :             outerstartsel = cache->leftstartsel;
    3013      279260 :             outerendsel = cache->leftendsel;
    3014      279260 :             innerstartsel = cache->rightstartsel;
    3015      279260 :             innerendsel = cache->rightendsel;
    3016             :         }
    3017             :         else
    3018             :         {
    3019             :             /* left side of clause is inner */
    3020      281690 :             outerstartsel = cache->rightstartsel;
    3021      281690 :             outerendsel = cache->rightendsel;
    3022      281690 :             innerstartsel = cache->leftstartsel;
    3023      281690 :             innerendsel = cache->leftendsel;
    3024             :         }
    3025      560950 :         if (jointype == JOIN_LEFT ||
    3026             :             jointype == JOIN_ANTI)
    3027             :         {
    3028      127176 :             outerstartsel = 0.0;
    3029      127176 :             outerendsel = 1.0;
    3030             :         }
    3031      433774 :         else if (jointype == JOIN_RIGHT)
    3032             :         {
    3033       93540 :             innerstartsel = 0.0;
    3034       93540 :             innerendsel = 1.0;
    3035             :         }
    3036             :     }
    3037             :     else
    3038             :     {
    3039             :         /* cope with clauseless or full mergejoin */
    3040        4006 :         outerstartsel = innerstartsel = 0.0;
    3041        4006 :         outerendsel = innerendsel = 1.0;
    3042             :     }
    3043             : 
    3044             :     /*
    3045             :      * Convert selectivities to row counts.  We force outer_rows and
    3046             :      * inner_rows to be at least 1, but the skip_rows estimates can be zero.
    3047             :      */
    3048      564956 :     outer_skip_rows = rint(outer_path_rows * outerstartsel);
    3049      564956 :     inner_skip_rows = rint(inner_path_rows * innerstartsel);
    3050      564956 :     outer_rows = clamp_row_est(outer_path_rows * outerendsel);
    3051      564956 :     inner_rows = clamp_row_est(inner_path_rows * innerendsel);
    3052             : 
    3053             :     Assert(outer_skip_rows <= outer_rows);
    3054             :     Assert(inner_skip_rows <= inner_rows);
    3055             : 
    3056             :     /*
    3057             :      * Readjust scan selectivities to account for above rounding.  This is
    3058             :      * normally an insignificant effect, but when there are only a few rows in
    3059             :      * the inputs, failing to do this makes for a large percentage error.
    3060             :      */
    3061      564956 :     outerstartsel = outer_skip_rows / outer_path_rows;
    3062      564956 :     innerstartsel = inner_skip_rows / inner_path_rows;
    3063      564956 :     outerendsel = outer_rows / outer_path_rows;
    3064      564956 :     innerendsel = inner_rows / inner_path_rows;
    3065             : 
    3066             :     Assert(outerstartsel <= outerendsel);
    3067             :     Assert(innerstartsel <= innerendsel);
    3068             : 
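                     :     /*
                     :      * Hypothetical example of why this matters: with outer_path_rows = 3
                     :      * and outerstartsel = 0.4, outer_skip_rows = rint(1.2) = 1, so the
                     :      * readjusted outerstartsel is 1/3; keeping the original 0.4 would
                     :      * overstate the skipped fraction by 20%.
                     :      */
                     : 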
    3069             :     /* cost of source data */
    3070             : 
    3071      564956 :     if (outersortkeys)          /* do we need to sort outer? */
    3072             :     {
    3073      507440 :         cost_sort(&sort_path,
    3074             :                   root,
    3075             :                   outersortkeys,
    3076             :                   outer_path->total_cost,
    3077             :                   outer_path_rows,
    3078      253720 :                   outer_path->pathtarget->width,
    3079             :                   0.0,
    3080             :                   work_mem,
    3081             :                   -1.0);
    3082      253720 :         startup_cost += sort_path.startup_cost;
    3083      507440 :         startup_cost += (sort_path.total_cost - sort_path.startup_cost)
    3084      253720 :             * outerstartsel;
    3085      507440 :         run_cost += (sort_path.total_cost - sort_path.startup_cost)
    3086      253720 :             * (outerendsel - outerstartsel);
    3087             :     }
    3088             :     else
    3089             :     {
    3090      311236 :         startup_cost += outer_path->startup_cost;
    3091      622472 :         startup_cost += (outer_path->total_cost - outer_path->startup_cost)
    3092      311236 :             * outerstartsel;
    3093      622472 :         run_cost += (outer_path->total_cost - outer_path->startup_cost)
    3094      311236 :             * (outerendsel - outerstartsel);
    3095             :     }
    3096             : 
    3097      564956 :     if (innersortkeys)          /* do we need to sort inner? */
    3098             :     {
    3099      868904 :         cost_sort(&sort_path,
    3100             :                   root,
    3101             :                   innersortkeys,
    3102             :                   inner_path->total_cost,
    3103             :                   inner_path_rows,
    3104      434452 :                   inner_path->pathtarget->width,
    3105             :                   0.0,
    3106             :                   work_mem,
    3107             :                   -1.0);
    3108      434452 :         startup_cost += sort_path.startup_cost;
    3109      868904 :         startup_cost += (sort_path.total_cost - sort_path.startup_cost)
    3110      434452 :             * innerstartsel;
    3111      868904 :         inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
    3112      434452 :             * (innerendsel - innerstartsel);
    3113             :     }
    3114             :     else
    3115             :     {
    3116      130504 :         startup_cost += inner_path->startup_cost;
    3117      261008 :         startup_cost += (inner_path->total_cost - inner_path->startup_cost)
    3118      130504 :             * innerstartsel;
    3119      261008 :         inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
    3120      130504 :             * (innerendsel - innerstartsel);
    3121             :     }
    3122             : 
    3123             :     /*
    3124             :      * We can't yet determine whether rescanning occurs, or whether
    3125             :      * materialization of the inner input should be done.  The minimum
    3126             :      * possible inner input cost, regardless of rescan and materialization
    3127             :      * considerations, is inner_run_cost.  We include that in
    3128             :      * workspace->total_cost, but not yet in run_cost.
    3129             :      */
    3130             : 
    3131             :     /* CPU costs left for later */
    3132             : 
    3133             :     /* Public result fields */
    3134      564956 :     workspace->startup_cost = startup_cost;
    3135      564956 :     workspace->total_cost = startup_cost + run_cost + inner_run_cost;
    3136             :     /* Save private data for final_cost_mergejoin */
    3137      564956 :     workspace->run_cost = run_cost;
    3138      564956 :     workspace->inner_run_cost = inner_run_cost;
    3139      564956 :     workspace->outer_rows = outer_rows;
    3140      564956 :     workspace->inner_rows = inner_rows;
    3141      564956 :     workspace->outer_skip_rows = outer_skip_rows;
    3142      564956 :     workspace->inner_skip_rows = inner_skip_rows;
    3143      564956 : }
    3144             : 
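                     : /*
                     :  * Standalone sketch (not part of costsize.c; names hypothetical) of the
                     :  * cost-splitting pattern used above for a presorted input: the slice of
                     :  * the input's run cost before startsel is charged to startup, and only
                     :  * the slice between startsel and endsel is charged as run cost.
                     :  */
                     : typedef struct CostSlice
                     : {
                     :     double      startup_cost;
                     :     double      run_cost;
                     : } CostSlice;
                     : 
                     : static CostSlice
                     : slice_input_cost(double input_startup_cost, double input_total_cost,
                     :                  double startsel, double endsel)
                     : {
                     :     double      input_run_cost = input_total_cost - input_startup_cost;
                     :     CostSlice   result;
                     : 
                     :     result.startup_cost = input_startup_cost + input_run_cost * startsel;
                     :     result.run_cost = input_run_cost * (endsel - startsel);
                     :     return result;
                     : }
                     : 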
    3145             : /*
    3146             :  * final_cost_mergejoin
    3147             :  *    Final estimate of the cost and result size of a mergejoin path.
    3148             :  *
    3149             :  * Unlike other costsize functions, this routine makes two actual decisions:
    3150             :  * whether the executor will need to do mark/restore, and whether we should
    3151             :  * materialize the inner path.  It would be logically cleaner to build
    3152             :  * separate paths testing these alternatives, but that would require repeating
    3153             :  * most of the cost calculations, which are not all that cheap.  Since the
    3154             :  * choice will not affect output pathkeys or startup cost, only total cost,
    3155             :  * there is no possibility of wanting to keep more than one path.  So it seems
    3156             :  * best to make the decisions here and record them in the path's
    3157             :  * skip_mark_restore and materialize_inner fields.
    3158             :  *
    3159             :  * Mark/restore overhead is usually required, but can be skipped if we know
    3160             :  * that the executor need find only one match per outer tuple, and that the
    3161             :  * mergeclauses are sufficient to identify a match.
    3162             :  *
    3163             :  * We materialize the inner path if we need mark/restore and either the inner
    3164             :  * path can't support mark/restore, or it's cheaper to use an interposed
    3165             :  * Material node to handle mark/restore.
    3166             :  *
    3167             :  * 'path' is already filled in except for the rows and cost fields and
    3168             :  *      skip_mark_restore and materialize_inner
    3169             :  * 'workspace' is the result from initial_cost_mergejoin
    3170             :  * 'extra' contains miscellaneous information about the join
    3171             :  */
    3172             : void
    3173      145514 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
    3174             :                      JoinCostWorkspace *workspace,
    3175             :                      JoinPathExtraData *extra)
    3176             : {
    3177      145514 :     Path       *outer_path = path->jpath.outerjoinpath;
    3178      145514 :     Path       *inner_path = path->jpath.innerjoinpath;
    3179      145514 :     double      inner_path_rows = inner_path->rows;
    3180      145514 :     List       *mergeclauses = path->path_mergeclauses;
    3181      145514 :     List       *innersortkeys = path->innersortkeys;
    3182      145514 :     Cost        startup_cost = workspace->startup_cost;
    3183      145514 :     Cost        run_cost = workspace->run_cost;
    3184      145514 :     Cost        inner_run_cost = workspace->inner_run_cost;
    3185      145514 :     double      outer_rows = workspace->outer_rows;
    3186      145514 :     double      inner_rows = workspace->inner_rows;
    3187      145514 :     double      outer_skip_rows = workspace->outer_skip_rows;
    3188      145514 :     double      inner_skip_rows = workspace->inner_skip_rows;
    3189             :     Cost        cpu_per_tuple,
    3190             :                 bare_inner_cost,
    3191             :                 mat_inner_cost;
    3192             :     QualCost    merge_qual_cost;
    3193             :     QualCost    qp_qual_cost;
    3194             :     double      mergejointuples,
    3195             :                 rescannedtuples;
    3196             :     double      rescanratio;
    3197             : 
    3198             :     /* Protect some assumptions below that rowcounts aren't zero */
    3199      145514 :     if (inner_path_rows <= 0)
    3200          52 :         inner_path_rows = 1;
    3201             : 
    3202             :     /* Mark the path with the correct row estimate */
    3203      145514 :     if (path->jpath.path.param_info)
    3204         444 :         path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
    3205             :     else
    3206      145070 :         path->jpath.path.rows = path->jpath.path.parent->rows;
    3207             : 
    3208             :     /* For partial paths, scale row estimate. */
    3209      145514 :     if (path->jpath.path.parallel_workers > 0)
    3210             :     {
    3211        5876 :         double      parallel_divisor = get_parallel_divisor(&path->jpath.path);
    3212             : 
    3213        5876 :         path->jpath.path.rows =
    3214        5876 :             clamp_row_est(path->jpath.path.rows / parallel_divisor);
    3215             :     }
    3216             : 
    3217             :     /*
    3218             :      * We could include disable_cost in the preliminary estimate, but that
    3219             :      * would amount to optimizing for the case where the join method is
    3220             :      * disabled, which doesn't seem like the way to bet.
    3221             :      */
    3222      145514 :     if (!enable_mergejoin)
    3223           0 :         startup_cost += disable_cost;
    3224             : 
    3225             :     /*
    3226             :      * Compute cost of the mergequals and qpquals (other restriction clauses)
    3227             :      * separately.
    3228             :      */
    3229      145514 :     cost_qual_eval(&merge_qual_cost, mergeclauses, root);
    3230      145514 :     cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
    3231      145514 :     qp_qual_cost.startup -= merge_qual_cost.startup;
    3232      145514 :     qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
    3233             : 
    3234             :     /*
    3235             :      * With a SEMI or ANTI join, or if the innerrel is known unique, the
    3236             :      * executor will stop scanning for matches after the first match.  When
    3237             :      * all the joinclauses are merge clauses, this means we don't ever need to
    3238             :      * back up the merge, and so we can skip mark/restore overhead.
    3239             :      */
    3240      145514 :     if ((path->jpath.jointype == JOIN_SEMI ||
    3241      143918 :          path->jpath.jointype == JOIN_ANTI ||
    3242      205030 :          extra->inner_unique) &&
    3243       66362 :         (list_length(path->jpath.joinrestrictinfo) ==
    3244       66362 :          list_length(path->path_mergeclauses)))
    3245       61402 :         path->skip_mark_restore = true;
    3246             :     else
    3247       84112 :         path->skip_mark_restore = false;
    3248             : 
    3249             :     /*
    3250             :      * Get approx # tuples passing the mergequals.  We use approx_tuple_count
    3251             :      * here because we need an estimate done with JOIN_INNER semantics.
    3252             :      */
    3253      145514 :     mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
    3254             : 
    3255             :     /*
    3256             :      * When there are equal merge keys in the outer relation, the mergejoin
    3257             :      * must rescan any matching tuples in the inner relation. This means
    3258             :      * re-fetching inner tuples; we have to estimate how often that happens.
    3259             :      *
    3260             :      * For regular inner and outer joins, the number of re-fetches can be
    3261             :      * estimated approximately as size of merge join output minus size of
    3262             :      * inner relation. Assume that the distinct key values are 1, 2, ..., and
    3263             :      * denote the number of values of each key in the outer relation as m1,
    3264             :      * m2, ...; in the inner relation, n1, n2, ...  Then we have
    3265             :      *
    3266             :      * size of join = m1 * n1 + m2 * n2 + ...
    3267             :      *
    3268             :      * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
    3269             :      * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
    3270             :      * relation
    3271             :      *
    3272             :      * This equation works correctly for outer tuples having no inner match
    3273             :      * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
    3274             :      * are effectively subtracting those from the number of rescanned tuples,
    3275             :      * when we should not.  Can we do better without expensive selectivity
    3276             :      * computations?
    3277             :      *
    3278             :      * The whole issue is moot if we are working from a unique-ified outer
    3279             :      * input, or if we know we don't need to mark/restore at all.
    3280             :      */
    3281      145514 :     if (IsA(outer_path, UniquePath) || path->skip_mark_restore)
    3282       61720 :         rescannedtuples = 0;
    3283             :     else
    3284             :     {
    3285       83794 :         rescannedtuples = mergejointuples - inner_path_rows;
    3286             :         /* Must clamp because of possible underestimate */
    3287       83794 :         if (rescannedtuples < 0)
    3288       38800 :             rescannedtuples = 0;
    3289             :     }
    3290             : 
    3291             :     /*
    3292             :      * We'll inflate various costs this much to account for rescanning.  Note
    3293             :      * that this is to be multiplied by something involving inner_rows, or
    3294             :      * another number related to the portion of the inner rel we'll scan.
    3295             :      */
    3296      145514 :     rescanratio = 1.0 + (rescannedtuples / inner_rows);
    3297             : 
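                     :     /*
                     :      * Worked example with hypothetical duplicate counts: outer m1 = 2,
                     :      * m2 = 3 and inner n1 = 4, n2 = 1 give a join of 2*4 + 3*1 = 11
                     :      * tuples, of which (2-1)*4 + (3-1)*1 = 6 = 11 - 5 are re-fetches;
                     :      * with inner_rows = 5, rescanratio = 1.0 + 6/5 = 2.2.
                     :      */
                     : 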
    3298             :     /*
    3299             :      * Decide whether we want to materialize the inner input to shield it from
    3300             :      * mark/restore overhead and repeated re-fetches.  Our cost model for regular
    3301             :      * re-fetches is that a re-fetch costs the same as an original fetch,
    3302             :      * which is probably an overestimate; but on the other hand we ignore the
    3303             :      * bookkeeping costs of mark/restore.  Not clear if it's worth developing
    3304             :      * a more refined model.  So we just need to inflate the inner run cost by
    3305             :      * rescanratio.
    3306             :      */
    3307      145514 :     bare_inner_cost = inner_run_cost * rescanratio;
    3308             : 
    3309             :     /*
    3310             :      * When we interpose a Material node the re-fetch cost is assumed to be
    3311             :      * just cpu_operator_cost per tuple, independently of the underlying
    3312             :      * plan's cost; and we charge an extra cpu_operator_cost per original
    3313             :      * fetch as well.  Note that we're assuming the materialize node will
    3314             :      * never spill to disk, since it only has to remember tuples back to the
    3315             :      * last mark.  (If there are a huge number of duplicates, our other cost
    3316             :      * factors will make the path so expensive that it probably won't get
    3317             :      * chosen anyway.)  So we don't use cost_rescan here.
    3318             :      *
    3319             :      * Note: keep this estimate in sync with create_mergejoin_plan's labeling
    3320             :      * of the generated Material node.
    3321             :      */
    3322      145514 :     mat_inner_cost = inner_run_cost +
    3323      145514 :         cpu_operator_cost * inner_rows * rescanratio;
    3324             : 
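                     :     /*
                     :      * Illustration with hypothetical numbers: given inner_run_cost = 100,
                     :      * inner_rows = 1000, rescanratio = 2.0 and the default
                     :      * cpu_operator_cost of 0.0025, bare_inner_cost = 200 while
                     :      * mat_inner_cost = 100 + 0.0025 * 1000 * 2.0 = 105, so materializing
                     :      * would look cheaper below.
                     :      */
                     : 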
    3325             :     /*
    3326             :      * If we don't need mark/restore at all, we don't need materialization.
    3327             :      */
    3328      145514 :     if (path->skip_mark_restore)
    3329       61402 :         path->materialize_inner = false;
    3330             : 
    3331             :     /*
    3332             :      * Prefer materializing if it looks cheaper, unless the user has asked to
    3333             :      * suppress materialization.
    3334             :      */
    3335       84112 :     else if (enable_material && mat_inner_cost < bare_inner_cost)
    3336         934 :         path->materialize_inner = true;
    3337             : 
    3338             :     /*
    3339             :      * Even if materializing doesn't look cheaper, we *must* do it if the
    3340             :      * inner path is to be used directly (without sorting) and it doesn't
    3341             :      * support mark/restore.
    3342             :      *
    3343             :      * Since the inner side must be ordered, and only Sorts and IndexScans can
    3344             :      * create order to begin with, and they both support mark/restore, you
    3345             :      * might think there's no problem --- but you'd be wrong.  Nestloop and
    3346             :      * merge joins can *preserve* the order of their inputs, so they can be
    3347             :      * selected as the input of a mergejoin, and they don't support
    3348             :      * mark/restore at present.
    3349             :      *
    3350             :      * We don't test the value of enable_material here, because
    3351             :      * materialization is required for correctness in this case, and turning
    3352             :      * it off does not entitle us to deliver an invalid plan.
    3353             :      */
    3354       83178 :     else if (innersortkeys == NIL &&
    3355        3018 :              !ExecSupportsMarkRestore(inner_path))
    3356         726 :         path->materialize_inner = true;
    3357             : 
    3358             :     /*
    3359             :      * Also, force materializing if the inner path is to be sorted and the
    3360             :      * sort is expected to spill to disk.  This is because the final merge
    3361             :      * pass can be done on-the-fly if it doesn't have to support mark/restore.
    3362             :      * We don't try to adjust the cost estimates for this consideration,
    3363             :      * though.
    3364             :      *
    3365             :      * Since materialization is a performance optimization in this case,
    3366             :      * rather than necessary for correctness, we skip it if enable_material is
    3367             :      * off.
    3368             :      */
    3369       82452 :     else if (enable_material && innersortkeys != NIL &&
    3370       80128 :              relation_byte_size(inner_path_rows,
    3371       80128 :                                 inner_path->pathtarget->width) >
    3372       80128 :              (work_mem * 1024L))
    3373         140 :         path->materialize_inner = true;
    3374             :     else
    3375       82312 :         path->materialize_inner = false;
    3376             : 
    3377             :     /* Charge the right incremental cost for the chosen case */
    3378      145514 :     if (path->materialize_inner)
    3379        1800 :         run_cost += mat_inner_cost;
    3380             :     else
    3381      143714 :         run_cost += bare_inner_cost;
    3382             : 
    3383             :     /* CPU costs */
    3384             : 
    3385             :     /*
    3386             :      * The number of tuple comparisons needed is approximately the number of
    3387             :      * outer rows plus inner rows plus rescanned tuples (can we refine
    3388             :      * this?).  At each one, we need to evaluate the mergejoin quals.
    3389             :      */
    3390      145514 :     startup_cost += merge_qual_cost.startup;
    3391      291028 :     startup_cost += merge_qual_cost.per_tuple *
    3392      145514 :         (outer_skip_rows + inner_skip_rows * rescanratio);
    3393      291028 :     run_cost += merge_qual_cost.per_tuple *
    3394      291028 :         ((outer_rows - outer_skip_rows) +
    3395      145514 :          (inner_rows - inner_skip_rows) * rescanratio);
    3396             : 
    3397             :     /*
    3398             :      * For each tuple that gets through the mergejoin proper, we charge
    3399             :      * cpu_tuple_cost plus the cost of evaluating additional restriction
    3400             :      * clauses that are to be applied at the join.  (This is pessimistic since
    3401             :      * not all of the quals may get evaluated at each tuple.)
    3402             :      *
    3403             :      * Note: we could adjust for SEMI/ANTI joins skipping some qual
    3404             :      * evaluations here, but it's probably not worth the trouble.
    3405             :      */
    3406      145514 :     startup_cost += qp_qual_cost.startup;
    3407      145514 :     cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
    3408      145514 :     run_cost += cpu_per_tuple * mergejointuples;
    3409             : 
    3410             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    3411      145514 :     startup_cost += path->jpath.path.pathtarget->cost.startup;
    3412      145514 :     run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
    3413             : 
    3414      145514 :     path->jpath.path.startup_cost = startup_cost;
    3415      145514 :     path->jpath.path.total_cost = startup_cost + run_cost;
    3416      145514 : }
    3417             : 
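                     : /*
                     :  * Standalone sketch (not part of costsize.c; names hypothetical, C99 bool)
                     :  * of the materialize_inner decision chain above, with the path tests
                     :  * flattened into booleans.
                     :  */
                     : static bool
                     : choose_materialize_inner(bool skip_mark_restore, bool enable_material,
                     :                          double mat_inner_cost, double bare_inner_cost,
                     :                          bool inner_used_unsorted,
                     :                          bool inner_supports_mark_restore,
                     :                          bool inner_sort_expected_to_spill)
                     : {
                     :     if (skip_mark_restore)
                     :         return false;           /* no mark/restore, nothing to shield */
                     :     if (enable_material && mat_inner_cost < bare_inner_cost)
                     :         return true;            /* materializing looks cheaper */
                     :     if (inner_used_unsorted && !inner_supports_mark_restore)
                     :         return true;            /* required for correctness */
                     :     if (enable_material && !inner_used_unsorted &&
                     :         inner_sort_expected_to_spill)
                     :         return true;            /* keeps the final merge on-the-fly */
                     :     return false;
                     : }
                     : 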
    3418             : /*
    3419             :  * run mergejoinscansel() with caching
    3420             :  */
    3421             : static MergeScanSelCache *
    3422      560950 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
    3423             : {
    3424             :     MergeScanSelCache *cache;
    3425             :     ListCell   *lc;
    3426             :     Selectivity leftstartsel,
    3427             :                 leftendsel,
    3428             :                 rightstartsel,
    3429             :                 rightendsel;
    3430             :     MemoryContext oldcontext;
    3431             : 
    3432             :     /* Do we have this result already? */
    3433      560954 :     foreach(lc, rinfo->scansel_cache)
    3434             :     {
    3435      498338 :         cache = (MergeScanSelCache *) lfirst(lc);
    3436      498338 :         if (cache->opfamily == pathkey->pk_opfamily &&
    3437      498338 :             cache->collation == pathkey->pk_eclass->ec_collation &&
    3438      498338 :             cache->strategy == pathkey->pk_strategy &&
    3439      498334 :             cache->nulls_first == pathkey->pk_nulls_first)
    3440      498334 :             return cache;
    3441             :     }
    3442             : 
    3443             :     /* Nope, do the computation */
    3444      125232 :     mergejoinscansel(root,
    3445       62616 :                      (Node *) rinfo->clause,
    3446             :                      pathkey->pk_opfamily,
    3447             :                      pathkey->pk_strategy,
    3448       62616 :                      pathkey->pk_nulls_first,
    3449             :                      &leftstartsel,
    3450             :                      &leftendsel,
    3451             :                      &rightstartsel,
    3452             :                      &rightendsel);
    3453             : 
    3454             :     /* Cache the result in suitably long-lived workspace */
    3455       62616 :     oldcontext = MemoryContextSwitchTo(root->planner_cxt);
    3456             : 
    3457       62616 :     cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
    3458       62616 :     cache->opfamily = pathkey->pk_opfamily;
    3459       62616 :     cache->collation = pathkey->pk_eclass->ec_collation;
    3460       62616 :     cache->strategy = pathkey->pk_strategy;
    3461       62616 :     cache->nulls_first = pathkey->pk_nulls_first;
    3462       62616 :     cache->leftstartsel = leftstartsel;
    3463       62616 :     cache->leftendsel = leftendsel;
    3464       62616 :     cache->rightstartsel = rightstartsel;
    3465       62616 :     cache->rightendsel = rightendsel;
    3466             : 
    3467       62616 :     rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
    3468             : 
    3469       62616 :     MemoryContextSwitchTo(oldcontext);
    3470             : 
    3471       62616 :     return cache;
    3472             : }
    3473             : 
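                     : /*
                     :  * The caching pattern above, reduced to a standalone sketch (not part of
                     :  * costsize.c; hypothetical names, assumes <stdlib.h>): scan a per-clause
                     :  * list for an entry matching the sort-order key; on a miss, compute the
                     :  * value once, prepend it to the list, and return it.
                     :  */
                     : #include <stdlib.h>
                     : 
                     : typedef struct SelCacheEntry
                     : {
                     :     int         sortorder_key;  /* stands in for opfamily/collation/etc. */
                     :     double      cached_sel;
                     :     struct SelCacheEntry *next;
                     : } SelCacheEntry;
                     : 
                     : static double
                     : lookup_or_compute_sel(SelCacheEntry **cache, int key,
                     :                       double (*compute) (int key))
                     : {
                     :     SelCacheEntry *e;
                     : 
                     :     for (e = *cache; e != NULL; e = e->next)
                     :     {
                     :         if (e->sortorder_key == key)
                     :             return e->cached_sel;  /* cache hit */
                     :     }
                     : 
                     :     /* cache miss: compute once and remember the result */
                     :     e = (SelCacheEntry *) malloc(sizeof(SelCacheEntry));
                     :     e->sortorder_key = key;
                     :     e->cached_sel = compute(key);
                     :     e->next = *cache;
                     :     *cache = e;
                     :     return e->cached_sel;
                     : }
                     : 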
    3474             : /*
    3475             :  * initial_cost_hashjoin
    3476             :  *    Preliminary estimate of the cost of a hashjoin path.
    3477             :  *
    3478             :  * This must quickly produce lower-bound estimates of the path's startup and
    3479             :  * total costs.  If we are unable to eliminate the proposed path from
    3480             :  * consideration using the lower bounds, final_cost_hashjoin will be called
    3481             :  * to obtain the final estimates.
    3482             :  *
    3483             :  * The exact division of labor between this function and final_cost_hashjoin
    3484             :  * is private to them, and represents a tradeoff between speed of the initial
    3485             :  * estimate and getting a tight lower bound.  We choose to not examine the
    3486             :  * join quals here (other than by counting the number of hash clauses),
    3487             :  * so we can't do much with CPU costs.  We do assume that
    3488             :  * ExecChooseHashTableSize is cheap enough to use here.
    3489             :  *
    3490             :  * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
    3491             :  *      other data to be used by final_cost_hashjoin
    3492             :  * 'jointype' is the type of join to be performed
    3493             :  * 'hashclauses' is the list of joinclauses to be used as hash clauses
    3494             :  * 'outer_path' is the outer input to the join
    3495             :  * 'inner_path' is the inner input to the join
    3496             :  * 'extra' contains miscellaneous information about the join
    3497             :  * 'parallel_hash' indicates that inner_path is partial and that a shared
    3498             :  *      hash table will be built in parallel
    3499             :  */
    3500             : void
    3501      296276 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
    3502             :                       JoinType jointype,
    3503             :                       List *hashclauses,
    3504             :                       Path *outer_path, Path *inner_path,
    3505             :                       JoinPathExtraData *extra,
    3506             :                       bool parallel_hash)
    3507             : {
    3508      296276 :     Cost        startup_cost = 0;
    3509      296276 :     Cost        run_cost = 0;
    3510      296276 :     double      outer_path_rows = outer_path->rows;
    3511      296276 :     double      inner_path_rows = inner_path->rows;
    3512      296276 :     double      inner_path_rows_total = inner_path_rows;
    3513      296276 :     int         num_hashclauses = list_length(hashclauses);
    3514             :     int         numbuckets;
    3515             :     int         numbatches;
    3516             :     int         num_skew_mcvs;
    3517             :     size_t      space_allowed;  /* unused */
    3518             : 
    3519             :     /* cost of source data */
    3520      296276 :     startup_cost += outer_path->startup_cost;
    3521      296276 :     run_cost += outer_path->total_cost - outer_path->startup_cost;
    3522      296276 :     startup_cost += inner_path->total_cost;
    3523             : 
    3524             :     /*
    3525             :      * Cost of computing hash function: must do it once per input tuple. We
    3526             :      * charge one cpu_operator_cost for each column's hash function.  Also,
    3527             :      * tack on one cpu_tuple_cost per inner row, to model the costs of
    3528             :      * inserting the row into the hashtable.
    3529             :      *
    3530             :      * XXX when a hashclause is more complex than a single operator, we really
    3531             :      * should charge the extra eval costs of the left or right side, as
    3532             :      * appropriate, here.  This seems more work than it's worth at the moment.
    3533             :      */
    3534      592552 :     startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
    3535      296276 :         * inner_path_rows;
    3536      296276 :     run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
    3537             : 
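                     :     /*
                     :      * Example with the default cost parameters (cpu_operator_cost =
                     :      * 0.0025, cpu_tuple_cost = 0.01): two hash clauses and 10000 inner
                     :      * rows add (2 * 0.0025 + 0.01) * 10000 = 150 to startup_cost for
                     :      * building the hash table.
                     :      */
                     : 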
    3538             :     /*
    3539             :      * If this is a parallel hash build, then the value we have for
    3540             :      * inner_rows_total currently refers only to the rows returned by each
    3541             :      * participant.  For shared hash table size estimation, we need the total
    3542             :      * number, so we need to undo the division.
    3543             :      */
    3544      296276 :     if (parallel_hash)
    3545        4828 :         inner_path_rows_total *= get_parallel_divisor(inner_path);
    3546             : 
    3547             :     /*
    3548             :      * Get hash table size that executor would use for inner relation.
    3549             :      *
    3550             :      * XXX for the moment, always assume that skew optimization will be
    3551             :      * performed.  As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
    3552             :      * trying to determine that for sure.
    3553             :      *
    3554             :      * XXX at some point it might be interesting to try to account for skew
    3555             :      * optimization in the cost estimate, but for now, we don't.
    3556             :      */
    3557      592552 :     ExecChooseHashTableSize(inner_path_rows_total,
    3558      296276 :                             inner_path->pathtarget->width,
    3559             :                             true,   /* useskew */
    3560             :                             parallel_hash,  /* try_combined_hash_mem */
    3561             :                             outer_path->parallel_workers,
    3562             :                             &space_allowed,
    3563             :                             &numbuckets,
    3564             :                             &numbatches,
    3565             :                             &num_skew_mcvs);
    3566             : 
    3567             :     /*
    3568             :      * If inner relation is too big then we will need to "batch" the join,
    3569             :      * which implies writing and reading most of the tuples to disk an extra
    3570             :      * time.  Charge seq_page_cost per page, since the I/O should be nice and
    3571             :      * sequential.  Writing the inner rel counts as startup cost, all the rest
    3572             :      * as run cost.
    3573             :      */
    3574      296276 :     if (numbatches > 1)
    3575             :     {
    3576        2072 :         double      outerpages = page_size(outer_path_rows,
    3577        2072 :                                            outer_path->pathtarget->width);
    3578        2072 :         double      innerpages = page_size(inner_path_rows,
    3579        2072 :                                            inner_path->pathtarget->width);
    3580             : 
    3581        2072 :         startup_cost += seq_page_cost * innerpages;
    3582        2072 :         run_cost += seq_page_cost * (innerpages + 2 * outerpages);
    3583             :     }
    3584             : 
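                     :     /*
                     :      * Illustration with hypothetical page counts: 100 inner and 300 outer
                     :      * pages at the default seq_page_cost of 1.0 would add 100 to
                     :      * startup_cost and 100 + 2 * 300 = 700 to run_cost.
                     :      */
                     : 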
    3585             :     /* CPU costs left for later */
    3586             : 
    3587             :     /* Public result fields */
    3588      296276 :     workspace->startup_cost = startup_cost;
    3589      296276 :     workspace->total_cost = startup_cost + run_cost;
    3590             :     /* Save private data for final_cost_hashjoin */
    3591      296276 :     workspace->run_cost = run_cost;
    3592      296276 :     workspace->numbuckets = numbuckets;
    3593      296276 :     workspace->numbatches = numbatches;
    3594      296276 :     workspace->inner_rows_total = inner_path_rows_total;
    3595      296276 : }
    3596             : 
    3597             : /*
    3598             :  * final_cost_hashjoin
    3599             :  *    Final estimate of the cost and result size of a hashjoin path.
    3600             :  *
    3601             :  * Note: the numbatches estimate is also saved into 'path' for use later
    3602             :  *
    3603             :  * 'path' is already filled in except for the rows and cost fields and
    3604             :  *      num_batches
    3605             :  * 'workspace' is the result from initial_cost_hashjoin
    3606             :  * 'extra' contains miscellaneous information about the join
    3607             :  */
    3608             : void
    3609      158844 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
    3610             :                     JoinCostWorkspace *workspace,
    3611             :                     JoinPathExtraData *extra)
    3612             : {
    3613      158844 :     Path       *outer_path = path->jpath.outerjoinpath;
    3614      158844 :     Path       *inner_path = path->jpath.innerjoinpath;
    3615      158844 :     double      outer_path_rows = outer_path->rows;
    3616      158844 :     double      inner_path_rows = inner_path->rows;
    3617      158844 :     double      inner_path_rows_total = workspace->inner_rows_total;
    3618      158844 :     List       *hashclauses = path->path_hashclauses;
    3619      158844 :     Cost        startup_cost = workspace->startup_cost;
    3620      158844 :     Cost        run_cost = workspace->run_cost;
    3621      158844 :     int         numbuckets = workspace->numbuckets;
    3622      158844 :     int         numbatches = workspace->numbatches;
    3623             :     int         hash_mem;
    3624             :     Cost        cpu_per_tuple;
    3625             :     QualCost    hash_qual_cost;
    3626             :     QualCost    qp_qual_cost;
    3627             :     double      hashjointuples;
    3628             :     double      virtualbuckets;
    3629             :     Selectivity innerbucketsize;
    3630             :     Selectivity innermcvfreq;
    3631             :     ListCell   *hcl;
    3632             : 
    3633             :     /* Mark the path with the correct row estimate */
    3634      158844 :     if (path->jpath.path.param_info)
    3635         866 :         path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
    3636             :     else
    3637      157978 :         path->jpath.path.rows = path->jpath.path.parent->rows;
    3638             : 
    3639             :     /* For partial paths, scale row estimate. */
    3640      158844 :     if (path->jpath.path.parallel_workers > 0)
    3641             :     {
    3642        4502 :         double      parallel_divisor = get_parallel_divisor(&path->jpath.path);
    3643             : 
    3644        4502 :         path->jpath.path.rows =
    3645        4502 :             clamp_row_est(path->jpath.path.rows / parallel_divisor);
    3646             :     }
    3647             : 
    3648             :     /*
    3649             :      * We could include disable_cost in the preliminary estimate, but that
    3650             :      * would amount to optimizing for the case where the join method is
    3651             :      * disabled, which doesn't seem like the way to bet.
    3652             :      */
    3653      158844 :     if (!enable_hashjoin)
    3654          76 :         startup_cost += disable_cost;
    3655             : 
    3656             :     /* mark the path with estimated # of batches */
    3657      158844 :     path->num_batches = numbatches;
    3658             : 
    3659             :     /* store the total number of tuples (sum of partial row estimates) */
    3660      158844 :     path->inner_rows_total = inner_path_rows_total;
    3661             : 
    3662             :     /* and compute the number of "virtual" buckets in the whole join */
    3663      158844 :     virtualbuckets = (double) numbuckets * (double) numbatches;
    3664             : 
    3665             :     /*
    3666             :      * Determine bucketsize fraction and MCV frequency for the inner relation.
    3667             :      * We use the smallest bucketsize or MCV frequency estimated for any
    3668             :      * individual hashclause; this is undoubtedly conservative.
    3669             :      *
    3670             :      * BUT: if inner relation has been unique-ified, we can assume it's good
    3671             :      * for hashing.  This is important both because it's the right answer, and
    3672             :      * because we avoid contaminating the cache with a value that's wrong for
    3673             :      * non-unique-ified paths.
    3674             :      */
    3675      158844 :     if (IsA(inner_path, UniquePath))
    3676             :     {
    3677         460 :         innerbucketsize = 1.0 / virtualbuckets;
    3678         460 :         innermcvfreq = 0.0;
    3679             :     }
    3680             :     else
    3681             :     {
    3682      158384 :         innerbucketsize = 1.0;
    3683      158384 :         innermcvfreq = 1.0;
    3684      359014 :         foreach(hcl, hashclauses)
    3685             :         {
    3686      200630 :             RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
    3687             :             Selectivity thisbucketsize;
    3688             :             Selectivity thismcvfreq;
    3689             : 
    3690             :             /*
    3691             :              * First we have to figure out which side of the hashjoin clause
    3692             :              * is the inner side.
    3693             :              *
    3694             :              * Since we tend to visit the same clauses over and over when
    3695             :              * planning a large query, we cache the bucket stats estimates in
    3696             :              * the RestrictInfo node to avoid repeated lookups of statistics.
    3697             :              */
    3698      200630 :             if (bms_is_subset(restrictinfo->right_relids,
    3699      200630 :                               inner_path->parent->relids))
    3700             :             {
    3701             :                 /* righthand side is inner */
    3702      107566 :                 thisbucketsize = restrictinfo->right_bucketsize;
    3703      107566 :                 if (thisbucketsize < 0)
    3704             :                 {
    3705             :                     /* not cached yet */
    3706       51744 :                     estimate_hash_bucket_stats(root,
    3707       51744 :                                                get_rightop(restrictinfo->clause),
    3708             :                                                virtualbuckets,
    3709             :                                                &restrictinfo->right_mcvfreq,
    3710             :                                                &restrictinfo->right_bucketsize);
    3711       51744 :                     thisbucketsize = restrictinfo->right_bucketsize;
    3712             :                 }
    3713      107566 :                 thismcvfreq = restrictinfo->right_mcvfreq;
    3714             :             }
    3715             :             else
    3716             :             {
    3717             :                 Assert(bms_is_subset(restrictinfo->left_relids,
    3718             :                                      inner_path->parent->relids));
    3719             :                 /* lefthand side is inner */
    3720       93064 :                 thisbucketsize = restrictinfo->left_bucketsize;
    3721       93064 :                 if (thisbucketsize < 0)
    3722             :                 {
    3723             :                     /* not cached yet */
    3724       41310 :                     estimate_hash_bucket_stats(root,
    3725       41310 :                                                get_leftop(restrictinfo->clause),
    3726             :                                                virtualbuckets,
    3727             :                                                &restrictinfo->left_mcvfreq,
    3728             :                                                &restrictinfo->left_bucketsize);
    3729       41310 :                     thisbucketsize = restrictinfo->left_bucketsize;
    3730             :                 }
    3731       93064 :                 thismcvfreq = restrictinfo->left_mcvfreq;
    3732             :             }
    3733             : 
    3734      200630 :             if (innerbucketsize > thisbucketsize)
    3735       91022 :                 innerbucketsize = thisbucketsize;
    3736      200630 :             if (innermcvfreq > thismcvfreq)
    3737      164428 :                 innermcvfreq = thismcvfreq;
    3738             :         }
    3739             :     }
    3740             : 
    3741             :     /*
    3742             :      * If the bucket holding the inner MCV would exceed hash_mem, we don't
    3743             :      * want to hash unless there is really no other alternative, so apply
    3744             :      * disable_cost.  (The executor normally copes with excessive memory usage
    3745             :      * by splitting batches, but obviously it cannot separate equal values
    3746             :      * that way, so it will be unable to drive the batch size below hash_mem
    3747             :      * when this is true.)
    3748             :      */
    3749      158844 :     hash_mem = get_hash_mem();
    3750      158844 :     if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
    3751      158844 :                            inner_path->pathtarget->width) >
    3752      158844 :         (hash_mem * 1024L))
    3753           0 :         startup_cost += disable_cost;
    3754             : 
    3755             :     /*
    3756             :      * Compute cost of the hashquals and qpquals (other restriction clauses)
    3757             :      * separately.
    3758             :      */
    3759      158844 :     cost_qual_eval(&hash_qual_cost, hashclauses, root);
    3760      158844 :     cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
    3761      158844 :     qp_qual_cost.startup -= hash_qual_cost.startup;
    3762      158844 :     qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
    3763             : 
    3764             :     /* CPU costs */
    3765             : 
    3766      158844 :     if (path->jpath.jointype == JOIN_SEMI ||
    3767      157748 :         path->jpath.jointype == JOIN_ANTI ||
    3768      147174 :         extra->inner_unique)
    3769       62224 :     {
    3770             :         double      outer_matched_rows;
    3771             :         Selectivity inner_scan_frac;
    3772             : 
    3773             :         /*
    3774             :          * With a SEMI or ANTI join, or if the innerrel is known unique, the
    3775             :          * executor will stop after the first match.
    3776             :          *
    3777             :          * For an outer-rel row that has at least one match, we can expect the
    3778             :          * bucket scan to stop after a fraction 1/(match_count+1) of the
    3779             :          * bucket's rows, if the matches are evenly distributed.  Since they
    3780             :          * probably aren't quite evenly distributed, we apply a fuzz factor of
    3781             :          * 2.0 to that fraction.  (If we used a larger fuzz factor, we'd have
    3782             :          * to clamp inner_scan_frac to at most 1.0; but since match_count is
    3783             :          * at least 1, no such clamp is needed now.)
    3784             :          */
    3785       62224 :         outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
    3786       62224 :         inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
    3787             : 
    3788       62224 :         startup_cost += hash_qual_cost.startup;
    3789      186672 :         run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
    3790       62224 :             clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
    3791             : 
    3792             :         /*
    3793             :          * For unmatched outer-rel rows, the picture is quite a lot different.
    3794             :          * In the first place, there is no reason to assume that these rows
    3795             :          * preferentially hit heavily-populated buckets; instead assume they
    3796             :          * are uncorrelated with the inner distribution and so they see an
    3797             :          * average bucket size of inner_path_rows / virtualbuckets.  In the
    3798             :          * second place, it seems likely that they will have few if any exact
    3799             :          * hash-code matches and so very few of the tuples in the bucket will
    3800             :          * actually require eval of the hash quals.  We don't have any good
    3801             :          * way to estimate how many will, but for the moment assume that the
    3802             :          * effective cost per bucket entry is one-tenth what it is for
    3803             :          * matchable tuples.
    3804             :          */
    3805      186672 :         run_cost += hash_qual_cost.per_tuple *
    3806      124448 :             (outer_path_rows - outer_matched_rows) *
    3807       62224 :             clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
    3808             : 
    3809             :         /* Get # of tuples that will pass the basic join */
    3810       62224 :         if (path->jpath.jointype == JOIN_ANTI)
    3811       10574 :             hashjointuples = outer_path_rows - outer_matched_rows;
    3812             :         else
    3813       51650 :             hashjointuples = outer_matched_rows;
    3814             :     }
    3815             :     else
    3816             :     {
    3817             :         /*
    3818             :          * The number of tuple comparisons needed is the number of outer
    3819             :          * tuples times the typical number of tuples in a hash bucket, which
    3820             :          * is the inner relation size times its bucketsize fraction.  At each
    3821             :          * one, we need to evaluate the hashjoin quals.  But actually,
    3822             :          * charging the full qual eval cost at each tuple is pessimistic,
    3823             :          * since we don't evaluate the quals unless the hash values match
    3824             :          * exactly.  For lack of a better idea, halve the cost estimate to
    3825             :          * allow for that.
    3826             :          */
    3827       96620 :         startup_cost += hash_qual_cost.startup;
    3828      289860 :         run_cost += hash_qual_cost.per_tuple * outer_path_rows *
    3829       96620 :             clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
    3830             : 
    3831             :         /*
    3832             :          * Get approx # tuples passing the hashquals.  We use
    3833             :          * approx_tuple_count here because we need an estimate done with
    3834             :          * JOIN_INNER semantics.
    3835             :          */
    3836       96620 :         hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
    3837             :     }
    3838             : 
    3839             :     /*
    3840             :      * For each tuple that gets through the hashjoin proper, we charge
    3841             :      * cpu_tuple_cost plus the cost of evaluating additional restriction
    3842             :      * clauses that are to be applied at the join.  (This is pessimistic since
    3843             :      * not all of the quals may get evaluated at each tuple.)
    3844             :      */
    3845      158844 :     startup_cost += qp_qual_cost.startup;
    3846      158844 :     cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
    3847      158844 :     run_cost += cpu_per_tuple * hashjointuples;
    3848             : 
    3849             :     /* tlist eval costs are paid per output row, not per tuple scanned */
    3850      158844 :     startup_cost += path->jpath.path.pathtarget->cost.startup;
    3851      158844 :     run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
    3852             : 
    3853      158844 :     path->jpath.path.startup_cost = startup_cost;
    3854      158844 :     path->jpath.path.total_cost = startup_cost + run_cost;
    3855      158844 : }
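
To check the SEMI/ANTI arithmetic above by hand, here is a minimal standalone sketch. It is not part of costsize.c: every input value is invented for illustration, and clamp_row_est is reduced to round-and-floor-at-one.

    /*
     * Standalone illustration of the SEMI/ANTI hash-join CPU charges.
     * All inputs are hypothetical; only the arithmetic mirrors the code.
     * Build with: cc semi_cost.c -lm
     */
    #include <math.h>
    #include <stdio.h>

    static double clamp_row_est(double nrows)
    {
        /* simplified: round, and never estimate fewer than one row */
        nrows = rint(nrows);
        return (nrows < 1.0) ? 1.0 : nrows;
    }

    int main(void)
    {
        /* hypothetical planner estimates */
        double outer_path_rows = 10000.0;
        double inner_path_rows = 5000.0;
        double outer_match_frac = 0.3;       /* semifactors.outer_match_frac */
        double match_count = 4.0;            /* semifactors.match_count */
        double innerbucketsize = 0.01;
        double virtualbuckets = 1024.0;
        double hash_qual_per_tuple = 0.0025; /* one cpu_operator_cost, say */

        double outer_matched_rows = rint(outer_path_rows * outer_match_frac);
        double inner_scan_frac = 2.0 / (match_count + 1.0);
        double run_cost = 0.0;

        /* matched outer rows: scan part of a worst-case bucket, halved */
        run_cost += hash_qual_per_tuple * outer_matched_rows *
            clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;

        /* unmatched outer rows: average bucket, one-tenth that charge */
        run_cost += hash_qual_per_tuple * (outer_path_rows - outer_matched_rows) *
            clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;

        printf("matched=%.0f inner_scan_frac=%.3f run_cost=%.4f\n",
               outer_matched_rows, inner_scan_frac, run_cost);
        return 0;
    }

With match_count = 4, inner_scan_frac comes out to 0.4, i.e. a matched outer row is expected to examine 40% of its bucket before the executor stops.
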
    3856             : 
    3857             : 
    3858             : /*
    3859             :  * cost_subplan
    3860             :  *      Figure the costs for a SubPlan (or initplan).
    3861             :  *
    3862             :  * Note: we could dig the subplan's Plan out of the root list, but in practice
    3863             :  * all callers have it handy already, so we make them pass it.
    3864             :  */
    3865             : void
    3866       53104 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
    3867             : {
    3868             :     QualCost    sp_cost;
    3869             : 
    3870             :     /* Figure any cost for evaluating the testexpr */
    3871       53104 :     cost_qual_eval(&sp_cost,
    3872       53104 :                    make_ands_implicit((Expr *) subplan->testexpr),
    3873             :                    root);
    3874             : 
    3875       53104 :     if (subplan->useHashTable)
    3876             :     {
    3877             :         /*
    3878             :          * If we are using a hash table for the subquery outputs, then the
    3879             :          * cost of evaluating the query is a one-time cost.  We charge one
    3880             :          * cpu_operator_cost per tuple for the work of loading the hashtable,
    3881             :          * too.
    3882             :          */
    3883        2760 :         sp_cost.startup += plan->total_cost +
    3884        1380 :             cpu_operator_cost * plan->plan_rows;
    3885             : 
    3886             :         /*
    3887             :          * The per-tuple costs include the cost of evaluating the lefthand
    3888             :          * expressions, plus the cost of probing the hashtable.  We already
    3889             :          * accounted for the lefthand expressions as part of the testexpr, and
    3890             :          * will also have counted one cpu_operator_cost for each comparison
    3891             :          * operator.  That is probably too low for the probing cost, but it's
    3892             :          * hard to make a better estimate, so live with it for now.
    3893             :          */
    3894             :     }
    3895             :     else
    3896             :     {
    3897             :         /*
    3898             :          * Otherwise we will be rescanning the subplan output on each
    3899             :          * evaluation.  We need to estimate how much of the output we will
    3900             :          * actually need to scan.  NOTE: this logic should agree with the
    3901             :          * tuple_fraction estimates used by make_subplan() in
    3902             :          * plan/subselect.c.
    3903             :          */
    3904       51724 :         Cost        plan_run_cost = plan->total_cost - plan->startup_cost;
    3905             : 
    3906       51724 :         if (subplan->subLinkType == EXISTS_SUBLINK)
    3907             :         {
    3908             :             /* we only need to fetch 1 tuple; clamp to avoid zero divide */
    3909        1720 :             sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
    3910             :         }
    3911       50004 :         else if (subplan->subLinkType == ALL_SUBLINK ||
    3912       49992 :                  subplan->subLinkType == ANY_SUBLINK)
    3913             :         {
    3914             :             /* assume we need 50% of the tuples */
    3915          68 :             sp_cost.per_tuple += 0.50 * plan_run_cost;
    3916             :             /* also charge a cpu_operator_cost per row examined */
    3917          68 :             sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
    3918             :         }
    3919             :         else
    3920             :         {
    3921             :             /* assume we need all tuples */
    3922       49936 :             sp_cost.per_tuple += plan_run_cost;
    3923             :         }
    3924             : 
    3925             :         /*
    3926             :          * Also account for subplan's startup cost. If the subplan is
    3927             :          * uncorrelated or undirect correlated, AND its topmost node is one
    3928             :          * that materializes its output, assume that we'll only need to pay
    3929             :          * its startup cost once; otherwise assume we pay the startup cost
    3930             :          * every time.
    3931             :          */
    3932       62606 :         if (subplan->parParam == NIL &&
    3933       10882 :             ExecMaterializesOutput(nodeTag(plan)))
    3934         182 :             sp_cost.startup += plan->startup_cost;
    3935             :         else
    3936       51542 :             sp_cost.per_tuple += plan->startup_cost;
    3937             :     }
    3938             : 
    3939       53104 :     subplan->startup_cost = sp_cost.startup;
    3940       53104 :     subplan->per_call_cost = sp_cost.per_tuple;
    3941       53104 : }
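
The three non-hashed branches reduce to simple fractions of the subplan's run cost. A hedged, self-contained sketch (all plan numbers invented; cpu_operator_cost shown at its default):

    /*
     * Per-call charges cost_subplan derives for a non-hashed subplan,
     * for the three sublink cases.  Inputs are hypothetical.
     */
    #include <stdio.h>

    int main(void)
    {
        double startup_cost = 10.0;        /* subplan's startup cost */
        double total_cost = 110.0;         /* subplan's total cost */
        double plan_rows = 1000.0;
        double cpu_operator_cost = 0.0025; /* default GUC value */
        double plan_run_cost = total_cost - startup_cost;

        /* EXISTS: we only need to fetch one tuple */
        double exists_cost = plan_run_cost / plan_rows;

        /* ANY/ALL: assume half the output, plus one operator per row examined */
        double any_cost = 0.50 * plan_run_cost +
            0.50 * plan_rows * cpu_operator_cost;

        /* anything else: the whole output */
        double other_cost = plan_run_cost;

        printf("EXISTS %.3f, ANY/ALL %.3f, default %.3f\n",
               exists_cost, any_cost, other_cost);
        return 0;
    }
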
    3942             : 
    3943             : 
    3944             : /*
    3945             :  * cost_rescan
    3946             :  *      Given a finished Path, estimate the costs of rescanning it after
    3947             :  *      having done so the first time.  For some Path types a rescan is
    3948             :  *      cheaper than an original scan (if no parameters change), and this
    3949             :  *      function embodies knowledge about that.  The default is to return
    3950             :  *      the same costs stored in the Path.  (Note that the cost estimates
    3951             :  *      actually stored in Paths are always for first scans.)
    3952             :  *
    3953             :  * This function is not currently intended to model effects such as rescans
    3954             :  * being cheaper due to disk block caching; what we are concerned with is
    3955             :  * plan types wherein the executor caches results explicitly, or doesn't
    3956             :  * redo startup calculations, etc.
    3957             :  */
    3958             : static void
    3959     1167254 : cost_rescan(PlannerInfo *root, Path *path,
    3960             :             Cost *rescan_startup_cost,  /* output parameters */
    3961             :             Cost *rescan_total_cost)
    3962             : {
    3963     1167254 :     switch (path->pathtype)
    3964             :     {
    3965       11858 :         case T_FunctionScan:
    3966             : 
    3967             :             /*
    3968             :              * Currently, nodeFunctionscan.c always executes the function to
    3969             :              * completion before returning any rows, and caches the results in
    3970             :              * a tuplestore.  So the function eval cost is all startup cost
    3971             :              * and isn't paid over again on rescans. However, all run costs
    3972             :              * will be paid over again.
    3973             :              */
    3974       11858 :             *rescan_startup_cost = 0;
    3975       11858 :             *rescan_total_cost = path->total_cost - path->startup_cost;
    3976       11858 :             break;
    3977       39150 :         case T_HashJoin:
    3978             : 
    3979             :             /*
    3980             :              * If it's a single-batch join, we don't need to rebuild the hash
    3981             :              * table during a rescan.
    3982             :              */
    3983       39150 :             if (((HashPath *) path)->num_batches == 1)
    3984             :             {
    3985             :                 /* Startup cost is exactly the cost of hash table building */
    3986       39150 :                 *rescan_startup_cost = 0;
    3987       39150 :                 *rescan_total_cost = path->total_cost - path->startup_cost;
    3988             :             }
    3989             :             else
    3990             :             {
    3991             :                 /* Otherwise, no special treatment */
    3992           0 :                 *rescan_startup_cost = path->startup_cost;
    3993           0 :                 *rescan_total_cost = path->total_cost;
    3994             :             }
    3995       39150 :             break;
    3996        2078 :         case T_CteScan:
    3997             :         case T_WorkTableScan:
    3998             :             {
    3999             :                 /*
    4000             :                  * These plan types materialize their final result in a
    4001             :                  * tuplestore or tuplesort object.  So the rescan cost is only
    4002             :                  * cpu_tuple_cost per tuple, unless the result is large enough
    4003             :                  * to spill to disk.
    4004             :                  */
    4005        2078 :                 Cost        run_cost = cpu_tuple_cost * path->rows;
    4006        2078 :                 double      nbytes = relation_byte_size(path->rows,
    4007        2078 :                                                         path->pathtarget->width);
    4008        2078 :                 long        work_mem_bytes = work_mem * 1024L;
    4009             : 
    4010        2078 :                 if (nbytes > work_mem_bytes)
    4011             :                 {
    4012             :                     /* It will spill, so account for re-read cost */
    4013           0 :                     double      npages = ceil(nbytes / BLCKSZ);
    4014             : 
    4015           0 :                     run_cost += seq_page_cost * npages;
    4016             :                 }
    4017        2078 :                 *rescan_startup_cost = 0;
    4018        2078 :                 *rescan_total_cost = run_cost;
    4019             :             }
    4020        2078 :             break;
    4021      454888 :         case T_Material:
    4022             :         case T_Sort:
    4023             :             {
    4024             :                 /*
    4025             :                  * These plan types not only materialize their results, but do
    4026             :                  * not implement qual filtering or projection.  So they are
    4027             :                  * even cheaper to rescan than the ones above.  We charge only
    4028             :                  * cpu_operator_cost per tuple.  (Note: keep that in sync with
    4029             :                  * the run_cost charge in cost_sort, and also see comments in
    4030             :                  * cost_material before you change it.)
    4031             :                  */
    4032      454888 :                 Cost        run_cost = cpu_operator_cost * path->rows;
    4033      454888 :                 double      nbytes = relation_byte_size(path->rows,
    4034      454888 :                                                         path->pathtarget->width);
    4035      454888 :                 long        work_mem_bytes = work_mem * 1024L;
    4036             : 
    4037      454888 :                 if (nbytes > work_mem_bytes)
    4038             :                 {
    4039             :                     /* It will spill, so account for re-read cost */
    4040        3648 :                     double      npages = ceil(nbytes / BLCKSZ);
    4041             : 
    4042        3648 :                     run_cost += seq_page_cost * npages;
    4043             :                 }
    4044      454888 :                 *rescan_startup_cost = 0;
    4045      454888 :                 *rescan_total_cost = run_cost;
    4046             :             }
    4047      454888 :             break;
    4048      659280 :         default:
    4049      659280 :             *rescan_startup_cost = path->startup_cost;
    4050      659280 :             *rescan_total_cost = path->total_cost;
    4051      659280 :             break;
    4052             :     }
    4053     1167254 : }
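
The Material/Sort branch is the one most often exercised (see the hit counts above). A minimal sketch of just that charge, with invented sizes and a crude stand-in for relation_byte_size() that ignores per-tuple overhead:

    /*
     * Rescan charge for a Material/Sort node: cpu_operator_cost per tuple,
     * plus a sequential re-read charge if the result spills past work_mem.
     */
    #include <math.h>
    #include <stdio.h>

    #define BLCKSZ 8192              /* PostgreSQL's default block size */

    int main(void)
    {
        double rows = 200000.0;
        double width = 64.0;             /* average tuple width, bytes */
        double cpu_operator_cost = 0.0025;
        double seq_page_cost = 1.0;
        long   work_mem_kb = 4096;       /* hypothetical work_mem = 4MB */

        double run_cost = cpu_operator_cost * rows;
        double nbytes = rows * width;    /* simplified relation_byte_size() */

        if (nbytes > work_mem_kb * 1024L)
            run_cost += seq_page_cost * ceil(nbytes / BLCKSZ);

        printf("rescan_total_cost = %.2f\n", run_cost);
        return 0;
    }

Here 12.8MB of tuples overflows the 4MB work_mem, so the rescan pays for re-reading roughly 1563 pages on top of the per-tuple charge.
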
    4054             : 
    4055             : 
    4056             : /*
    4057             :  * cost_qual_eval
    4058             :  *      Estimate the CPU costs of evaluating a WHERE clause.
    4059             :  *      The input can be either an implicitly-ANDed list of boolean
    4060             :  *      expressions, or a list of RestrictInfo nodes.  (The latter is
    4061             :  *      preferred since it allows caching of the results.)
    4062             :  *      The result includes both a one-time (startup) component,
    4063             :  *      and a per-evaluation component.
    4064             :  */
    4065             : void
    4066     1996702 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
    4067             : {
    4068             :     cost_qual_eval_context context;
    4069             :     ListCell   *l;
    4070             : 
    4071     1996702 :     context.root = root;
    4072     1996702 :     context.total.startup = 0;
    4073     1996702 :     context.total.per_tuple = 0;
    4074             : 
    4075             :     /* We don't charge any cost for the implicit ANDing at top level ... */
    4076             : 
    4077     3788290 :     foreach(l, quals)
    4078             :     {
    4079     1791588 :         Node       *qual = (Node *) lfirst(l);
    4080             : 
    4081     1791588 :         cost_qual_eval_walker(qual, &context);
    4082             :     }
    4083             : 
    4084     1996702 :     *cost = context.total;
    4085     1996702 : }
    4086             : 
    4087             : /*
    4088             :  * cost_qual_eval_node
    4089             :  *      As above, for a single RestrictInfo or expression.
    4090             :  */
    4091             : void
    4092     1258712 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
    4093             : {
    4094             :     cost_qual_eval_context context;
    4095             : 
    4096     1258712 :     context.root = root;
    4097     1258712 :     context.total.startup = 0;
    4098     1258712 :     context.total.per_tuple = 0;
    4099             : 
    4100     1258712 :     cost_qual_eval_walker(qual, &context);
    4101             : 
    4102     1258712 :     *cost = context.total;
    4103     1258712 : }
    4104             : 
    4105             : static bool
    4106     5192318 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
    4107             : {
    4108     5192318 :     if (node == NULL)
    4109       53746 :         return false;
    4110             : 
    4111             :     /*
    4112             :      * RestrictInfo nodes contain an eval_cost field reserved for this
    4113             :      * routine's use, so that it's not necessary to evaluate the qual clause's
    4114             :      * cost more than once.  If the clause's cost hasn't been computed yet,
    4115             :      * the field's startup value will contain -1.
    4116             :      */
    4117     5138572 :     if (IsA(node, RestrictInfo))
    4118             :     {
    4119     1875988 :         RestrictInfo *rinfo = (RestrictInfo *) node;
    4120             : 
    4121     1875988 :         if (rinfo->eval_cost.startup < 0)
    4122             :         {
    4123             :             cost_qual_eval_context locContext;
    4124             : 
    4125      347198 :             locContext.root = context->root;
    4126      347198 :             locContext.total.startup = 0;
    4127      347198 :             locContext.total.per_tuple = 0;
    4128             : 
    4129             :             /*
    4130             :              * For an OR clause, recurse into the marked-up tree so that we
    4131             :              * set the eval_cost for contained RestrictInfos too.
    4132             :              */
    4133      347198 :             if (rinfo->orclause)
    4134        5606 :                 cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
    4135             :             else
    4136      341592 :                 cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
    4137             : 
    4138             :             /*
    4139             :              * If the RestrictInfo is marked pseudoconstant, it will be tested
    4140             :              * only once, so treat its cost as all startup cost.
    4141             :              */
    4142      347198 :             if (rinfo->pseudoconstant)
    4143             :             {
    4144             :                 /* count one execution during startup */
    4145        2880 :                 locContext.total.startup += locContext.total.per_tuple;
    4146        2880 :                 locContext.total.per_tuple = 0;
    4147             :             }
    4148      347198 :             rinfo->eval_cost = locContext.total;
    4149             :         }
    4150     1875988 :         context->total.startup += rinfo->eval_cost.startup;
    4151     1875988 :         context->total.per_tuple += rinfo->eval_cost.per_tuple;
    4152             :         /* do NOT recurse into children */
    4153     1875988 :         return false;
    4154             :     }
    4155             : 
    4156             :     /*
    4157             :      * For each operator or function node in the given tree, we charge the
    4158             :      * estimated execution cost given by pg_proc.procost (remember to multiply
    4159             :      * this by cpu_operator_cost).
    4160             :      *
    4161             :      * Vars and Consts are charged zero, and so are boolean operators (AND,
    4162             :      * OR, NOT). Simplistic, but a lot better than no model at all.
    4163             :      *
    4164             :      * Should we try to account for the possibility of short-circuit
    4165             :      * evaluation of AND/OR?  Probably *not*, because that would make the
    4166             :      * results depend on the clause ordering, and we are not in any position
    4167             :      * to expect that the current ordering of the clauses is the one that's
    4168             :      * going to end up being used.  The above per-RestrictInfo caching would
    4169             :      * not mix well with trying to re-order clauses anyway.
    4170             :      *
    4171             :      * Another issue that is entirely ignored here is that if a set-returning
    4172             :      * function is below top level in the tree, the functions/operators above
    4173             :      * it will need to be evaluated multiple times.  In practical use, such
    4174             :      * cases arise so seldom as to not be worth the added complexity needed;
    4175             :      * moreover, since our rowcount estimates for functions tend to be pretty
    4176             :      * phony, the results would also be pretty phony.
    4177             :      */
    4178     3262584 :     if (IsA(node, FuncExpr))
    4179             :     {
    4180      271742 :         add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
    4181             :                           &context->total);
    4182             :     }
    4183     2990842 :     else if (IsA(node, OpExpr) ||
    4184     2591500 :              IsA(node, DistinctExpr) ||
    4185     2591022 :              IsA(node, NullIfExpr))
    4186             :     {
    4187             :         /* rely on struct equivalence to treat these all alike */
    4188      399992 :         set_opfuncid((OpExpr *) node);
    4189      399992 :         add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
    4190             :                           &context->total);
    4191             :     }
    4192     2590850 :     else if (IsA(node, ScalarArrayOpExpr))
    4193             :     {
    4194             :         /*
    4195             :          * Estimate that the operator will be applied to about half of the
    4196             :          * array elements before the answer is determined.
    4197             :          */
    4198       24126 :         ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
    4199       24126 :         Node       *arraynode = (Node *) lsecond(saop->args);
    4200             :         QualCost    sacosts;
    4201             : 
    4202       24126 :         set_sa_opfuncid(saop);
    4203       24126 :         sacosts.startup = sacosts.per_tuple = 0;
    4204       24126 :         add_function_cost(context->root, saop->opfuncid, NULL,
    4205             :                           &sacosts);
    4206       24126 :         context->total.startup += sacosts.startup;
    4207       72378 :         context->total.per_tuple += sacosts.per_tuple *
    4208       24126 :             estimate_array_length(arraynode) * 0.5;
    4209             :     }
    4210     2566724 :     else if (IsA(node, Aggref) ||
    4211     2529036 :              IsA(node, WindowFunc))
    4212             :     {
    4213             :         /*
    4214             :          * Aggref and WindowFunc nodes are (and should be) treated like Vars,
    4215             :          * ie, zero execution cost in the current model, because they behave
    4216             :          * essentially like Vars at execution.  We disregard the costs of
    4217             :          * their input expressions for the same reason.  The actual execution
    4218             :          * costs of the aggregate/window functions and their arguments have to
    4219             :          * be factored into plan-node-specific costing of the Agg or WindowAgg
    4220             :          * plan node.
    4221             :          */
    4222       39296 :         return false;           /* don't recurse into children */
    4223             :     }
    4224     2527428 :     else if (IsA(node, CoerceViaIO))
    4225             :     {
    4226        7918 :         CoerceViaIO *iocoerce = (CoerceViaIO *) node;
    4227             :         Oid         iofunc;
    4228             :         Oid         typioparam;
    4229             :         bool        typisvarlena;
    4230             : 
    4231             :         /* check the result type's input function */
    4232        7918 :         getTypeInputInfo(iocoerce->resulttype,
    4233             :                          &iofunc, &typioparam);
    4234        7918 :         add_function_cost(context->root, iofunc, NULL,
    4235             :                           &context->total);
    4236             :         /* check the input type's output function */
    4237        7918 :         getTypeOutputInfo(exprType((Node *) iocoerce->arg),
    4238             :                           &iofunc, &typisvarlena);
    4239        7918 :         add_function_cost(context->root, iofunc, NULL,
    4240             :                           &context->total);
    4241             :     }
    4242     2519510 :     else if (IsA(node, ArrayCoerceExpr))
    4243             :     {
    4244         374 :         ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
    4245             :         QualCost    perelemcost;
    4246             : 
    4247         374 :         cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
    4248             :                             context->root);
    4249         374 :         context->total.startup += perelemcost.startup;
    4250         374 :         if (perelemcost.per_tuple > 0)
    4251          38 :             context->total.per_tuple += perelemcost.per_tuple *
    4252          38 :                 estimate_array_length((Node *) acoerce->arg);
    4253             :     }
    4254     2519136 :     else if (IsA(node, RowCompareExpr))
    4255             :     {
    4256             :         /* Conservatively assume we will check all the columns */
    4257         104 :         RowCompareExpr *rcexpr = (RowCompareExpr *) node;
    4258             :         ListCell   *lc;
    4259             : 
    4260         348 :         foreach(lc, rcexpr->opnos)
    4261             :         {
    4262         244 :             Oid         opid = lfirst_oid(lc);
    4263             : 
    4264         244 :             add_function_cost(context->root, get_opcode(opid), NULL,
    4265             :                               &context->total);
    4266             :         }
    4267             :     }
    4268     2519032 :     else if (IsA(node, MinMaxExpr) ||
    4269     2518934 :              IsA(node, SQLValueFunction) ||
    4270     2517034 :              IsA(node, XmlExpr) ||
    4271     2516678 :              IsA(node, CoerceToDomain) ||
    4272     2487230 :              IsA(node, NextValueExpr))
    4273             :     {
    4274             :         /* Treat all these as having cost 1 */
    4275       31980 :         context->total.per_tuple += cpu_operator_cost;
    4276             :     }
    4277     2487052 :     else if (IsA(node, CurrentOfExpr))
    4278             :     {
    4279             :         /* Report high cost to prevent selection of anything but TID scan */
    4280         384 :         context->total.startup += disable_cost;
    4281             :     }
    4282     2486668 :     else if (IsA(node, SubLink))
    4283             :     {
    4284             :         /* This routine should not be applied to un-planned expressions */
    4285           0 :         elog(ERROR, "cannot handle unplanned sub-select");
    4286             :     }
    4287     2486668 :     else if (IsA(node, SubPlan))
    4288             :     {
    4289             :         /*
    4290             :          * A subplan node in an expression typically indicates that the
    4291             :          * subplan will be executed on each evaluation, so charge accordingly.
    4292             :          * (Sub-selects that can be executed as InitPlans have already been
    4293             :          * removed from the expression.)
    4294             :          */
    4295       63140 :         SubPlan    *subplan = (SubPlan *) node;
    4296             : 
    4297       63140 :         context->total.startup += subplan->startup_cost;
    4298       63140 :         context->total.per_tuple += subplan->per_call_cost;
    4299             : 
    4300             :         /*
    4301             :          * We don't want to recurse into the testexpr, because it was already
    4302             :          * counted in the SubPlan node's costs.  So we're done.
    4303             :          */
    4304       63140 :         return false;
    4305             :     }
    4306     2423528 :     else if (IsA(node, AlternativeSubPlan))
    4307             :     {
    4308             :         /*
    4309             :          * Arbitrarily use the first alternative plan for costing.  (We should
    4310             :          * certainly only include one alternative, and we don't yet have
    4311             :          * enough information to know which one the executor is most likely to
    4312             :          * use.)
    4313             :          */
    4314        1528 :         AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
    4315             : 
    4316        1528 :         return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
    4317             :                                      context);
    4318             :     }
    4319     2422000 :     else if (IsA(node, PlaceHolderVar))
    4320             :     {
    4321             :         /*
    4322             :          * A PlaceHolderVar should be given cost zero when considering general
    4323             :          * expression evaluation costs.  The expense of doing the contained
    4324             :          * expression is charged as part of the tlist eval costs of the scan
    4325             :          * or join where the PHV is first computed (see set_rel_width and
    4326             :          * add_placeholders_to_joinrel).  If we charged it again here, we'd be
    4327             :          * double-counting the cost for each level of plan that the PHV
    4328             :          * bubbles up through.  Hence, return without recursing into the
    4329             :          * phexpr.
    4330             :          */
    4331        1152 :         return false;
    4332             :     }
    4333             : 
    4334             :     /* recurse into children */
    4335     3157468 :     return expression_tree_walker(node, cost_qual_eval_walker,
    4336             :                                   (void *) context);
    4337             : }
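
Of the rules above, the ScalarArrayOpExpr one is the easiest to mis-read; restated as plain arithmetic (the procost and array length below are invented, and for a simple function add_function_cost essentially charges procost * cpu_operator_cost per call):

    /*
     * ScalarArrayOpExpr charge: the comparison function's per-call cost
     * applied to about half of the array elements.
     */
    #include <stdio.h>

    int main(void)
    {
        double cpu_operator_cost = 0.0025;
        double procost = 1.0;       /* pg_proc.procost of the comparison op */
        double array_length = 10.0; /* estimate_array_length(), say */

        double per_tuple = procost * cpu_operator_cost * array_length * 0.5;

        /* e.g. "x = ANY ('{1,...,10}')" costs about five comparisons per row */
        printf("per-tuple charge: %.5f\n", per_tuple);
        return 0;
    }
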
    4338             : 
    4339             : /*
    4340             :  * get_restriction_qual_cost
    4341             :  *    Compute evaluation costs of a baserel's restriction quals, plus any
    4342             :  *    movable join quals that have been pushed down to the scan.
    4343             :  *    Results are returned into *qpqual_cost.
    4344             :  *
    4345             :  * This is a convenience subroutine that works for seqscans and other cases
    4346             :  * where all the given quals will be evaluated the hard way.  It's not useful
    4347             :  * for cost_index(), for example, where the index machinery takes care of
    4348             :  * some of the quals.  We assume baserestrictcost was previously set by
    4349             :  * set_baserel_size_estimates().
    4350             :  */
    4351             : static void
    4352      613190 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
    4353             :                           ParamPathInfo *param_info,
    4354             :                           QualCost *qpqual_cost)
    4355             : {
    4356      613190 :     if (param_info)
    4357             :     {
    4358             :         /* Include costs of pushed-down clauses */
    4359      102474 :         cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
    4360             : 
    4361      102474 :         qpqual_cost->startup += baserel->baserestrictcost.startup;
    4362      102474 :         qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
    4363             :     }
    4364             :     else
    4365      510716 :         *qpqual_cost = baserel->baserestrictcost;
    4366      613190 : }
    4367             : 
    4368             : 
    4369             : /*
    4370             :  * compute_semi_anti_join_factors
    4371             :  *    Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
    4372             :  *    can be expected to scan.
    4373             :  *
    4374             :  * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
    4375             :  * inner rows as soon as it finds a match to the current outer row.
    4376             :  * The same happens if we have detected the inner rel is unique.
    4377             :  * We should therefore adjust some of the cost components for this effect.
    4378             :  * This function computes some estimates needed for these adjustments.
    4379             :  * These estimates will be the same regardless of the particular paths used
    4380             :  * for the outer and inner relation, so we compute these once and then pass
    4381             :  * them to all the join cost estimation functions.
    4382             :  *
    4383             :  * Input parameters:
    4384             :  *  joinrel: join relation under consideration
    4385             :  *  outerrel: outer relation under consideration
    4386             :  *  innerrel: inner relation under consideration
    4387             :  *  jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
    4388             :  *  sjinfo: SpecialJoinInfo relevant to this join
    4389             :  *  restrictlist: join quals
    4390             :  * Output parameters:
    4391             :  *  *semifactors is filled in (see pathnodes.h for field definitions)
    4392             :  */
    4393             : void
    4394      119724 : compute_semi_anti_join_factors(PlannerInfo *root,
    4395             :                                RelOptInfo *joinrel,
    4396             :                                RelOptInfo *outerrel,
    4397             :                                RelOptInfo *innerrel,
    4398             :                                JoinType jointype,
    4399             :                                SpecialJoinInfo *sjinfo,
    4400             :                                List *restrictlist,
    4401             :                                SemiAntiJoinFactors *semifactors)
    4402             : {
    4403             :     Selectivity jselec;
    4404             :     Selectivity nselec;
    4405             :     Selectivity avgmatch;
    4406             :     SpecialJoinInfo norm_sjinfo;
    4407             :     List       *joinquals;
    4408             :     ListCell   *l;
    4409             : 
    4410             :     /*
    4411             :      * In an ANTI join, we must ignore clauses that are "pushed down", since
    4412             :      * those won't affect the match logic.  In a SEMI join, we do not
    4413             :      * distinguish joinquals from "pushed down" quals, so just use the whole
    4414             :      * restrictinfo list.  For other outer join types, we should consider only
    4415             :      * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
    4416             :      */
    4417      119724 :     if (IS_OUTER_JOIN(jointype))
    4418             :     {
    4419       44796 :         joinquals = NIL;
    4420       98062 :         foreach(l, restrictlist)
    4421             :         {
    4422       53266 :             RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
    4423             : 
    4424       53266 :             if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
    4425       51374 :                 joinquals = lappend(joinquals, rinfo);
    4426             :         }
    4427             :     }
    4428             :     else
    4429       74928 :         joinquals = restrictlist;
    4430             : 
    4431             :     /*
    4432             :      * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
    4433             :      */
    4434      119724 :     jselec = clauselist_selectivity(root,
    4435             :                                     joinquals,
    4436             :                                     0,
    4437             :                                     (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
    4438             :                                     sjinfo);
    4439             : 
    4440             :     /*
    4441             :      * Also get the normal inner-join selectivity of the join clauses.
    4442             :      */
    4443      119724 :     norm_sjinfo.type = T_SpecialJoinInfo;
    4444      119724 :     norm_sjinfo.min_lefthand = outerrel->relids;
    4445      119724 :     norm_sjinfo.min_righthand = innerrel->relids;
    4446      119724 :     norm_sjinfo.syn_lefthand = outerrel->relids;
    4447      119724 :     norm_sjinfo.syn_righthand = innerrel->relids;
    4448      119724 :     norm_sjinfo.jointype = JOIN_INNER;
    4449             :     /* we don't bother trying to make the remaining fields valid */
    4450      119724 :     norm_sjinfo.lhs_strict = false;
    4451      119724 :     norm_sjinfo.delay_upper_joins = false;
    4452      119724 :     norm_sjinfo.semi_can_btree = false;
    4453      119724 :     norm_sjinfo.semi_can_hash = false;
    4454      119724 :     norm_sjinfo.semi_operators = NIL;
    4455      119724 :     norm_sjinfo.semi_rhs_exprs = NIL;
    4456             : 
    4457      119724 :     nselec = clauselist_selectivity(root,
    4458             :                                     joinquals,
    4459             :                                     0,
    4460             :                                     JOIN_INNER,
    4461             :                                     &norm_sjinfo);
    4462             : 
    4463             :     /* Avoid leaking a lot of ListCells */
    4464      119724 :     if (IS_OUTER_JOIN(jointype))
    4465       44796 :         list_free(joinquals);
    4466             : 
    4467             :     /*
    4468             :      * jselec can be interpreted as the fraction of outer-rel rows that have
    4469             :      * any matches (this is true for both SEMI and ANTI cases).  And nselec is
    4470             :      * the fraction of the Cartesian product that matches.  So, the average
    4471             :      * number of matches for each outer-rel row that has at least one match is
    4472             :      * nselec * inner_rows / jselec.
    4473             :      *
    4474             :      * Note: it is correct to use the inner rel's "rows" count here, even
    4475             :      * though we might later be considering a parameterized inner path with
    4476             :      * fewer rows.  This is because we have included all the join clauses in
    4477             :      * the selectivity estimate.
    4478             :      */
    4479      119724 :     if (jselec > 0)              /* protect against zero divide */
    4480             :     {
    4481      119562 :         avgmatch = nselec * innerrel->rows / jselec;
    4482             :         /* Clamp to sane range */
    4483      119562 :         avgmatch = Max(1.0, avgmatch);
    4484             :     }
    4485             :     else
    4486         162 :         avgmatch = 1.0;
    4487             : 
    4488      119724 :     semifactors->outer_match_frac = jselec;
    4489      119724 :     semifactors->match_count = avgmatch;
    4490      119724 : }
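
A worked instance of that arithmetic, with invented selectivities:

    /*
     * semifactors arithmetic: avgmatch = nselec * inner_rows / jselec,
     * clamped to at least 1.0.  All inputs are hypothetical.
     */
    #include <stdio.h>

    int main(void)
    {
        double jselec = 0.2;    /* fraction of outer rows with any match */
        double nselec = 0.001;  /* plain inner-join selectivity */
        double inner_rows = 5000.0;
        double avgmatch = 1.0;  /* fallback, protects against zero divide */

        if (jselec > 0)
        {
            avgmatch = nselec * inner_rows / jselec;
            if (avgmatch < 1.0)
                avgmatch = 1.0; /* clamp to sane range */
        }

        /* 0.001 * 5000 / 0.2 = 25 matches per matched outer row */
        printf("outer_match_frac=%.2f match_count=%.1f\n", jselec, avgmatch);
        return 0;
    }
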
    4491             : 
    4492             : /*
    4493             :  * has_indexed_join_quals
    4494             :  *    Check whether all the joinquals of a nestloop join are used as
    4495             :  *    inner index quals.
    4496             :  *
    4497             :  * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
    4498             :  * indexscan) that uses all the joinquals as indexquals, we can assume that an
    4499             :  * unmatched outer tuple is cheap to process, whereas otherwise it's probably
    4500             :  * expensive.
    4501             :  */
    4502             : static bool
    4503      393504 : has_indexed_join_quals(NestPath *joinpath)
    4504             : {
    4505      393504 :     Relids      joinrelids = joinpath->path.parent->relids;
    4506      393504 :     Path       *innerpath = joinpath->innerjoinpath;
    4507             :     List       *indexclauses;
    4508             :     bool        found_one;
    4509             :     ListCell   *lc;
    4510             : 
    4511             :     /* If join still has quals to evaluate, it's not fast */
    4512      393504 :     if (joinpath->joinrestrictinfo != NIL)
    4513      305528 :         return false;
    4514             :     /* Nor if the inner path isn't parameterized at all */
    4515       87976 :     if (innerpath->param_info == NULL)
    4516        3200 :         return false;
    4517             : 
    4518             :     /* Find the indexclauses list for the inner scan */
    4519       84776 :     switch (innerpath->pathtype)
    4520             :     {
    4521       84160 :         case T_IndexScan:
    4522             :         case T_IndexOnlyScan:
    4523       84160 :             indexclauses = ((IndexPath *) innerpath)->indexclauses;
    4524       84160 :             break;
    4525         164 :         case T_BitmapHeapScan:
    4526             :             {
    4527             :                 /* Accept only a simple bitmap scan, not AND/OR cases */
    4528         164 :                 Path       *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
    4529             : 
    4530         164 :                 if (IsA(bmqual, IndexPath))
    4531         132 :                     indexclauses = ((IndexPath *) bmqual)->indexclauses;
    4532             :                 else
    4533          32 :                     return false;
    4534         132 :                 break;
    4535             :             }
    4536         452 :         default:
    4537             : 
    4538             :             /*
    4539             :              * If it's not a simple indexscan, it probably doesn't run quickly
    4540             :              * for zero rows out, even if it's a parameterized path using all
    4541             :              * the joinquals.
    4542             :              */
    4543         452 :             return false;
    4544             :     }
    4545             : 
    4546             :     /*
    4547             :      * Examine the inner path's param clauses.  Any that are from the outer
    4548             :      * path must be found in the indexclauses list, either exactly or in an
    4549             :      * equivalent form generated by equivclass.c.  Also, we must find at least
    4550             :      * one such clause, else it's a clauseless join which isn't fast.
    4551             :      */
    4552       84292 :     found_one = false;
    4553      169114 :     foreach(lc, innerpath->param_info->ppi_clauses)
    4554             :     {
    4555       85946 :         RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
    4556             : 
    4557       85946 :         if (join_clause_is_movable_into(rinfo,
    4558       85946 :                                         innerpath->parent->relids,
    4559             :                                         joinrelids))
    4560             :         {
    4561       85946 :             if (!is_redundant_with_indexclauses(rinfo, indexclauses))
    4562        1124 :                 return false;
    4563       84822 :             found_one = true;
    4564             :         }
    4565             :     }
    4566       83168 :     return found_one;
    4567             : }
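
Reduced to its skeleton, the test is: every movable parameter clause must be redundant with some index clause, and at least one such clause must exist. A sketch with clause identity standing in for the real redundancy check (the integer ids are purely illustrative):

    /*
     * Shape of the has_indexed_join_quals decision, with opaque clause ids
     * instead of RestrictInfos and exact matching instead of
     * is_redundant_with_indexclauses().
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool is_indexed(int clause, const int *idx, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (idx[i] == clause)
                return true;
        return false;
    }

    int main(void)
    {
        int  param_clauses[] = {42, 43};     /* ppi_clauses, as ids */
        int  index_clauses[] = {42, 43, 44}; /* indexclauses, as ids */
        bool found_one = false;

        for (size_t i = 0; i < 2; i++)
        {
            if (!is_indexed(param_clauses[i], index_clauses, 3))
            {
                printf("no: a joinqual is not an indexqual\n");
                return 0;
            }
            found_one = true;
        }
        printf("indexed join quals: %s\n", found_one ? "yes" : "no");
        return 0;
    }
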
    4568             : 
    4569             : 
    4570             : /*
    4571             :  * approx_tuple_count
    4572             :  *      Quick-and-dirty estimation of the number of join rows passing
    4573             :  *      a set of qual conditions.
    4574             :  *
    4575             :  * The quals can be either an implicitly-ANDed list of boolean expressions,
    4576             :  * or a list of RestrictInfo nodes (typically the latter).
    4577             :  *
    4578             :  * We intentionally compute the selectivity under JOIN_INNER rules, even
    4579             :  * if it's some type of outer join.  This is appropriate because we are
    4580             :  * trying to figure out how many tuples pass the initial merge or hash
    4581             :  * join step.
    4582             :  *
    4583             :  * This is quick-and-dirty because we bypass clauselist_selectivity, and
    4584             :  * simply multiply the independent clause selectivities together.  Now
    4585             :  * clauselist_selectivity often can't do any better than that anyhow, but
    4586             :  * for some situations (such as range constraints) it is smarter.  However,
    4587             :  * we can't effectively cache the results of clauselist_selectivity, whereas
    4588             :  * the individual clause selectivities can be and are cached.
    4589             :  *
    4590             :  * Since we are only using the results to estimate how many potential
    4591             :  * output tuples are generated and passed through qpqual checking, it
    4592             :  * seems OK to live with the approximation.
    4593             :  */
    4594             : static double
    4595      242134 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
    4596             : {
    4597             :     double      tuples;
    4598      242134 :     double      outer_tuples = path->outerjoinpath->rows;
    4599      242134 :     double      inner_tuples = path->innerjoinpath->rows;
    4600             :     SpecialJoinInfo sjinfo;
    4601      242134 :     Selectivity selec = 1.0;
    4602             :     ListCell   *l;
    4603             : 
    4604             :     /*
    4605             :      * Make up a SpecialJoinInfo for JOIN_INNER semantics.
    4606             :      */
    4607      242134 :     sjinfo.type = T_SpecialJoinInfo;
    4608      242134 :     sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
    4609      242134 :     sjinfo.min_righthand = path->innerjoinpath->parent->relids;
    4610      242134 :     sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
    4611      242134 :     sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
    4612      242134 :     sjinfo.jointype = JOIN_INNER;
    4613             :     /* we don't bother trying to make the remaining fields valid */
    4614      242134 :     sjinfo.lhs_strict = false;
    4615      242134 :     sjinfo.delay_upper_joins = false;
    4616      242134 :     sjinfo.semi_can_btree = false;
    4617      242134 :     sjinfo.semi_can_hash = false;
    4618      242134 :     sjinfo.semi_operators = NIL;
    4619      242134 :     sjinfo.semi_rhs_exprs = NIL;
    4620             : 
    4621             :     /* Get the approximate selectivity */
    4622      535884 :     foreach(l, quals)
    4623             :     {
    4624      293750 :         Node       *qual = (Node *) lfirst(l);
    4625             : 
    4626             :         /* Note that clause_selectivity will be able to cache its result */
    4627      293750 :         selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
    4628             :     }
    4629             : 
    4630             :     /* Apply it to the input relation sizes */
    4631      242134 :     tuples = selec * outer_tuples * inner_tuples;
    4632             : 
    4633      242134 :     return clamp_row_est(tuples);
    4634             : }
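
In isolation, the estimator is just a product of per-clause selectivities applied to the raw input sizes. A sketch with invented selectivities, using the same simplified clamp as the earlier examples:

    /*
     * approx_tuple_count in miniature: multiply independent clause
     * selectivities (no clauselist_selectivity smarts) and clamp.
     */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double outer_tuples = 10000.0;
        double inner_tuples = 2000.0;
        double clause_selec[] = {0.005, 0.5}; /* hypothetical values */
        double selec = 1.0;

        for (int i = 0; i < 2; i++)
            selec *= clause_selec[i];         /* independence assumption */

        double tuples = selec * outer_tuples * inner_tuples;
        if (tuples < 1.0)                     /* clamp_row_est, simplified */
            tuples = 1.0;
        else
            tuples = rint(tuples);

        printf("approx join tuples: %.0f\n", tuples);  /* 50000 here */
        return 0;
    }
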
    4635             : 
    4636             : 
    4637             : /*
    4638             :  * set_baserel_size_estimates
    4639             :  *      Set the size estimates for the given base relation.
    4640             :  *
    4641             :  * The rel's targetlist and restrictinfo list must have been constructed
    4642             :  * already, and rel->tuples must be set.
    4643             :  *
    4644             :  * We set the following fields of the rel node:
    4645             :  *  rows: the estimated number of output tuples (after applying
    4646             :  *        restriction clauses).
    4647             :  *  width: the estimated average output tuple width in bytes.
    4648             :  *  baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
    4649             :  */
    4650             : void
    4651      294322 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
    4652             : {
    4653             :     double      nrows;
    4654             : 
    4655             :     /* Should only be applied to base relations */
    4656             :     Assert(rel->relid > 0);
    4657             : 
    4658      588644 :     nrows = rel->tuples *
    4659      294322 :         clauselist_selectivity(root,
    4660             :                                rel->baserestrictinfo,
    4661             :                                0,
    4662             :                                JOIN_INNER,
    4663             :                                NULL);
    4664             : 
    4665      294322 :     rel->rows = clamp_row_est(nrows);
    4666             : 
    4667      294322 :     cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
    4668             : 
    4669      294322 :     set_rel_width(root, rel);
    4670      294322 : }
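
Row estimates here pass through clamp_row_est(), defined near the top of this file. A sketch consistent with its behavior, which forces the estimate to at least one row (avoiding divide-by-zero when interpolating costs) and rounds it to an integer; rint() is from <math.h>:

    /*
     * Sketch of clamp_row_est()'s behavior (the real definition lives
     * earlier in this file); rint() is from <math.h>.
     */
    static double
    clamp_row_est_sketch(double nrows)
    {
        if (nrows <= 1.0)
            nrows = 1.0;            /* never estimate zero rows */
        else
            nrows = rint(nrows);    /* make it an integer */
        return nrows;
    }
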
    4671             : 
    4672             : /*
    4673             :  * get_parameterized_baserel_size
    4674             :  *      Make a size estimate for a parameterized scan of a base relation.
    4675             :  *
    4676             :  * 'param_clauses' lists the additional join clauses to be used.
    4677             :  *
    4678             :  * set_baserel_size_estimates must have been applied already.
    4679             :  */
    4680             : double
    4681       72770 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
    4682             :                                List *param_clauses)
    4683             : {
    4684             :     List       *allclauses;
    4685             :     double      nrows;
    4686             : 
    4687             :     /*
    4688             :      * Estimate the number of rows returned by the parameterized scan, knowing
    4689             :      * that it will apply all the extra join clauses as well as the rel's own
    4690             :      * restriction clauses.  Note that we force the clauses to be treated as
    4691             :      * non-join clauses during selectivity estimation.
    4692             :      */
    4693       72770 :     allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
    4694      145540 :     nrows = rel->tuples *
    4695       72770 :         clauselist_selectivity(root,
    4696             :                                allclauses,
    4697       72770 :                                rel->relid,   /* do not use 0! */
    4698             :                                JOIN_INNER,
    4699             :                                NULL);
    4700       72770 :     nrows = clamp_row_est(nrows);
    4701             :     /* For safety, make sure result is not more than the base estimate */
    4702       72770 :     if (nrows > rel->rows)
    4703           0 :         nrows = rel->rows;
    4704       72770 :     return nrows;
    4705             : }
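
The final cap against rel->rows is the same safety pattern used again in get_parameterized_joinrel_size below; as a tiny sketch (hypothetical helper):

    /* Sketch: cap a parameterized estimate by the unparameterized one. */
    static double
    cap_param_rows_sketch(double param_rows, double base_rows)
    {
        return (param_rows > base_rows) ? base_rows : param_rows;
    }
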
    4706             : 
    4707             : /*
    4708             :  * set_joinrel_size_estimates
    4709             :  *      Set the size estimates for the given join relation.
    4710             :  *
    4711             :  * The rel's targetlist must have been constructed already, and a
    4712             :  * restriction clause list that matches the given component rels must
    4713             :  * be provided.
    4714             :  *
    4715             :  * Since there is more than one way to make a joinrel for more than two
    4716             :  * base relations, the results we get here could depend on which component
    4717             :  * rel pair is provided.  In theory we should get the same answers no matter
    4718             :  * which pair is provided; in practice, since the selectivity estimation
    4719             :  * routines don't handle all cases equally well, we might not.  But there's
    4720             :  * not much to be done about it.  (Would it make sense to repeat the
    4721             :  * calculations for each pair of input rels that's encountered, and somehow
    4722             :  * average the results?  Probably way more trouble than it's worth, and
    4723             :  * anyway we must keep the rowcount estimate the same for all paths for the
    4724             :  * joinrel.)
    4725             :  *
    4726             :  * We set only the rows field here.  The reltarget field was already set by
    4727             :  * build_joinrel_tlist, and baserestrictcost is not used for join rels.
    4728             :  */
    4729             : void
    4730      107104 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
    4731             :                            RelOptInfo *outer_rel,
    4732             :                            RelOptInfo *inner_rel,
    4733             :                            SpecialJoinInfo *sjinfo,
    4734             :                            List *restrictlist)
    4735             : {
    4736      107104 :     rel->rows = calc_joinrel_size_estimate(root,
    4737             :                                            rel,
    4738             :                                            outer_rel,
    4739             :                                            inner_rel,
    4740             :                                            outer_rel->rows,
    4741             :                                            inner_rel->rows,
    4742             :                                            sjinfo,
    4743             :                                            restrictlist);
    4744      107104 : }
    4745             : 
    4746             : /*
    4747             :  * get_parameterized_joinrel_size
    4748             :  *      Make a size estimate for a parameterized scan of a join relation.
    4749             :  *
    4750             :  * 'rel' is the joinrel under consideration.
    4751             :  * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
    4752             :  *      produce the relations being joined.
    4753             :  * 'sjinfo' is any SpecialJoinInfo relevant to this join.
    4754             :  * 'restrict_clauses' lists the join clauses that need to be applied at the
    4755             :  * join node (including any movable clauses that were moved down to this join,
    4756             :  * and not including any movable clauses that were pushed down into the
    4757             :  * child paths).
    4758             :  *
    4759             :  * set_joinrel_size_estimates must have been applied already.
    4760             :  */
    4761             : double
    4762        1874 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
    4763             :                                Path *outer_path,
    4764             :                                Path *inner_path,
    4765             :                                SpecialJoinInfo *sjinfo,
    4766             :                                List *restrict_clauses)
    4767             : {
    4768             :     double      nrows;
    4769             : 
    4770             :     /*
    4771             :      * Estimate the number of rows returned by the parameterized join as the
    4772             :      * sizes of the input paths times the selectivity of the clauses that have
    4773             :      * ended up at this join node.
    4774             :      *
    4775             :      * As with set_joinrel_size_estimates, the rowcount estimate could depend
    4776             :      * on the pair of input paths provided, though ideally we'd get the same
    4777             :      * estimate for any pair with the same parameterization.
    4778             :      */
    4779        1874 :     nrows = calc_joinrel_size_estimate(root,
    4780             :                                        rel,
    4781             :                                        outer_path->parent,
    4782             :                                        inner_path->parent,
    4783             :                                        outer_path->rows,
    4784             :                                        inner_path->rows,
    4785             :                                        sjinfo,
    4786             :                                        restrict_clauses);
    4787             :     /* For safety, make sure result is not more than the base estimate */
    4788        1874 :     if (nrows > rel->rows)
    4789           8 :         nrows = rel->rows;
    4790        1874 :     return nrows;
    4791             : }
    4792             : 
    4793             : /*
    4794             :  * calc_joinrel_size_estimate
    4795             :  *      Workhorse for set_joinrel_size_estimates and
    4796             :  *      get_parameterized_joinrel_size.
    4797             :  *
    4798             :  * outer_rel/inner_rel are the relations being joined, but they should be
    4799             :  * assumed to have sizes outer_rows/inner_rows; those numbers might be less
    4800             :  * than what outer_rel->rows and inner_rel->rows say, for parameterized paths.
    4801             :  */
    4802             : static double
    4803      108978 : calc_joinrel_size_estimate(PlannerInfo *root,
    4804             :                            RelOptInfo *joinrel,
    4805             :                            RelOptInfo *outer_rel,
    4806             :                            RelOptInfo *inner_rel,
    4807             :                            double outer_rows,
    4808             :                            double inner_rows,
    4809             :                            SpecialJoinInfo *sjinfo,
    4810             :                            List *restrictlist_in)
    4811             : {
    4812             :     /* This apparently-useless variable dodges a compiler bug in VS2013: */
    4813      108978 :     List       *restrictlist = restrictlist_in;
    4814      108978 :     JoinType    jointype = sjinfo->jointype;
    4815             :     Selectivity fkselec;
    4816             :     Selectivity jselec;
    4817             :     Selectivity pselec;
    4818             :     double      nrows;
    4819             : 
    4820             :     /*
    4821             :      * Compute joinclause selectivity.  Note that we are only considering
    4822             :      * clauses that become restriction clauses at this join level; we are not
    4823             :      * double-counting them because they were not considered in estimating the
    4824             :      * sizes of the component rels.
    4825             :      *
    4826             :      * First, see whether any of the joinclauses can be matched to known FK
    4827             :      * constraints.  If so, drop those clauses from the restrictlist, and
    4828             :      * instead estimate their selectivity using FK semantics.  (We do this
    4829             :      * without regard to whether said clauses are local or "pushed down".
    4830             :      * Probably, an FK-matching clause could never be seen as pushed down at
    4831             :      * an outer join, since it would be strict and hence would be grounds for
    4832             :      * join strength reduction.)  fkselec gets the net selectivity for
    4833             :      * FK-matching clauses, or 1.0 if there are none.
    4834             :      */
    4835      108978 :     fkselec = get_foreign_key_join_selectivity(root,
    4836             :                                                outer_rel->relids,
    4837             :                                                inner_rel->relids,
    4838             :                                                sjinfo,
    4839             :                                                &restrictlist);
    4840             : 
    4841             :     /*
    4842             :      * For an outer join, we have to distinguish the selectivity of the join's
    4843             :      * own clauses (JOIN/ON conditions) from any clauses that were "pushed
    4844             :      * down".  For inner joins we just count them all as joinclauses.
    4845             :      */
    4846      108978 :     if (IS_OUTER_JOIN(jointype))
    4847             :     {
    4848       56184 :         List       *joinquals = NIL;
    4849       56184 :         List       *pushedquals = NIL;
    4850             :         ListCell   *l;
    4851             : 
    4852             :         /* Grovel through the clauses to separate into two lists */
    4853      125034 :         foreach(l, restrictlist)
    4854             :         {
    4855       68850 :             RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
    4856             : 
    4857       68850 :             if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
    4858        1914 :                 pushedquals = lappend(pushedquals, rinfo);
    4859             :             else
    4860       66936 :                 joinquals = lappend(joinquals, rinfo);
    4861             :         }
    4862             : 
    4863             :         /* Get the separate selectivities */
    4864       56184 :         jselec = clauselist_selectivity(root,
    4865             :                                         joinquals,
    4866             :                                         0,
    4867             :                                         jointype,
    4868             :                                         sjinfo);
    4869       56184 :         pselec = clauselist_selectivity(root,
    4870             :                                         pushedquals,
    4871             :                                         0,
    4872             :                                         jointype,
    4873             :                                         sjinfo);
    4874             : 
    4875             :         /* Avoid leaking a lot of ListCells */
    4876       56184 :         list_free(joinquals);
    4877       56184 :         list_free(pushedquals);
    4878             :     }
    4879             :     else
    4880             :     {
    4881       52794 :         jselec = clauselist_selectivity(root,
    4882             :                                         restrictlist,
    4883             :                                         0,
    4884             :                                         jointype,
    4885             :                                         sjinfo);
    4886       52794 :         pselec = 0.0;           /* not used, keep compiler quiet */
    4887             :     }
    4888             : 
    4889             :     /*
    4890             :      * Basically, we multiply size of Cartesian product by selectivity.
    4891             :      *
    4892             :      * If we are doing an outer join, take that into account: the joinqual
    4893             :      * selectivity has to be clamped using the knowledge that the output must
    4894             :      * be at least as large as the non-nullable input.  However, any
    4895             :      * pushed-down quals are applied after the outer join, so their
    4896             :      * selectivity applies fully.
    4897             :      *
    4898             :      * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
    4899             :      * of LHS rows that have matches, and we apply that straightforwardly.
    4900             :      */
    4901      108978 :     switch (jointype)
    4902             :     {
    4903       50672 :         case JOIN_INNER:
    4904       50672 :             nrows = outer_rows * inner_rows * fkselec * jselec;
    4905             :             /* pselec not used */
    4906       50672 :             break;
    4907       44834 :         case JOIN_LEFT:
    4908       44834 :             nrows = outer_rows * inner_rows * fkselec * jselec;
    4909       44834 :             if (nrows < outer_rows)
    4910       26650 :                 nrows = outer_rows;
    4911       44834 :             nrows *= pselec;
    4912       44834 :             break;
    4913        1038 :         case JOIN_FULL:
    4914        1038 :             nrows = outer_rows * inner_rows * fkselec * jselec;
    4915        1038 :             if (nrows < outer_rows)
    4916         716 :                 nrows = outer_rows;
    4917        1038 :             if (nrows < inner_rows)
    4918          58 :                 nrows = inner_rows;
    4919        1038 :             nrows *= pselec;
    4920        1038 :             break;
    4921        2122 :         case JOIN_SEMI:
    4922        2122 :             nrows = outer_rows * fkselec * jselec;
    4923             :             /* pselec not used */
    4924        2122 :             break;
    4925       10312 :         case JOIN_ANTI:
    4926       10312 :             nrows = outer_rows * (1.0 - fkselec * jselec);
    4927       10312 :             nrows *= pselec;
    4928       10312 :             break;
    4929           0 :         default:
    4930             :             /* other values not expected here */
    4931           0 :             elog(ERROR, "unrecognized join type: %d", (int) jointype);
    4932             :             nrows = 0;          /* keep compiler quiet */
    4933             :             break;
    4934             :     }
    4935             : 
    4936      108978 :     return clamp_row_est(nrows);
    4937             : }
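
Stripped of the surrounding bookkeeping, the switch above reduces to one formula per join type. A hypothetical condensation (Max and clamp_row_est as used elsewhere in this file), with a worked example in the comment:

    /*
     * Hypothetical condensation of the formulas above.  Worked example:
     * outer_rows = 1000, inner_rows = 100, fkselec * jselec = 0.001 and
     * pselec = 1.0 give 100 rows for JOIN_INNER, but JOIN_LEFT is clamped
     * up to the 1000 outer rows before pselec is applied.
     */
    static double
    join_size_sketch(JoinType jointype, double outer_rows, double inner_rows,
                     double fkselec, double jselec, double pselec)
    {
        double      nrows = 0.0;

        switch (jointype)
        {
            case JOIN_INNER:
                nrows = outer_rows * inner_rows * fkselec * jselec;
                break;
            case JOIN_LEFT:
                nrows = outer_rows * inner_rows * fkselec * jselec;
                nrows = Max(nrows, outer_rows);     /* at least the LHS */
                nrows *= pselec;    /* pushed-down quals apply afterwards */
                break;
            case JOIN_FULL:
                nrows = outer_rows * inner_rows * fkselec * jselec;
                nrows = Max(nrows, Max(outer_rows, inner_rows));
                nrows *= pselec;
                break;
            case JOIN_SEMI:
                nrows = outer_rows * fkselec * jselec;
                break;
            case JOIN_ANTI:
                nrows = outer_rows * (1.0 - fkselec * jselec);
                nrows *= pselec;
                break;
            default:
                /* other join types are not expected here */
                break;
        }
        return clamp_row_est(nrows);
    }
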
    4938             : 
    4939             : /*
    4940             :  * get_foreign_key_join_selectivity
    4941             :  *      Estimate join selectivity for foreign-key-related clauses.
    4942             :  *
    4943             :  * Remove any clauses that can be matched to FK constraints from *restrictlist,
    4944             :  * and return a substitute estimate of their selectivity.  1.0 is returned
    4945             :  * when there are no such clauses.
    4946             :  *
    4947             :  * The reason for treating such clauses specially is that we can get better
    4948             :  * estimates this way than by relying on clauselist_selectivity(), especially
    4949             :  * for multi-column FKs where that function's assumption that the clauses are
    4950             :  * independent falls down badly.  But even with single-column FKs, we may be
    4951             :  * able to get a better answer when the pg_statistic stats are missing or out
    4952             :  * of date.
    4953             :  */
    4954             : static Selectivity
    4955      108978 : get_foreign_key_join_selectivity(PlannerInfo *root,
    4956             :                                  Relids outer_relids,
    4957             :                                  Relids inner_relids,
    4958             :                                  SpecialJoinInfo *sjinfo,
    4959             :                                  List **restrictlist)
    4960             : {
    4961      108978 :     Selectivity fkselec = 1.0;
    4962      108978 :     JoinType    jointype = sjinfo->jointype;
    4963      108978 :     List       *worklist = *restrictlist;
    4964             :     ListCell   *lc;
    4965             : 
    4966             :     /* Consider each FK constraint that is known to match the query */
    4967      110266 :     foreach(lc, root->fkey_list)
    4968             :     {
    4969        1288 :         ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
    4970             :         bool        ref_is_outer;
    4971             :         List       *removedlist;
    4972             :         ListCell   *cell;
    4973             : 
    4974             :         /*
    4975             :          * This FK is not relevant unless it connects a baserel on one side of
    4976             :          * this join to a baserel on the other side.
    4977             :          */
    4978        2338 :         if (bms_is_member(fkinfo->con_relid, outer_relids) &&
    4979        1050 :             bms_is_member(fkinfo->ref_relid, inner_relids))
    4980         922 :             ref_is_outer = false;
    4981         604 :         else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
    4982         238 :                  bms_is_member(fkinfo->con_relid, inner_relids))
    4983          90 :             ref_is_outer = true;
    4984             :         else
    4985         276 :             continue;
    4986             : 
    4987             :         /*
    4988             :          * If we're dealing with a semi/anti join, and the FK's referenced
    4989             :          * relation is on the outside, then knowledge of the FK doesn't help
    4990             :          * us figure out what we need to know (which is the fraction of outer
    4991             :          * rows that have matches).  On the other hand, if the referenced rel
    4992             :          * is on the inside, then all outer rows must have matches in the
    4993             :          * referenced table (ignoring nulls).  But any restriction or join
    4994             :          * clauses that filter that table will reduce the fraction of matches.
    4995             :          * We can account for restriction clauses, but it's too hard to guess
    4996             :          * how many table rows would get through a join that's inside the RHS.
    4997             :          * Hence, if either case applies, punt and ignore the FK.
    4998             :          */
    4999        1012 :         if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
    5000         694 :             (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
    5001           4 :             continue;
    5002             : 
    5003             :         /*
    5004             :          * Modify the restrictlist by removing clauses that match the FK (and
    5005             :          * putting them into removedlist instead).  It seems unsafe to modify
    5006             :          * the originally-passed List structure, so we make a shallow copy the
    5007             :          * first time through.
    5008             :          */
    5009        1008 :         if (worklist == *restrictlist)
    5010         854 :             worklist = list_copy(worklist);
    5011             : 
    5012        1008 :         removedlist = NIL;
    5013        2064 :         foreach(cell, worklist)
    5014             :         {
    5015        1056 :             RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
    5016        1056 :             bool        remove_it = false;
    5017             :             int         i;
    5018             : 
    5019             :             /* Drop this clause if it matches any column of the FK */
    5020        1282 :             for (i = 0; i < fkinfo->nkeys; i++)
    5021             :             {
    5022        1262 :                 if (rinfo->parent_ec)
    5023             :                 {
    5024             :                     /*
    5025             :                      * EC-derived clauses can only match by EC.  It is okay to
    5026             :                      * consider any clause derived from the same EC as
    5027             :                      * matching the FK: even if equivclass.c chose to generate
    5028             :                      * a clause equating some other pair of Vars, it could
    5029             :                      * have generated one equating the FK's Vars.  So for
    5030             :                      * purposes of estimation, we can act as though it did so.
    5031             :                      *
    5032             :                      * Note: checking parent_ec is a bit of a cheat because
    5033             :                      * there are EC-derived clauses that don't have parent_ec
    5034             :                      * set; but such clauses must compare expressions that
    5035             :                      * aren't just Vars, so they cannot match the FK anyway.
    5036             :                      */
    5037         210 :                     if (fkinfo->eclass[i] == rinfo->parent_ec)
    5038             :                     {
    5039         206 :                         remove_it = true;
    5040         206 :                         break;
    5041             :                     }
    5042             :                 }
    5043             :                 else
    5044             :                 {
    5045             :                     /*
    5046             :                      * Otherwise, see if rinfo was previously matched to FK as
    5047             :                      * a "loose" clause.
    5048             :                      */
    5049        1052 :                     if (list_member_ptr(fkinfo->rinfos[i], rinfo))
    5050             :                     {
    5051         830 :                         remove_it = true;
    5052         830 :                         break;
    5053             :                     }
    5054             :                 }
    5055             :             }
    5056        1056 :             if (remove_it)
    5057             :             {
    5058        1036 :                 worklist = foreach_delete_current(worklist, cell);
    5059        1036 :                 removedlist = lappend(removedlist, rinfo);
    5060             :             }
    5061             :         }
    5062             : 
    5063             :         /*
    5064             :          * If we failed to remove all the matching clauses we expected to
    5065             :          * find, chicken out and ignore this FK; applying its selectivity
    5066             :          * might result in double-counting.  Put any clauses we did manage to
    5067             :          * remove back into the worklist.
    5068             :          *
    5069             :          * Since the matching clauses are known not outerjoin-delayed, they
    5070             :          * would normally have appeared in the initial joinclause list.  If we
    5071             :          * didn't find them, there are two possibilities:
    5072             :          *
    5073             :          * 1. If the FK match is based on an EC that is ec_has_const, it won't
    5074             :          * have generated any join clauses at all.  We discount such ECs while
    5075             :          * checking to see if we have "all" the clauses.  (Below, we'll adjust
    5076             :          * the selectivity estimate for this case.)
    5077             :          *
    5078             :          * 2. The clauses were matched to some other FK in a previous
    5079             :          * iteration of this loop, and thus removed from worklist.  (A likely
    5080             :          * case is that two FKs are matched to the same EC; there will be only
    5081             :          * one EC-derived clause in the initial list, so the first FK will
    5082             :          * consume it.)  Applying both FKs' selectivity independently risks
    5083             :          * underestimating the join size; in particular, this would undo one
    5084             :          * of the main things that ECs were invented for, namely to avoid
    5085             :          * double-counting the selectivity of redundant equality conditions.
    5086             :          * Later we might think of a reasonable way to combine the estimates,
    5087             :          * but for now, just punt, since this is a fairly uncommon situation.
    5088             :          */
    5089        1008 :         if (removedlist == NIL ||
    5090         846 :             list_length(removedlist) !=
    5091         846 :             (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
    5092             :         {
    5093         162 :             worklist = list_concat(worklist, removedlist);
    5094         162 :             continue;
    5095             :         }
    5096             : 
    5097             :         /*
    5098             :          * Finally we get to the payoff: estimate selectivity using the
    5099             :          * knowledge that each referencing row will match exactly one row in
    5100             :          * the referenced table.
    5101             :          *
    5102             :          * XXX that's not true in the presence of nulls in the referencing
    5103             :          * column(s), so in principle we should derate the estimate for those.
    5104             :          * However (1) if there are any strict restriction clauses for the
    5105             :          * referencing column(s) elsewhere in the query, derating here would
    5106             :          * be double-counting the null fraction, and (2) it's not very clear
    5107             :          * how to combine null fractions for multiple referencing columns. So
    5108             :          * we do nothing for now about correcting for nulls.
    5109             :          *
    5110             :          * XXX another point here is that if either side of an FK constraint
    5111             :          * is an inheritance parent, we estimate as though the constraint
    5112             :          * covers all its children as well.  This is not an unreasonable
    5113             :          * assumption for a referencing table, ie the user probably applied
    5114             :          * identical constraints to all child tables (though perhaps we ought
    5115             :          * to check that).  But it's not possible to have done that for a
    5116             :          * referenced table.  Fortunately, precisely because that doesn't
    5117             :          * work, it is uncommon in practice to have an FK referencing a parent
    5118             :          * table.  So, at least for now, disregard inheritance here.
    5119             :          */
    5120         846 :         if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
    5121         540 :         {
    5122             :             /*
    5123             :              * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
    5124             :              * referenced table is exactly the inside of the join.  The join
    5125             :              * selectivity is defined as the fraction of LHS rows that have
    5126             :              * matches.  The FK implies that every LHS row has a match *in the
    5127             :              * referenced table*; but any restriction clauses on it will
    5128             :              * reduce the number of matches.  Hence we take the join
    5129             :              * selectivity as equal to the selectivity of the table's
    5130             :              * restriction clauses, which is rows / tuples; but we must guard
    5131             :              * against tuples == 0.
    5132             :              */
    5133         540 :             RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
    5134         540 :             double      ref_tuples = Max(ref_rel->tuples, 1.0);
    5135             : 
    5136         540 :             fkselec *= ref_rel->rows / ref_tuples;
    5137             :         }
    5138             :         else
    5139             :         {
    5140             :             /*
    5141             :              * Otherwise, selectivity is exactly 1/referenced-table-size; but
    5142             :              * guard against tuples == 0.  Note we should use the raw table
    5143             :              * tuple count, not any estimate of its filtered or joined size.
    5144             :              */
    5145         306 :             RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
    5146         306 :             double      ref_tuples = Max(ref_rel->tuples, 1.0);
    5147             : 
    5148         306 :             fkselec *= 1.0 / ref_tuples;
    5149             :         }
    5150             : 
    5151             :         /*
    5152             :          * If any of the FK columns participated in ec_has_const ECs, then
    5153             :          * equivclass.c will have generated "var = const" restrictions for
    5154             :          * each side of the join, thus reducing the sizes of both input
    5155             :          * relations.  Taking the fkselec at face value would amount to
    5156             :          * double-counting the selectivity of the constant restriction for the
    5157             :          * referencing Var.  Hence, look for the restriction clause(s) that
    5158             :          * were applied to the referencing Var(s), and divide out their
    5159             :          * selectivity to correct for this.
    5160             :          */
    5161         846 :         if (fkinfo->nconst_ec > 0)
    5162             :         {
    5163          16 :             for (int i = 0; i < fkinfo->nkeys; i++)
    5164             :             {
    5165          12 :                 EquivalenceClass *ec = fkinfo->eclass[i];
    5166             : 
    5167          12 :                 if (ec && ec->ec_has_const)
    5168             :                 {
    5169           4 :                     EquivalenceMember *em = fkinfo->fk_eclass_member[i];
    5170           4 :                     RestrictInfo *rinfo = find_derived_clause_for_ec_member(ec,
    5171             :                                                                             em);
    5172             : 
    5173           4 :                     if (rinfo)
    5174             :                     {
    5175             :                         Selectivity s0;
    5176             : 
    5177           4 :                         s0 = clause_selectivity(root,
    5178             :                                                 (Node *) rinfo,
    5179             :                                                 0,
    5180             :                                                 jointype,
    5181             :                                                 sjinfo);
    5182           4 :                         if (s0 > 0)
    5183           4 :                             fkselec /= s0;
    5184             :                     }
    5185             :                 }
    5186             :             }
    5187             :         }
    5188             :     }
    5189             : 
    5190      108978 :     *restrictlist = worklist;
    5191      108978 :     CLAMP_PROBABILITY(fkselec);
    5192      108978 :     return fkselec;
    5193             : }
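
The two payoff branches above boil down to a simple rule: for semi/anti joins the FK contributes the referenced rel's restriction selectivity (rows / tuples); otherwise it contributes 1 / referenced-table-size, with a guard against zero tuples. A hedged standalone sketch:

    /*
     * Sketch of the per-FK selectivity contribution (hypothetical helper).
     * ref_rows/ref_tuples are the referenced rel's post-restriction and raw
     * tuple counts; the Max() guards against ref_tuples == 0.
     */
    static Selectivity
    fk_selec_sketch(JoinType jointype, double ref_rows, double ref_tuples)
    {
        double      tuples = Max(ref_tuples, 1.0);

        if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
            return ref_rows / tuples;   /* fraction of LHS rows with a match */
        return 1.0 / tuples;            /* each referencing row matches once */
    }
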
    5194             : 
    5195             : /*
    5196             :  * set_subquery_size_estimates
    5197             :  *      Set the size estimates for a base relation that is a subquery.
    5198             :  *
    5199             :  * The rel's targetlist and restrictinfo list must have been constructed
    5200             :  * already, and the Paths for the subquery must have been completed.
    5201             :  * We look at the subquery's PlannerInfo to extract data.
    5202             :  *
    5203             :  * We set the same fields as set_baserel_size_estimates.
    5204             :  */
    5205             : void
    5206        9908 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
    5207             : {
    5208        9908 :     PlannerInfo *subroot = rel->subroot;
    5209             :     RelOptInfo *sub_final_rel;
    5210             :     ListCell   *lc;
    5211             : 
    5212             :     /* Should only be applied to base relations that are subqueries */
    5213             :     Assert(rel->relid > 0);
    5214             :     Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
    5215             : 
    5216             :     /*
    5217             :      * Copy raw number of output rows from subquery.  All of its paths should
    5218             :      * have the same output rowcount, so just look at cheapest-total.
    5219             :      */
    5220        9908 :     sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
    5221        9908 :     rel->tuples = sub_final_rel->cheapest_total_path->rows;
    5222             : 
    5223             :     /*
    5224             :      * Compute per-output-column width estimates by examining the subquery's
    5225             :      * targetlist.  For any output that is a plain Var, get the width estimate
    5226             :      * that was made while planning the subquery.  Otherwise, we leave it to
    5227             :      * set_rel_width to fill in a datatype-based default estimate.
    5228             :      */
    5229       32926 :     foreach(lc, subroot->parse->targetList)
    5230             :     {
    5231       23018 :         TargetEntry *te = lfirst_node(TargetEntry, lc);
    5232       23018 :         Node       *texpr = (Node *) te->expr;
    5233       23018 :         int32       item_width = 0;
    5234             : 
    5235             :         /* junk columns aren't visible to upper query */
    5236       23018 :         if (te->resjunk)
    5237        1080 :             continue;
    5238             : 
    5239             :         /*
    5240             :          * The subquery could be an expansion of a view that's had columns
    5241             :          * added to it since the current query was parsed, so that there are
    5242             :          * non-junk tlist columns in it that don't correspond to any column
    5243             :          * visible at our query level.  Ignore such columns.
    5244             :          */
    5245       21938 :         if (te->resno < rel->min_attr || te->resno > rel->max_attr)
    5246           0 :             continue;
    5247             : 
    5248             :         /*
    5249             :          * XXX This currently doesn't work for subqueries containing set
    5250             :          * operations, because the Vars in their tlists are bogus references
    5251             :          * to the first leaf subquery, which wouldn't give the right answer
    5252             :          * even if we could still get to its PlannerInfo.
    5253             :          *
    5254             :          * Also, the subquery could be an appendrel for which all branches are
    5255             :          * known empty due to constraint exclusion, in which case
    5256             :          * set_append_rel_pathlist will have left the attr_widths set to zero.
    5257             :          *
    5258             :          * In either case, we just leave the width estimate zero until
    5259             :          * set_rel_width fixes it.
    5260             :          */
    5261       21938 :         if (IsA(texpr, Var) &&
    5262       11082 :             subroot->parse->setOperations == NULL)
    5263             :         {
    5264       10332 :             Var        *var = (Var *) texpr;
    5265       10332 :             RelOptInfo *subrel = find_base_rel(subroot, var->varno);
    5266             : 
    5267       10332 :             item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
    5268             :         }
    5269       21938 :         rel->attr_widths[te->resno - rel->min_attr] = item_width;
    5270             :     }
    5271             : 
    5272             :     /* Now estimate number of output rows, etc */
    5273        9908 :     set_baserel_size_estimates(root, rel);
    5274        9908 : }
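
Note the indexing convention in the width-copying loop above: attr_widths arrays start at min_attr (which can be negative, to cover system columns), so attribute N lives at index N - min_attr. A minimal sketch of that lookup (hypothetical helper):

    /* Hypothetical helper showing the attr_widths offset convention. */
    static int32
    cached_attr_width_sketch(const int32 *attr_widths,
                             AttrNumber attno, AttrNumber min_attr)
    {
        /* attribute numbers start at min_attr, which may be negative */
        return attr_widths[attno - min_attr];
    }
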
    5275             : 
    5276             : /*
    5277             :  * set_function_size_estimates
    5278             :  *      Set the size estimates for a base relation that is a function call.
    5279             :  *
    5280             :  * The rel's targetlist and restrictinfo list must have been constructed
    5281             :  * already.
    5282             :  *
    5283             :  * We set the same fields as set_baserel_size_estimates.
    5284             :  */
    5285             : void
    5286       32350 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
    5287             : {
    5288             :     RangeTblEntry *rte;
    5289             :     ListCell   *lc;
    5290             : 
    5291             :     /* Should only be applied to base relations that are functions */
    5292             :     Assert(rel->relid > 0);
    5293       32350 :     rte = planner_rt_fetch(rel->relid, root);
    5294             :     Assert(rte->rtekind == RTE_FUNCTION);
    5295             : 
    5296             :     /*
    5297             :      * Estimate number of rows the functions will return. The rowcount of the
    5298             :      * node is that of the largest function result.
    5299             :      */
    5300       32350 :     rel->tuples = 0;
    5301       64916 :     foreach(lc, rte->functions)
    5302             :     {
    5303       32566 :         RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
    5304       32566 :         double      ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
    5305             : 
    5306       32566 :         if (ntup > rel->tuples)
    5307       32366 :             rel->tuples = ntup;
    5308             :     }
    5309             : 
    5310             :     /* Now estimate number of output rows, etc */
    5311       32350 :     set_baserel_size_estimates(root, rel);
    5312       32350 : }
    5313             : 
    5314             : /*
    5315             :  * set_tablefunc_size_estimates
    5316             :  *      Set the size estimates for a base relation that is a table function.
    5317             :  *
    5318             :  * The rel's targetlist and restrictinfo list must have been constructed
    5319             :  * already.
    5320             :  *
    5321             :  * We set the same fields as set_baserel_size_estimates.
    5322             :  */
    5323             : void
    5324         144 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
    5325             : {
    5326             :     /* Should only be applied to base relations that are table functions */
    5327             :     Assert(rel->relid > 0);
    5328             :     Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
    5329             : 
    5330         144 :     rel->tuples = 100;
    5331             : 
    5332             :     /* Now estimate number of output rows, etc */
    5333         144 :     set_baserel_size_estimates(root, rel);
    5334         144 : }
    5335             : 
    5336             : /*
    5337             :  * set_values_size_estimates
    5338             :  *      Set the size estimates for a base relation that is a values list.
    5339             :  *
    5340             :  * The rel's targetlist and restrictinfo list must have been constructed
    5341             :  * already.
    5342             :  *
    5343             :  * We set the same fields as set_baserel_size_estimates.
    5344             :  */
    5345             : void
    5346        4362 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
    5347             : {
    5348             :     RangeTblEntry *rte;
    5349             : 
    5350             :     /* Should only be applied to base relations that are values lists */
    5351             :     Assert(rel->relid > 0);
    5352        4362 :     rte = planner_rt_fetch(rel->relid, root);
    5353             :     Assert(rte->rtekind == RTE_VALUES);
    5354             : 
    5355             :     /*
    5356             :      * Estimate number of rows the values list will return. We know this
    5357             :      * precisely based on the list length (well, barring set-returning
    5358             :      * functions in list items, but that's a refinement not catered for
    5359             :      * anywhere else either).
    5360             :      */
    5361        4362 :     rel->tuples = list_length(rte->values_lists);
    5362             : 
    5363             :     /* Now estimate number of output rows, etc */
    5364        4362 :     set_baserel_size_estimates(root, rel);
    5365        4362 : }
    5366             : 
    5367             : /*
    5368             :  * set_cte_size_estimates
    5369             :  *      Set the size estimates for a base relation that is a CTE reference.
    5370             :  *
    5371             :  * The rel's targetlist and restrictinfo list must have been constructed
    5372             :  * already, and we need an estimate of the number of rows returned by the CTE
    5373             :  * (if a regular CTE) or the non-recursive term (if a self-reference).
    5374             :  *
    5375             :  * We set the same fields as set_baserel_size_estimates.
    5376             :  */
    5377             : void
    5378        1304 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
    5379             : {
    5380             :     RangeTblEntry *rte;
    5381             : 
    5382             :     /* Should only be applied to base relations that are CTE references */
    5383             :     Assert(rel->relid > 0);
    5384        1304 :     rte = planner_rt_fetch(rel->relid, root);
    5385             :     Assert(rte->rtekind == RTE_CTE);
    5386             : 
    5387        1304 :     if (rte->self_reference)
    5388             :     {
    5389             :         /*
    5390             :          * In a self-reference, arbitrarily assume the average worktable size
    5391             :          * is about 10 times the nonrecursive term's size.
    5392             :          */
    5393         348 :         rel->tuples = 10 * cte_rows;
    5394             :     }
    5395             :     else
    5396             :     {
    5397             :         /* Otherwise just believe the CTE's rowcount estimate */
    5398         956 :         rel->tuples = cte_rows;
    5399             :     }
    5400             : 
    5401             :     /* Now estimate number of output rows, etc */
    5402        1304 :     set_baserel_size_estimates(root, rel);
    5403        1304 : }
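
Condensed into one hypothetical helper, the estimate above is: trust the CTE's own rowcount for an ordinary reference, and guess ten times the nonrecursive term's size for the worktable of a self-reference:

    /* Sketch: tuple estimate for a CTE reference (hypothetical helper). */
    static double
    cte_tuples_sketch(bool self_reference, double cte_rows)
    {
        /* e.g. a 25-row nonrecursive term yields a 250-tuple worktable */
        return self_reference ? 10 * cte_rows : cte_rows;
    }
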
    5404             : 
    5405             : /*
    5406             :  * set_namedtuplestore_size_estimates
    5407             :  *      Set the size estimates for a base relation that is a tuplestore reference.
    5408             :  *
    5409             :  * The rel's targetlist and restrictinfo list must have been constructed
    5410             :  * already.
    5411             :  *
    5412             :  * We set the same fields as set_baserel_size_estimates.
    5413             :  */
    5414             : void
    5415         260 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
    5416             : {
    5417             :     RangeTblEntry *rte;
    5418             : 
    5419             :     /* Should only be applied to base relations that are tuplestore references */
    5420             :     Assert(rel->relid > 0);
    5421         260 :     rte = planner_rt_fetch(rel->relid, root);
    5422             :     Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
    5423             : 
    5424             :     /*
    5425             :      * Use the estimate provided by the code which is generating the named
    5426             :      * tuplestore.  In some cases, the actual number might be available; in
    5427             :      * others the same plan will be re-used, so a "typical" value might be
    5428             :      * estimated and used.
    5429             :      */
    5430         260 :     rel->tuples = rte->enrtuples;
    5431         260 :     if (rel->tuples < 0)
    5432           0 :         rel->tuples = 1000;
    5433             : 
    5434             :     /* Now estimate number of output rows, etc */
    5435         260 :     set_baserel_size_estimates(root, rel);
    5436         260 : }
    5437             : 
    5438             : /*
    5439             :  * set_result_size_estimates
    5440             :  *      Set the size estimates for an RTE_RESULT base relation
    5441             :  *
    5442             :  * The rel's targetlist and restrictinfo list must have been constructed
    5443             :  * already.
    5444             :  *
    5445             :  * We set the same fields as set_baserel_size_estimates.
    5446             :  */
    5447             : void
    5448         690 : set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
    5449             : {
    5450             :     /* Should only be applied to RTE_RESULT base relations */
    5451             :     Assert(rel->relid > 0);
    5452             :     Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
    5453             : 
    5454             :     /* RTE_RESULT always generates a single row, natively */
    5455         690 :     rel->tuples = 1;
    5456             : 
    5457             :     /* Now estimate number of output rows, etc */
    5458         690 :     set_baserel_size_estimates(root, rel);
    5459         690 : }
    5460             : 
    5461             : /*
    5462             :  * set_foreign_size_estimates
    5463             :  *      Set the size estimates for a base relation that is a foreign table.
    5464             :  *
    5465             :  * There is not a whole lot that we can do here; the foreign-data wrapper
    5466             :  * is responsible for producing useful estimates.  We can do a decent job
    5467             :  * of estimating baserestrictcost, so we set that, and we also set up width
    5468             :  * using what will be purely datatype-driven estimates from the targetlist.
    5469             :  * There is no way to do anything sane with the rows value, so we just put
    5470             :  * a default estimate and hope that the wrapper can improve on it.  The
    5471             :  * wrapper's GetForeignRelSize function will be called momentarily.
    5472             :  *
    5473             :  * The rel's targetlist and restrictinfo list must have been constructed
    5474             :  * already.
    5475             :  */
    5476             : void
    5477        1826 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
    5478             : {
    5479             :     /* Should only be applied to base relations */
    5480             :     Assert(rel->relid > 0);
    5481             : 
    5482        1826 :     rel->rows = 1000;            /* entirely bogus default estimate */
    5483             : 
    5484        1826 :     cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
    5485             : 
    5486        1826 :     set_rel_width(root, rel);
    5487        1826 : }
    5488             : 
    5489             : 
    5490             : /*
    5491             :  * set_rel_width
    5492             :  *      Set the estimated output width of a base relation.
    5493             :  *
    5494             :  * The estimated output width is the sum of the per-attribute width estimates
    5495             :  * for the actually-referenced columns, plus any PHVs or other expressions
    5496             :  * that have to be calculated at this relation.  This is the amount of data
    5497             :  * we'd need to pass upwards in case of a sort, hash, etc.
    5498             :  *
    5499             :  * This function also sets reltarget->cost, so it's a bit misnamed now.
    5500             :  *
    5501             :  * NB: this works best on plain relations because it prefers to look at
    5502             :  * real Vars.  For subqueries, set_subquery_size_estimates will already have
    5503             :  * copied up whatever per-column estimates were made within the subquery,
    5504             :  * and for other types of rels there isn't much we can do anyway.  We fall
    5505             :  * back on (fairly stupid) datatype-based width estimates if we can't get
    5506             :  * any better number.
    5507             :  *
    5508             :  * The per-attribute width estimates are cached for possible re-use while
    5509             :  * building join relations or post-scan/join pathtargets.
    5510             :  */
    5511             : static void
    5512      296148 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
    5513             : {
    5514      296148 :     Oid         reloid = planner_rt_fetch(rel->relid, root)->relid;
    5515      296148 :     int32       tuple_width = 0;
    5516      296148 :     bool        have_wholerow_var = false;
    5517             :     ListCell   *lc;
    5518             : 
    5519             :     /* Vars are assumed to have cost zero, but other exprs do not */
    5520      296148 :     rel->reltarget->cost.startup = 0;
    5521      296148 :     rel->reltarget->cost.per_tuple = 0;
    5522             : 
    5523     1233786 :     foreach(lc, rel->reltarget->exprs)
    5524             :     {
    5525      937638 :         Node       *node = (Node *) lfirst(lc);
    5526             : 
    5527             :         /*
    5528             :          * Ordinarily, a Var in a rel's targetlist must belong to that rel;
    5529             :          * but there are corner cases involving LATERAL references where that
    5530             :          * isn't so.  If the Var has the wrong varno, fall through to the
    5531             :          * generic case (it doesn't seem worth the trouble to be any smarter).
    5532             :          */
    5533      937638 :         if (IsA(node, Var) &&
    5534      932894 :             ((Var *) node)->varno == rel->relid)
    5535      224840 :         {
    5536      932854 :             Var        *var = (Var *) node;
    5537             :             int         ndx;
    5538             :             int32       item_width;
    5539             : 
    5540             :             Assert(var->varattno >= rel->min_attr);
    5541             :             Assert(var->varattno <= rel->max_attr);
    5542             : 
    5543      932854 :             ndx = var->varattno - rel->min_attr;
    5544             : 
    5545             :             /*
    5546             :              * If it's a whole-row Var, we'll deal with it below after we have
    5547             :              * already cached as many attr widths as possible.
    5548             :              */
    5549      932854 :             if (var->varattno == 0)
    5550             :             {
    5551        1720 :                 have_wholerow_var = true;
    5552        1720 :                 continue;
    5553             :             }
    5554             : 
    5555             :             /*
    5556             :              * The width may have been cached already (especially if it's a
    5557             :              * subquery), so don't duplicate effort.
    5558             :              */
    5559      931134 :             if (rel->attr_widths[ndx] > 0)
    5560             :             {
    5561      180174 :                 tuple_width += rel->attr_widths[ndx];
    5562      180174 :                 continue;
    5563             :             }
    5564             : 
    5565             :             /* Try to get column width from statistics */
    5566      750960 :             if (reloid != InvalidOid && var->varattno > 0)
    5567             :             {
    5568      623116 :                 item_width = get_attavgwidth(reloid, var->varattno);
    5569      623116 :                 if (item_width > 0)
    5570             :                 {
    5571      526120 :                     rel->attr_widths[ndx] = item_width;
    5572      526120 :                     tuple_width += item_width;
    5573      526120 :                     continue;
    5574             :                 }
    5575             :             }
    5576             : 
    5577             :             /*
    5578             :              * Not a plain relation, or can't find statistics for it. Estimate
    5579             :              * using just the type info.
    5580             :              */
    5581      224840 :             item_width = get_typavgwidth(var->vartype, var->vartypmod);
    5582             :             Assert(item_width > 0);
    5583      224840 :             rel->attr_widths[ndx] = item_width;
    5584      224840 :             tuple_width += item_width;
    5585             :         }
    5586        4784 :         else if (IsA(node, PlaceHolderVar))
    5587             :         {
    5588             :             /*
    5589             :              * We will need to evaluate the PHV's contained expression while
    5590             :              * scanning this rel, so be sure to include it in reltarget->cost.
    5591             :              */
    5592         516 :             PlaceHolderVar *phv = (PlaceHolderVar *) node;
    5593         516 :             PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);
    5594             :             QualCost    cost;
    5595             : 
    5596         516 :             tuple_width += phinfo->ph_width;
    5597         516 :             cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
    5598         516 :             rel->reltarget->cost.startup += cost.startup;
    5599         516 :             rel->reltarget->cost.per_tuple += cost.per_tuple;
    5600             :         }
    5601             :         else
    5602             :         {
    5603             :             /*
    5604             :              * We could be looking at an expression pulled up from a subquery,
    5605             :              * or a ROW() representing a whole-row child Var, etc.  Do what we
    5606             :              * can using the expression type information.
    5607             :              */
    5608             :             int32       item_width;
    5609             :             QualCost    cost;
    5610             : 
    5611        4268 :             item_width = get_typavgwidth(exprType(node), exprTypmod(node));
    5612             :             Assert(item_width > 0);
    5613        4268 :             tuple_width += item_width;
    5614             :             /* Not entirely clear if we need to account for cost, but do so */
    5615        4268 :             cost_qual_eval_node(&cost, node, root);
    5616        4268 :             rel->reltarget->cost.startup += cost.startup;
    5617        4268 :             rel->reltarget->cost.per_tuple += cost.per_tuple;
    5618             :         }
    5619             :     }
    5620             : 
    5621             :     /*
    5622             :      * If we have a whole-row reference, estimate its width as the sum of
    5623             :      * per-column widths plus heap tuple header overhead.
    5624             :      */
    5625      296148 :     if (have_wholerow_var)
    5626             :     {
    5627        1720 :         int32       wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
    5628             : 
    5629        1720 :         if (reloid != InvalidOid)
    5630             :         {
    5631             :             /* Real relation, so estimate true tuple width */
    5632        1324 :             wholerow_width += get_relation_data_width(reloid,
    5633        1324 :                                                       rel->attr_widths - rel->min_attr);
    5634             :         }
    5635             :         else
    5636             :         {
    5637             :             /* Do what we can with info for a phony rel */
    5638             :             AttrNumber  i;
    5639             : 
    5640        1046 :             for (i = 1; i <= rel->max_attr; i++)
    5641         650 :                 wholerow_width += rel->attr_widths[i - rel->min_attr];
    5642             :         }
    5643             : 
    5644        1720 :         rel->attr_widths[0 - rel->min_attr] = wholerow_width;
    5645             : 
    5646             :         /*
    5647             :          * Include the whole-row Var as part of the output tuple.  Yes, that
    5648             :          * really is what happens at runtime.
    5649             :          */
    5650        1720 :         tuple_width += wholerow_width;
    5651             :     }
    5652             : 
    5653             :     Assert(tuple_width >= 0);
    5654      296148 :     rel->reltarget->width = tuple_width;
    5655      296148 : }
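
To make the whole-row arithmetic above concrete, here is a standalone C sketch with made-up column widths; the 8-byte alignment, the 23-byte header size, and the SKETCH_* names are assumptions typical of common builds, not values read from this report:

    #include <stdio.h>

    #define SKETCH_MAXALIGN(x)  (((x) + 7) & ~7)    /* assumes 8-byte MAXALIGN */
    #define SKETCH_HEADER_SIZE  23                  /* SizeofHeapTupleHeader, typical builds */

    int
    main(void)
    {
        /* hypothetical per-column averages: int4, int8, text averaging 32 bytes */
        int         attr_widths[] = {4, 8, 32};
        int         wholerow_width = SKETCH_MAXALIGN(SKETCH_HEADER_SIZE);   /* 24 */
        int         i;

        for (i = 0; i < 3; i++)
            wholerow_width += attr_widths[i];

        printf("whole-row width estimate: %d bytes\n", wholerow_width);     /* 68 */
        return 0;
    }

The function above would likewise cache the 68-byte estimate in attr_widths[0 - min_attr] and add it to the output tuple width, since the whole-row Var really is materialized as part of the output tuple at runtime.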
    5656             : 
    5657             : /*
    5658             :  * set_pathtarget_cost_width
    5659             :  *      Set the estimated eval cost and output width of a PathTarget tlist.
    5660             :  *
    5661             :  * As a notational convenience, returns the same PathTarget pointer passed in.
    5662             :  *
    5663             :  * Most, though not quite all, uses of this function occur after we've run
    5664             :  * set_rel_width() for base relations; so we can usually obtain cached width
    5665             :  * estimates for Vars.  If we can't, fall back on datatype-based width
    5666             :  * estimates.  Present early-planning uses of PathTargets don't need accurate
    5667             :  * widths badly enough to justify going to the catalogs for better data.
    5668             :  */
    5669             : PathTarget *
    5670      373608 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
    5671             : {
    5672      373608 :     int32       tuple_width = 0;
    5673             :     ListCell   *lc;
    5674             : 
    5675             :     /* Vars are assumed to have cost zero, but other exprs do not */
    5676      373608 :     target->cost.startup = 0;
    5677      373608 :     target->cost.per_tuple = 0;
    5678             : 
    5679     1554856 :     foreach(lc, target->exprs)
    5680             :     {
    5681     1181248 :         Node       *node = (Node *) lfirst(lc);
    5682             : 
    5683     1181248 :         if (IsA(node, Var))
    5684             :         {
    5685      653742 :             Var        *var = (Var *) node;
    5686             :             int32       item_width;
    5687             : 
    5688             :             /* We should not see any upper-level Vars here */
    5689             :             Assert(var->varlevelsup == 0);
    5690             : 
    5691             :             /* Try to get data from RelOptInfo cache */
    5692      653742 :             if (var->varno < root->simple_rel_array_size)
    5693             :             {
    5694      653686 :                 RelOptInfo *rel = root->simple_rel_array[var->varno];
    5695             : 
    5696      653686 :                 if (rel != NULL &&
    5697      649276 :                     var->varattno >= rel->min_attr &&
    5698      649276 :                     var->varattno <= rel->max_attr)
    5699             :                 {
    5700      649276 :                     int         ndx = var->varattno - rel->min_attr;
    5701             : 
    5702      649276 :                     if (rel->attr_widths[ndx] > 0)
    5703             :                     {
    5704      643152 :                         tuple_width += rel->attr_widths[ndx];
    5705      643152 :                         continue;
    5706             :                     }
    5707             :                 }
    5708             :             }
    5709             : 
    5710             :             /*
    5711             :              * No cached data available, so estimate using just the type info.
    5712             :              */
    5713       10590 :             item_width = get_typavgwidth(var->vartype, var->vartypmod);
    5714             :             Assert(item_width > 0);
    5715       10590 :             tuple_width += item_width;
    5716             :         }
    5717             :         else
    5718             :         {
    5719             :             /*
    5720             :              * Handle general expressions using type info.
    5721             :              */
    5722             :             int32       item_width;
    5723             :             QualCost    cost;
    5724             : 
    5725      527506 :             item_width = get_typavgwidth(exprType(node), exprTypmod(node));
    5726             :             Assert(item_width > 0);
    5727      527506 :             tuple_width += item_width;
    5728             : 
    5729             :             /* Account for cost, too */
    5730      527506 :             cost_qual_eval_node(&cost, node, root);
    5731      527506 :             target->cost.startup += cost.startup;
    5732      527506 :             target->cost.per_tuple += cost.per_tuple;
    5733             :         }
    5734             :     }
    5735             : 
    5736             :     Assert(tuple_width >= 0);
    5737      373608 :     target->width = tuple_width;
    5738             : 
    5739      373608 :     return target;
    5740             : }
    5741             : 
    5742             : /*
    5743             :  * relation_byte_size
    5744             :  *    Estimate the storage space in bytes for a given number of tuples
    5745             :  *    of a given width (size in bytes).
    5746             :  */
    5747             : static double
    5748     1690948 : relation_byte_size(double tuples, int width)
    5749             : {
    5750     1690948 :     return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
    5751             : }
    5752             : 
    5753             : /*
    5754             :  * page_size
    5755             :  *    Returns an estimate of the number of pages covered by a given
    5756             :  *    number of tuples of a given width (size in bytes).
    5757             :  */
    5758             : static double
    5759        4144 : page_size(double tuples, int width)
    5760             : {
    5761        4144 :     return ceil(relation_byte_size(tuples, width) / BLCKSZ);
    5762             : }
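
Both helpers can be checked by hand. Assuming 8-byte MAXALIGN, a 23-byte SizeofHeapTupleHeader (typical builds), and the default 8192-byte BLCKSZ, a hypothetical 1000 tuples of width 44 occupy 1000 * (48 + 24) = 72000 bytes, i.e. ceil(72000 / 8192) = 9 pages:

    #include <math.h>
    #include <stdio.h>

    #define SKETCH_MAXALIGN(x)  (((x) + 7) & ~7)    /* assumes 8-byte MAXALIGN */

    int
    main(void)
    {
        double      tuples = 1000.0;    /* made-up row count */
        int         width = 44;         /* made-up tuple width */
        double      bytes = tuples * (SKETCH_MAXALIGN(width) + SKETCH_MAXALIGN(23));
        double      pages = ceil(bytes / 8192.0);

        printf("%.0f bytes, %.0f pages\n", bytes, pages);   /* 72000 bytes, 9 pages */
        return 0;
    }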
    5763             : 
    5764             : /*
    5765             :  * Estimate the fraction of the work that each worker will do given the
    5766             :  * number of workers budgeted for the path.
    5767             :  */
    5768             : static double
    5769       79782 : get_parallel_divisor(Path *path)
    5770             : {
    5771       79782 :     double      parallel_divisor = path->parallel_workers;
    5772             : 
    5773             :     /*
    5774             :      * Early experience with parallel query suggests that when there is only
    5775             :      * one worker, the leader often makes a very substantial contribution to
    5776             :      * executing the parallel portion of the plan, but as more workers are
    5777             :      * added, it does less and less, because it's busy reading tuples from the
    5778             :      * workers and doing whatever non-parallel post-processing is needed.  By
    5779             :      * the time we reach 4 workers, the leader no longer makes a meaningful
    5780             :      * contribution.  Thus, for now, estimate that the leader spends 30% of
    5781             :      * its time servicing each worker, and the remainder executing the
    5782             :      * parallel plan.
    5783             :      */
    5784       79782 :     if (parallel_leader_participation)
    5785             :     {
    5786             :         double      leader_contribution;
    5787             : 
    5788       79284 :         leader_contribution = 1.0 - (0.3 * path->parallel_workers);
    5789       79284 :         if (leader_contribution > 0)
    5790       78744 :             parallel_divisor += leader_contribution;
    5791             :     }
    5792             : 
    5793       79782 :     return parallel_divisor;
    5794             : }
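
The fade-out of the leader's contribution is easy to tabulate. Below is a standalone sketch of the rule; the harness and the function name are illustrative, not planner code:

    #include <stdbool.h>
    #include <stdio.h>

    static double
    sketch_parallel_divisor(int workers, bool leader_participates)
    {
        double      divisor = workers;

        if (leader_participates)
        {
            double      leader_contribution = 1.0 - 0.3 * workers;

            if (leader_contribution > 0)
                divisor += leader_contribution;
        }
        return divisor;
    }

    int
    main(void)
    {
        int         w;

        /* prints 1.7, 2.4, 3.1, 4.0: the leader's share is gone by 4 workers */
        for (w = 1; w <= 4; w++)
            printf("%d workers -> divisor %.1f\n", w, sketch_parallel_divisor(w, true));
        return 0;
    }

Callers divide a parallel path's row count and run cost by this divisor, so with two workers each of the 2.4 "effective workers" is expected to handle roughly 42% of the rows.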
    5795             : 
    5796             : /*
    5797             :  * compute_bitmap_pages
    5798             :  *
    5799             :  * Compute the number of pages fetched from the heap in a bitmap heap scan.
    5800             :  */
    5801             : double
    5802      382572 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
    5803             :                      int loop_count, Cost *cost, double *tuple)
    5804             : {
    5805             :     Cost        indexTotalCost;
    5806             :     Selectivity indexSelectivity;
    5807             :     double      T;
    5808             :     double      pages_fetched;
    5809             :     double      tuples_fetched;
    5810             :     double      heap_pages;
    5811             :     long        maxentries;
    5812             : 
    5813             :     /*
    5814             :      * Fetch total cost of obtaining the bitmap, as well as its total
    5815             :      * selectivity.
    5816             :      */
    5817      382572 :     cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
    5818             : 
    5819             :     /*
    5820             :      * Estimate number of main-table pages fetched.
    5821             :      */
    5822      382572 :     tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
    5823             : 
    5824      382572 :     T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
    5825             : 
    5826             :     /*
    5827             :      * For a single scan, the number of heap pages that need to be fetched is
    5828             :      * the same as the Mackert and Lohman formula for the case T <= b (ie, no
    5829             :      * re-reads needed).
    5830             :      */
    5831      382572 :     pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
    5832             : 
    5833             :     /*
    5834             :      * Calculate the number of pages fetched from the heap.  Then, from the
    5835             :      * current work_mem, estimate the maximum number of entries in the bitmap.
    5836             :      * (Note that we always do this calculation based on the number of pages
    5837             :      * that would be fetched in a single iteration, even if loop_count > 1.
    5838             :      * That's correct, because only that number of entries will be stored in
    5839             :      * the bitmap at one time.)
    5840             :      */
    5841      382572 :     heap_pages = Min(pages_fetched, baserel->pages);
    5842      382572 :     maxentries = tbm_calculate_entries(work_mem * 1024L);
    5843             : 
    5844      382572 :     if (loop_count > 1)
    5845             :     {
    5846             :         /*
    5847             :          * For repeated bitmap scans, scale up the number of tuples fetched in
    5848             :          * the Mackert and Lohman formula by the number of scans, so that we
    5849             :          * estimate the number of pages fetched by all the scans. Then
    5850             :          * pro-rate for one scan.
    5851             :          */
    5852       67942 :         pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
    5853             :                                             baserel->pages,
    5854             :                                             get_indexpath_pages(bitmapqual),
    5855             :                                             root);
    5856       67942 :         pages_fetched /= loop_count;
    5857             :     }
    5858             : 
    5859      382572 :     if (pages_fetched >= T)
    5860       27162 :         pages_fetched = T;
    5861             :     else
    5862      355410 :         pages_fetched = ceil(pages_fetched);
    5863             : 
    5864      382572 :     if (maxentries < heap_pages)
    5865             :     {
    5866             :         double      exact_pages;
    5867             :         double      lossy_pages;
    5868             : 
    5869             :         /*
    5870             :          * Crude approximation of the number of lossy pages.  Because of the
    5871             :          * way tbm_lossify() is coded, the number of lossy pages increases
    5872             :          * very sharply as soon as we run short of memory; this formula has
    5873             :          * that property and seems to perform adequately in testing, but it's
    5874             :          * possible we could do better somehow.
    5875             :          */
    5876          12 :         lossy_pages = Max(0, heap_pages - maxentries / 2);
    5877          12 :         exact_pages = heap_pages - lossy_pages;
    5878             : 
    5879             :         /*
    5880             :          * If there are lossy pages then recompute the number of tuples
    5881             :          * processed by the bitmap heap node.  We assume here that the chance
    5882             :          * of a given tuple coming from an exact page is the same as the
    5883             :          * chance that a given page is exact.  This might not be true, but
    5884             :          * it's not clear how we can do any better.
    5885             :          */
    5886          12 :         if (lossy_pages > 0)
    5887             :             tuples_fetched =
    5888          24 :                 clamp_row_est(indexSelectivity *
    5889          24 :                               (exact_pages / heap_pages) * baserel->tuples +
    5890          12 :                               (lossy_pages / heap_pages) * baserel->tuples);
    5891             :     }
    5892             : 
    5893      382572 :     if (cost)
    5894      303152 :         *cost = indexTotalCost;
    5895      382572 :     if (tuple)
    5896      303152 :         *tuple = tuples_fetched;
    5897             : 
    5898      382572 :     return pages_fetched;
    5899             : }
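
As a sanity check on the single-scan estimate, 2*T*N / (2*T + N) behaves sensibly at both extremes: for small N it is close to N (roughly one heap page per tuple fetched), and as N grows it saturates at T, which is exactly the clamp applied above. A sketch with a hypothetical 1000-page heap:

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        double      T = 1000.0;     /* hypothetical heap size in pages */
        double      tuples[] = {10, 500, 5000, 1000000};
        int         i;

        for (i = 0; i < 4; i++)
        {
            double      pages = (2.0 * T * tuples[i]) / (2.0 * T + tuples[i]);

            if (pages >= T)
                pages = T;          /* cannot fetch more pages than exist */
            else
                pages = ceil(pages);
            printf("%.0f tuples -> %.0f pages\n", tuples[i], pages);
        }
        return 0;
    }

The lossy-page adjustment is equally checkable with made-up numbers: with heap_pages = 1000 and maxentries = 800, lossy_pages = Max(0, 1000 - 800/2) = 600 and exact_pages = 400; the recomputed tuples_fetched then applies the index selectivity only to the exact fraction, because every tuple on a lossy page must be rechecked.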

Generated by: LCOV version 1.13