LCOV - code coverage report
Current view: top level - src/backend/optimizer/path - allpaths.c (source / functions)
Test: PostgreSQL 19devel        Date: 2025-12-07 05:18:03
Coverage:   Lines: 1157 / 1229 (94.1 %)    Functions: 52 / 52 (100.0 %)
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * allpaths.c
       4             :  *    Routines to find possible search paths for processing a query
       5             :  *
       6             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/optimizer/path/allpaths.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : 
      16             : #include "postgres.h"
      17             : 
      18             : #include <limits.h>
      19             : #include <math.h>
      20             : 
      21             : #include "access/sysattr.h"
      22             : #include "access/tsmapi.h"
      23             : #include "catalog/pg_class.h"
      24             : #include "catalog/pg_operator.h"
      25             : #include "catalog/pg_proc.h"
      26             : #include "foreign/fdwapi.h"
      27             : #include "miscadmin.h"
      28             : #include "nodes/makefuncs.h"
      29             : #include "nodes/nodeFuncs.h"
      30             : #include "nodes/supportnodes.h"
      31             : #ifdef OPTIMIZER_DEBUG
      32             : #include "nodes/print.h"
      33             : #endif
      34             : #include "optimizer/appendinfo.h"
      35             : #include "optimizer/clauses.h"
      36             : #include "optimizer/cost.h"
      37             : #include "optimizer/geqo.h"
      38             : #include "optimizer/optimizer.h"
      39             : #include "optimizer/pathnode.h"
      40             : #include "optimizer/paths.h"
      41             : #include "optimizer/plancat.h"
      42             : #include "optimizer/planner.h"
      43             : #include "optimizer/prep.h"
      44             : #include "optimizer/tlist.h"
      45             : #include "parser/parse_clause.h"
      46             : #include "parser/parsetree.h"
      47             : #include "partitioning/partbounds.h"
      48             : #include "port/pg_bitutils.h"
      49             : #include "rewrite/rewriteManip.h"
      50             : #include "utils/lsyscache.h"
      51             : #include "utils/selfuncs.h"
      52             : 
      53             : 
      54             : /* Bitmask flags for pushdown_safety_info.unsafeFlags */
      55             : #define UNSAFE_HAS_VOLATILE_FUNC        (1 << 0)
      56             : #define UNSAFE_HAS_SET_FUNC             (1 << 1)
      57             : #define UNSAFE_NOTIN_DISTINCTON_CLAUSE  (1 << 2)
      58             : #define UNSAFE_NOTIN_PARTITIONBY_CLAUSE (1 << 3)
      59             : #define UNSAFE_TYPE_MISMATCH            (1 << 4)
      60             : 
      61             : /* results of subquery_is_pushdown_safe */
      62             : typedef struct pushdown_safety_info
      63             : {
      64             :     unsigned char *unsafeFlags; /* bitmask of reasons why this target list
      65             :                                  * column is unsafe for qual pushdown, or 0 if
      66             :                                  * no reason. */
      67             :     bool        unsafeVolatile; /* don't push down volatile quals */
      68             :     bool        unsafeLeaky;    /* don't push down leaky quals */
      69             : } pushdown_safety_info;
      70             : 
      71             : /* Return type for qual_is_pushdown_safe */
      72             : typedef enum pushdown_safe_type
      73             : {
      74             :     PUSHDOWN_UNSAFE,            /* unsafe to push qual into subquery */
      75             :     PUSHDOWN_SAFE,              /* safe to push qual into subquery */
      76             :     PUSHDOWN_WINDOWCLAUSE_RUNCOND,  /* unsafe, but may work as WindowClause
      77             :                                      * run condition */
      78             : } pushdown_safe_type;
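For orientation, a hedged sketch of how these types are used further down in this file (the real call sites are set_subquery_pathlist, check_output_expressions, and qual_is_pushdown_safe; the fragment below is an outline under that assumption, with subquery, tle, and var standing in for the callers' local variables):

    pushdown_safety_info safetyInfo = {0};

    /* One flag byte per output column; resnos are 1-based, so entry 0 is
     * unused.  palloc0 starts every column out as "safe" (no bits set). */
    safetyInfo.unsafeFlags = (unsigned char *)
        palloc0((list_length(subquery->targetList) + 1) * sizeof(unsigned char));

    /* A volatile output expression taints only its own column ... */
    safetyInfo.unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;

    /* ... and a qual referencing a tainted column must not be pushed down. */
    if (safetyInfo.unsafeFlags[var->varattno] != 0)
        return PUSHDOWN_UNSAFE;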
      79             : 
      80             : /* These parameters are set by GUC */
      81             : bool        enable_geqo = false;    /* just in case GUC doesn't set it */
      82             : bool        enable_eager_aggregate = true;
      83             : int         geqo_threshold;
      84             : double      min_eager_agg_group_size;
      85             : int         min_parallel_table_scan_size;
      86             : int         min_parallel_index_scan_size;
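These knobs are consulted when building the join tree; a close paraphrase (simplified, not verbatim) of the decision made in make_rel_from_joinlist later in this file:

    /* Once the join problem has at least geqo_threshold relations, hand it
     * to the genetic optimizer instead of exhaustive dynamic programming. */
    if (enable_geqo && levels_needed >= geqo_threshold)
        return geqo(root, levels_needed, initial_rels);
    else
        return standard_join_search(root, levels_needed, initial_rels);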
      87             : 
      88             : /* Hook for plugins to get control in set_rel_pathlist() */
      89             : set_rel_pathlist_hook_type set_rel_pathlist_hook = NULL;
      90             : 
      91             : /* Hook for plugins to replace standard_join_search() */
      92             : join_search_hook_type join_search_hook = NULL;
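A hedged sketch of how an extension would install the path-list hook from its module-load function; every my_-prefixed name is hypothetical:

    #include "postgres.h"
    #include "fmgr.h"
    #include "optimizer/paths.h"

    PG_MODULE_MAGIC;

    static set_rel_pathlist_hook_type prev_set_rel_pathlist_hook = NULL;

    static void
    my_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                        Index rti, RangeTblEntry *rte)
    {
        /* Always chain to any previously installed hook. */
        if (prev_set_rel_pathlist_hook)
            prev_set_rel_pathlist_hook(root, rel, rti, rte);

        /* Add, delete, or re-cost paths here, e.g. with add_path(). */
    }

    void
    _PG_init(void)
    {
        prev_set_rel_pathlist_hook = set_rel_pathlist_hook;
        set_rel_pathlist_hook = my_set_rel_pathlist;
    }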
      93             : 
      94             : 
      95             : static void set_base_rel_consider_startup(PlannerInfo *root);
      96             : static void set_base_rel_sizes(PlannerInfo *root);
      97             : static void setup_simple_grouped_rels(PlannerInfo *root);
      98             : static void set_base_rel_pathlists(PlannerInfo *root);
      99             : static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
     100             :                          Index rti, RangeTblEntry *rte);
     101             : static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     102             :                              Index rti, RangeTblEntry *rte);
     103             : static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
     104             :                                RangeTblEntry *rte);
     105             : static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
     106             : static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
     107             :                                       RangeTblEntry *rte);
     108             : static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     109             :                                    RangeTblEntry *rte);
     110             : static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
     111             :                                      RangeTblEntry *rte);
     112             : static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     113             :                                          RangeTblEntry *rte);
     114             : static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
     115             :                              RangeTblEntry *rte);
     116             : static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
     117             :                                  RangeTblEntry *rte);
     118             : static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
     119             :                                 Index rti, RangeTblEntry *rte);
     120             : static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     121             :                                     Index rti, RangeTblEntry *rte);
     122             : static void set_grouped_rel_pathlist(PlannerInfo *root, RelOptInfo *rel);
     123             : static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
     124             :                                          List *live_childrels,
     125             :                                          List *all_child_pathkeys);
     126             : static Path *get_cheapest_parameterized_child_path(PlannerInfo *root,
     127             :                                                    RelOptInfo *rel,
     128             :                                                    Relids required_outer);
     129             : static void accumulate_append_subpath(Path *path,
     130             :                                       List **subpaths,
     131             :                                       List **special_subpaths);
     132             : static Path *get_singleton_append_subpath(Path *path);
     133             : static void set_dummy_rel_pathlist(RelOptInfo *rel);
     134             : static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
     135             :                                   Index rti, RangeTblEntry *rte);
     136             : static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
     137             :                                   RangeTblEntry *rte);
     138             : static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
     139             :                                 RangeTblEntry *rte);
     140             : static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
     141             :                                    RangeTblEntry *rte);
     142             : static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
     143             :                              RangeTblEntry *rte);
     144             : static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
     145             :                                          RangeTblEntry *rte);
     146             : static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
     147             :                                 RangeTblEntry *rte);
     148             : static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
     149             :                                    RangeTblEntry *rte);
     150             : static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
     151             : static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
     152             :                                       pushdown_safety_info *safetyInfo);
     153             : static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
     154             :                                   pushdown_safety_info *safetyInfo);
     155             : static void check_output_expressions(Query *subquery,
     156             :                                      pushdown_safety_info *safetyInfo);
     157             : static void compare_tlist_datatypes(List *tlist, List *colTypes,
     158             :                                     pushdown_safety_info *safetyInfo);
     159             : static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query);
     160             : static pushdown_safe_type qual_is_pushdown_safe(Query *subquery, Index rti,
     161             :                                                 RestrictInfo *rinfo,
     162             :                                                 pushdown_safety_info *safetyInfo);
     163             : static void subquery_push_qual(Query *subquery,
     164             :                                RangeTblEntry *rte, Index rti, Node *qual);
     165             : static void recurse_push_qual(Node *setOp, Query *topquery,
     166             :                               RangeTblEntry *rte, Index rti, Node *qual);
     167             : static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
     168             :                                            Bitmapset *extra_used_attrs);
     169             : 
     170             : 
     171             : /*
     172             :  * make_one_rel
     173             :  *    Finds all possible access paths for executing a query, returning a
     174             :  *    single rel that represents the join of all base rels in the query.
     175             :  */
     176             : RelOptInfo *
     177      326656 : make_one_rel(PlannerInfo *root, List *joinlist)
     178             : {
     179             :     RelOptInfo *rel;
     180             :     Index       rti;
     181             :     double      total_pages;
     182             : 
     183             :     /* Mark base rels as to whether we care about fast-start plans */
     184      326656 :     set_base_rel_consider_startup(root);
     185             : 
     186             :     /*
     187             :      * Compute size estimates and consider_parallel flags for each base rel.
     188             :      */
     189      326656 :     set_base_rel_sizes(root);
     190             : 
     191             :     /*
     192             :      * Build grouped relations for simple rels (i.e., base or "other" member
     193             :      * relations) where possible.
     194             :      */
     195      326622 :     setup_simple_grouped_rels(root);
     196             : 
     197             :     /*
     198             :      * We should now have size estimates for every actual table involved in
     199             :      * the query, and we also know which if any have been deleted from the
     200             :      * query by join removal, pruned by partition pruning, or eliminated by
     201             :      * constraint exclusion.  So we can now compute total_table_pages.
     202             :      *
     203             :      * Note that appendrels are not double-counted here, even though we don't
     204             :      * bother to distinguish RelOptInfos for appendrel parents, because the
     205             :      * parents will have pages = 0.
     206             :      *
     207             :      * XXX if a table is self-joined, we will count it once per appearance,
     208             :      * which perhaps is the wrong thing ... but that's not completely clear,
     209             :      * and detecting self-joins here is difficult, so ignore it for now.
     210             :      */
     211      326622 :     total_pages = 0;
     212     1011982 :     for (rti = 1; rti < root->simple_rel_array_size; rti++)
     213             :     {
     214      685360 :         RelOptInfo *brel = root->simple_rel_array[rti];
     215             : 
     216             :         /* there may be empty slots corresponding to non-baserel RTEs */
     217      685360 :         if (brel == NULL)
     218      162100 :             continue;
     219             : 
     220             :         Assert(brel->relid == rti); /* sanity check on array */
     221             : 
     222      523260 :         if (IS_DUMMY_REL(brel))
     223        1394 :             continue;
     224             : 
     225      521866 :         if (IS_SIMPLE_REL(brel))
     226      521866 :             total_pages += (double) brel->pages;
     227             :     }
     228      326622 :     root->total_table_pages = total_pages;
     229             : 
     230             :     /*
     231             :      * Generate access paths for each base rel.
     232             :      */
     233      326622 :     set_base_rel_pathlists(root);
     234             : 
     235             :     /*
     236             :      * Generate access paths for the entire join tree.
     237             :      */
     238      326622 :     rel = make_rel_from_joinlist(root, joinlist);
     239             : 
     240             :     /*
     241             :      * The result should join all and only the query's base + outer-join rels.
     242             :      */
     243             :     Assert(bms_equal(rel->relids, root->all_query_rels));
     244             : 
     245      326622 :     return rel;
     246             : }
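make_rel_from_joinlist (defined later in this file) is where join_search_hook gets consulted; a replacement must return the single joinrel covering all of the initial rels. A minimal sketch, assuming a hypothetical my_join_search that merely delegates:

    static RelOptInfo *
    my_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
    {
        /* Inspect or reorder initial_rels here, then let the standard
         * dynamic-programming search do the actual work. */
        return standard_join_search(root, levels_needed, initial_rels);
    }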
     247             : 
     248             : /*
     249             :  * set_base_rel_consider_startup
     250             :  *    Set the consider_[param_]startup flags for each base-relation entry.
     251             :  *
     252             :  * For the moment, we only deal with consider_param_startup here; because the
     253             :  * logic for consider_startup is pretty trivial and is the same for every base
     254             :  * relation, we just let build_simple_rel() initialize that flag correctly to
     255             :  * start with.  If that logic ever gets more complicated it would probably
     256             :  * be better to move it here.
     257             :  */
     258             : static void
     259      326656 : set_base_rel_consider_startup(PlannerInfo *root)
     260             : {
     261             :     /*
     262             :      * Since parameterized paths can only be used on the inside of a nestloop
     263             :      * join plan, there is usually little value in considering fast-start
     264             :      * plans for them.  However, for relations that are on the RHS of a SEMI
     265             :      * or ANTI join, a fast-start plan can be useful because we're only going
     266             :      * to care about fetching one tuple anyway.
     267             :      *
     268             :      * To minimize growth of planning time, we currently restrict this to
     269             :      * cases where the RHS is a single base relation, not a join; there is no
     270             :      * provision for consider_param_startup to get set at all on joinrels.
     271             :      * Also we don't worry about appendrels.  costsize.c's costing rules for
     272             :      * nestloop semi/antijoins don't consider such cases either.
     273             :      */
     274             :     ListCell   *lc;
     275             : 
     276      369088 :     foreach(lc, root->join_info_list)
     277             :     {
     278       42432 :         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
     279             :         int         varno;
     280             : 
     281       50956 :         if ((sjinfo->jointype == JOIN_SEMI || sjinfo->jointype == JOIN_ANTI) &&
     282        8524 :             bms_get_singleton_member(sjinfo->syn_righthand, &varno))
     283             :         {
     284        8324 :             RelOptInfo *rel = find_base_rel(root, varno);
     285             : 
     286        8324 :             rel->consider_param_startup = true;
     287             :         }
     288             :     }
     289      326656 : }
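The bms_get_singleton_member test above is what restricts the optimization to a single-baserel RHS; a small self-contained sketch of that function's contract:

    #include "postgres.h"
    #include "nodes/bitmapset.h"

    static void
    singleton_member_demo(void)
    {
        Bitmapset  *rhs = bms_make_singleton(3);
        int         varno;

        /* Exactly one member: returns true and reports it. */
        if (bms_get_singleton_member(rhs, &varno))
            Assert(varno == 3);

        /* Two members: returns false, so the rel would be skipped. */
        rhs = bms_add_member(rhs, 5);
        Assert(!bms_get_singleton_member(rhs, &varno));
    }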
     290             : 
     291             : /*
     292             :  * set_base_rel_sizes
     293             :  *    Set the size estimates (rows and widths) for each base-relation entry.
     294             :  *    Also determine whether to consider parallel paths for base relations.
     295             :  *
     296             :  * We do this in a separate pass over the base rels so that rowcount
     297             :  * estimates are available for parameterized path generation, and also so
     298             :  * that each rel's consider_parallel flag is set correctly before we begin to
     299             :  * generate paths.
     300             :  */
     301             : static void
     302      326656 : set_base_rel_sizes(PlannerInfo *root)
     303             : {
     304             :     Index       rti;
     305             : 
     306     1012018 :     for (rti = 1; rti < root->simple_rel_array_size; rti++)
     307             :     {
     308      685396 :         RelOptInfo *rel = root->simple_rel_array[rti];
     309             :         RangeTblEntry *rte;
     310             : 
     311             :         /* there may be empty slots corresponding to non-baserel RTEs */
     312      685396 :         if (rel == NULL)
     313      162102 :             continue;
     314             : 
     315             :         Assert(rel->relid == rti);   /* sanity check on array */
     316             : 
     317             :         /* ignore RTEs that are "other rels" */
     318      523294 :         if (rel->reloptkind != RELOPT_BASEREL)
     319       58080 :             continue;
     320             : 
     321      465214 :         rte = root->simple_rte_array[rti];
     322             : 
     323             :         /*
     324             :          * If parallelism is allowable for this query in general, see whether
     325             :          * it's allowable for this rel in particular.  We have to do this
     326             :          * before set_rel_size(), because (a) if this rel is an inheritance
     327             :          * parent, set_append_rel_size() will use and perhaps change the rel's
     328             :          * consider_parallel flag, and (b) for some RTE types, set_rel_size()
     329             :          * goes ahead and makes paths immediately.
     330             :          */
     331      465214 :         if (root->glob->parallelModeOK)
     332      371264 :             set_rel_consider_parallel(root, rel, rte);
     333             : 
     334      465214 :         set_rel_size(root, rel, rti, rte);
     335             :     }
     336      326622 : }
     337             : 
     338             : /*
     339             :  * setup_simple_grouped_rels
     340             :  *    For each simple relation, build a grouped simple relation if eager
     341             :  *    aggregation is possible and if this relation can produce grouped paths.
     342             :  */
     343             : static void
     344      326622 : setup_simple_grouped_rels(PlannerInfo *root)
     345             : {
     346             :     Index       rti;
     347             : 
     348             :     /*
     349             :      * If there are no aggregate expressions or grouping expressions, eager
     350             :      * aggregation is not possible.
     351             :      */
     352      326622 :     if (root->agg_clause_list == NIL ||
     353         658 :         root->group_expr_list == NIL)
     354      326030 :         return;
     355             : 
     356        5074 :     for (rti = 1; rti < root->simple_rel_array_size; rti++)
     357             :     {
     358        4482 :         RelOptInfo *rel = root->simple_rel_array[rti];
     359             : 
     360             :         /* there may be empty slots corresponding to non-baserel RTEs */
     361        4482 :         if (rel == NULL)
     362        1414 :             continue;
     363             : 
     364             :         Assert(rel->relid == rti);   /* sanity check on array */
     365             :         Assert(IS_SIMPLE_REL(rel)); /* sanity check on rel */
     366             : 
     367        3068 :         (void) build_simple_grouped_rel(root, rel);
     368             :     }
     369             : }
     370             : 
     371             : /*
     372             :  * set_base_rel_pathlists
     373             :  *    Finds all paths available for scanning each base-relation entry.
     374             :  *    Sequential scan and any available indices are considered.
     375             :  *    Each useful path is attached to its relation's 'pathlist' field.
     376             :  */
     377             : static void
     378      326622 : set_base_rel_pathlists(PlannerInfo *root)
     379             : {
     380             :     Index       rti;
     381             : 
     382     1011982 :     for (rti = 1; rti < root->simple_rel_array_size; rti++)
     383             :     {
     384      685360 :         RelOptInfo *rel = root->simple_rel_array[rti];
     385             : 
     386             :         /* there may be empty slots corresponding to non-baserel RTEs */
     387      685360 :         if (rel == NULL)
     388      162100 :             continue;
     389             : 
     390             :         Assert(rel->relid == rti);   /* sanity check on array */
     391             : 
     392             :         /* ignore RTEs that are "other rels" */
     393      523260 :         if (rel->reloptkind != RELOPT_BASEREL)
     394       58080 :             continue;
     395             : 
     396      465180 :         set_rel_pathlist(root, rel, rti, root->simple_rte_array[rti]);
     397             :     }
     398      326622 : }
     399             : 
     400             : /*
     401             :  * set_rel_size
     402             :  *    Set size estimates for a base relation
     403             :  */
     404             : static void
     405      522984 : set_rel_size(PlannerInfo *root, RelOptInfo *rel,
     406             :              Index rti, RangeTblEntry *rte)
     407             : {
     408      988198 :     if (rel->reloptkind == RELOPT_BASEREL &&
     409      465214 :         relation_excluded_by_constraints(root, rel, rte))
     410             :     {
     411             :         /*
     412             :          * We proved we don't need to scan the rel via constraint exclusion,
     413             :          * so set up a single dummy path for it.  Here we only check this for
     414             :          * regular baserels; if it's an otherrel, CE was already checked in
     415             :          * set_append_rel_size().
     416             :          *
     417             :          * In this case, we go ahead and set up the relation's path right away
     418             :          * instead of leaving it for set_rel_pathlist to do.  This is because
     419             :          * we don't have a convention for marking a rel as dummy except by
     420             :          * assigning a dummy path to it.
     421             :          */
     422         658 :         set_dummy_rel_pathlist(rel);
     423             :     }
     424      522326 :     else if (rte->inh)
     425             :     {
     426             :         /* It's an "append relation", process accordingly */
     427       25692 :         set_append_rel_size(root, rel, rti, rte);
     428             :     }
     429             :     else
     430             :     {
     431      496634 :         switch (rel->rtekind)
     432             :         {
     433      408522 :             case RTE_RELATION:
     434      408522 :                 if (rte->relkind == RELKIND_FOREIGN_TABLE)
     435             :                 {
     436             :                     /* Foreign table */
     437        2462 :                     set_foreign_size(root, rel, rte);
     438             :                 }
     439      406060 :                 else if (rte->relkind == RELKIND_PARTITIONED_TABLE)
     440             :                 {
     441             :                     /*
     442             :                      * We could get here if asked to scan a partitioned table
     443             :                      * with ONLY.  In that case we shouldn't scan any of the
     444             :                      * partitions, so mark it as a dummy rel.
     445             :                      */
     446          40 :                     set_dummy_rel_pathlist(rel);
     447             :                 }
     448      406020 :                 else if (rte->tablesample != NULL)
     449             :                 {
     450             :                     /* Sampled relation */
     451         306 :                     set_tablesample_rel_size(root, rel, rte);
     452             :                 }
     453             :                 else
     454             :                 {
     455             :                     /* Plain relation */
     456      405714 :                     set_plain_rel_size(root, rel, rte);
     457             :                 }
     458      408488 :                 break;
     459       17532 :             case RTE_SUBQUERY:
     460             : 
     461             :                 /*
     462             :                  * Subqueries don't support making a choice between
     463             :                  * parameterized and unparameterized paths, so just go ahead
     464             :                  * and build their paths immediately.
     465             :                  */
     466       17532 :                 set_subquery_pathlist(root, rel, rti, rte);
     467       17532 :                 break;
     468       51798 :             case RTE_FUNCTION:
     469       51798 :                 set_function_size_estimates(root, rel);
     470       51798 :                 break;
     471         626 :             case RTE_TABLEFUNC:
     472         626 :                 set_tablefunc_size_estimates(root, rel);
     473         626 :                 break;
     474        8294 :             case RTE_VALUES:
     475        8294 :                 set_values_size_estimates(root, rel);
     476        8294 :                 break;
     477        5176 :             case RTE_CTE:
     478             : 
     479             :                 /*
     480             :                  * CTEs don't support making a choice between parameterized
     481             :                  * and unparameterized paths, so just go ahead and build their
     482             :                  * paths immediately.
     483             :                  */
     484        5176 :                 if (rte->self_reference)
     485         934 :                     set_worktable_pathlist(root, rel, rte);
     486             :                 else
     487        4242 :                     set_cte_pathlist(root, rel, rte);
     488        5176 :                 break;
     489         478 :             case RTE_NAMEDTUPLESTORE:
     490             :                 /* Might as well just build the path immediately */
     491         478 :                 set_namedtuplestore_pathlist(root, rel, rte);
     492         478 :                 break;
     493        4208 :             case RTE_RESULT:
     494             :                 /* Might as well just build the path immediately */
     495        4208 :                 set_result_pathlist(root, rel, rte);
     496        4208 :                 break;
     497           0 :             default:
     498           0 :                 elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
     499             :                 break;
     500             :         }
     501             :     }
     502             : 
     503             :     /*
     504             :      * We insist that all non-dummy rels have a nonzero rowcount estimate.
     505             :      */
     506             :     Assert(rel->rows > 0 || IS_DUMMY_REL(rel));
     507      522948 : }
     508             : 
     509             : /*
     510             :  * set_rel_pathlist
     511             :  *    Build access paths for a base relation
     512             :  */
     513             : static void
     514      523086 : set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     515             :                  Index rti, RangeTblEntry *rte)
     516             : {
     517      523086 :     if (IS_DUMMY_REL(rel))
     518             :     {
     519             :         /* We already proved the relation empty, so nothing more to do */
     520             :     }
     521      521830 :     else if (rte->inh)
     522             :     {
     523             :         /* It's an "append relation", process accordingly */
     524       25396 :         set_append_rel_pathlist(root, rel, rti, rte);
     525             :     }
     526             :     else
     527             :     {
     528      496434 :         switch (rel->rtekind)
     529             :         {
     530      408448 :             case RTE_RELATION:
     531      408448 :                 if (rte->relkind == RELKIND_FOREIGN_TABLE)
     532             :                 {
     533             :                     /* Foreign table */
     534        2458 :                     set_foreign_pathlist(root, rel, rte);
     535             :                 }
     536      405990 :                 else if (rte->tablesample != NULL)
     537             :                 {
     538             :                     /* Sampled relation */
     539         306 :                     set_tablesample_rel_pathlist(root, rel, rte);
     540             :                 }
     541             :                 else
     542             :                 {
     543             :                     /* Plain relation */
     544      405684 :                     set_plain_rel_pathlist(root, rel, rte);
     545             :                 }
     546      408448 :                 break;
     547       17406 :             case RTE_SUBQUERY:
     548             :                 /* Subquery --- fully handled during set_rel_size */
     549       17406 :                 break;
     550       51798 :             case RTE_FUNCTION:
     551             :                 /* RangeFunction */
     552       51798 :                 set_function_pathlist(root, rel, rte);
     553       51798 :                 break;
     554         626 :             case RTE_TABLEFUNC:
     555             :                 /* Table Function */
     556         626 :                 set_tablefunc_pathlist(root, rel, rte);
     557         626 :                 break;
     558        8294 :             case RTE_VALUES:
     559             :                 /* Values list */
     560        8294 :                 set_values_pathlist(root, rel, rte);
     561        8294 :                 break;
     562        5176 :             case RTE_CTE:
     563             :                 /* CTE reference --- fully handled during set_rel_size */
     564        5176 :                 break;
     565         478 :             case RTE_NAMEDTUPLESTORE:
     566             :                 /* tuplestore reference --- fully handled during set_rel_size */
     567         478 :                 break;
     568        4208 :             case RTE_RESULT:
     569             :                 /* simple Result --- fully handled during set_rel_size */
     570        4208 :                 break;
     571           0 :             default:
     572           0 :                 elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
     573             :                 break;
     574             :         }
     575             :     }
     576             : 
     577             :     /*
     578             :      * Allow a plugin to editorialize on the set of Paths for this base
     579             :      * relation.  It could add new paths (such as CustomPaths) by calling
     580             :      * add_path(), or add_partial_path() if parallel aware.  It could also
     581             :      * delete or modify paths added by the core code.
     582             :      */
     583      523086 :     if (set_rel_pathlist_hook)
     584           0 :         (*set_rel_pathlist_hook) (root, rel, rti, rte);
     585             : 
     586             :     /*
     587             :      * If this is a baserel, we should normally consider gathering any partial
     588             :      * paths we may have created for it.  We have to do this after calling the
     589             :      * set_rel_pathlist_hook, else it cannot add partial paths to be included
     590             :      * here.
     591             :      *
     592             :      * However, if this is an inheritance child, skip it.  Otherwise, we could
     593             :      * end up with a very large number of gather nodes, each trying to grab
     594             :      * its own pool of workers.  Instead, we'll consider gathering partial
     595             :      * paths for the parent appendrel.
     596             :      *
     597             :      * Also, if this is the topmost scan/join rel, we postpone gathering until
     598             :      * the final scan/join targetlist is available (see grouping_planner).
     599             :      */
     600      523086 :     if (rel->reloptkind == RELOPT_BASEREL &&
     601      465180 :         !bms_equal(rel->relids, root->all_query_rels))
     602      239048 :         generate_useful_gather_paths(root, rel, false);
     603             : 
     604             :     /* Now find the cheapest of the paths for this rel */
     605      523086 :     set_cheapest(rel);
     606             : 
     607             :     /*
     608             :      * If a grouped relation for this rel exists, build partial aggregation
     609             :      * paths for it.
     610             :      *
     611             :      * Note that this can only happen after we've called set_cheapest() for
     612             :      * this base rel, because we need its cheapest paths.
     613             :      */
     614      523086 :     set_grouped_rel_pathlist(root, rel);
     615             : 
     616             : #ifdef OPTIMIZER_DEBUG
     617             :     pprint(rel);
     618             : #endif
     619      523086 : }
     620             : 
     621             : /*
     622             :  * set_plain_rel_size
     623             :  *    Set size estimates for a plain relation (no subquery, no inheritance)
     624             :  */
     625             : static void
     626      405714 : set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     627             : {
     628             :     /*
     629             :      * Test any partial indexes of rel for applicability.  We must do this
     630             :      * first since partial unique indexes can affect size estimates.
     631             :      */
     632      405714 :     check_index_predicates(root, rel);
     633             : 
     634             :     /* Mark rel with estimated output rows, width, etc */
     635      405714 :     set_baserel_size_estimates(root, rel);
     636      405684 : }
     637             : 
     638             : /*
     639             :  * If this relation could possibly be scanned from within a worker, then set
     640             :  * its consider_parallel flag.
     641             :  */
     642             : static void
     643      414424 : set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
     644             :                           RangeTblEntry *rte)
     645             : {
     646             :     /*
     647             :      * The flag has previously been initialized to false, so we can just
     648             :      * return if it becomes clear that we can't safely set it.
     649             :      */
     650             :     Assert(!rel->consider_parallel);
     651             : 
     652             :     /* Don't call this if parallelism is disallowed for the entire query. */
     653             :     Assert(root->glob->parallelModeOK);
     654             : 
     655             :     /* This should only be called for baserels and appendrel children. */
     656             :     Assert(IS_SIMPLE_REL(rel));
     657             : 
     658             :     /* Assorted checks based on rtekind. */
     659      414424 :     switch (rte->rtekind)
     660             :     {
     661      355202 :         case RTE_RELATION:
     662             : 
     663             :             /*
     664             :              * Currently, parallel workers can't access the leader's temporary
     665             :              * tables.  We could possibly relax this if we wrote all of its
     666             :              * local buffers at the start of the query and made no changes
     667             :              * thereafter (maybe we could allow hint bit changes), and if we
     668             :              * taught the workers to read them.  Writing a large number of
     669             :              * temporary buffers could be expensive, though, and we don't have
     670             :              * the rest of the necessary infrastructure right now anyway.  So
     671             :              * for now, bail out if we see a temporary table.
     672             :              */
     673      355202 :             if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
     674        8540 :                 return;
     675             : 
     676             :             /*
     677             :              * Table sampling can be pushed down to workers if the sample
     678             :              * function and its arguments are safe.
     679             :              */
     680      346662 :             if (rte->tablesample != NULL)
     681             :             {
     682         330 :                 char        proparallel = func_parallel(rte->tablesample->tsmhandler);
     683             : 
     684         330 :                 if (proparallel != PROPARALLEL_SAFE)
     685          36 :                     return;
     686         294 :                 if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
     687          12 :                     return;
     688             :             }
     689             : 
     690             :             /*
     691             :              * Ask FDWs whether they can support performing a ForeignScan
     692             :              * within a worker.  Most often, the answer will be no.  For
     693             :              * example, if the nature of the FDW is such that it opens a TCP
     694             :              * connection with a remote server, each parallel worker would end
     695             :              * up with a separate connection, and these connections might not
     696             :              * be appropriately coordinated between workers and the leader.
     697             :              */
     698      346614 :             if (rte->relkind == RELKIND_FOREIGN_TABLE)
     699             :             {
     700             :                 Assert(rel->fdwroutine);
     701        1552 :                 if (!rel->fdwroutine->IsForeignScanParallelSafe)
     702        1480 :                     return;
     703          72 :                 if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
     704           0 :                     return;
     705             :             }
     706             : 
     707             :             /*
     708             :              * There are additional considerations for appendrels, which we'll
     709             :              * deal with in set_append_rel_size and set_append_rel_pathlist.
     710             :              * For now, just set consider_parallel based on the rel's own
     711             :              * quals and targetlist.
     712             :              */
     713      345134 :             break;
     714             : 
     715       19278 :         case RTE_SUBQUERY:
     716             : 
     717             :             /*
     718             :              * There's no intrinsic problem with scanning a subquery-in-FROM
     719             :              * (as distinct from a SubPlan or InitPlan) in a parallel worker.
     720             :              * If the subquery doesn't happen to have any parallel-safe paths,
     721             :              * then flagging it as consider_parallel won't change anything,
     722             :              * but that's true for plain tables, too.  We must set
     723             :              * consider_parallel based on the rel's own quals and targetlist,
     724             :              * so that if a subquery path is parallel-safe but the quals and
     725             :              * projection we're sticking onto it are not, we correctly mark
     726             :              * the SubqueryScanPath as not parallel-safe.  (Note that
     727             :              * set_subquery_pathlist() might push some of these quals down
     728             :              * into the subquery itself, but that doesn't change anything.)
     729             :              *
     730             :              * We can't push sub-select containing LIMIT/OFFSET to workers as
     731             :              * there is no guarantee that the row order will be fully
     732             :              * deterministic, and applying LIMIT/OFFSET will lead to
     733             :              * inconsistent results at the top-level.  (In some cases, where
     734             :              * the result is ordered, we could relax this restriction.  But it
     735             :              * doesn't currently seem worth expending extra effort to do so.)
     736             :              */
     737             :             {
     738       19278 :                 Query      *subquery = castNode(Query, rte->subquery);
     739             : 
     740       19278 :                 if (limit_needed(subquery))
     741         508 :                     return;
     742             :             }
     743       18770 :             break;
     744             : 
     745           0 :         case RTE_JOIN:
     746             :             /* Shouldn't happen; we're only considering baserels here. */
     747             :             Assert(false);
     748           0 :             return;
     749             : 
     750       28044 :         case RTE_FUNCTION:
     751             :             /* Check for parallel-restricted functions. */
     752       28044 :             if (!is_parallel_safe(root, (Node *) rte->functions))
     753       12800 :                 return;
     754       15244 :             break;
     755             : 
     756         626 :         case RTE_TABLEFUNC:
     757             :             /* not parallel safe */
     758         626 :             return;
     759             : 
     760        2844 :         case RTE_VALUES:
     761             :             /* Check for parallel-restricted functions. */
     762        2844 :             if (!is_parallel_safe(root, (Node *) rte->values_lists))
     763          12 :                 return;
     764        2832 :             break;
     765             : 
     766        4214 :         case RTE_CTE:
     767             : 
     768             :             /*
     769             :              * CTE tuplestores aren't shared among parallel workers, so we
     770             :              * force all CTE scans to happen in the leader.  Also, populating
     771             :              * the CTE would require executing a subplan that's not available
     772             :              * in the worker, might be parallel-restricted, and must get
     773             :              * executed only once.
     774             :              */
     775        4214 :             return;
     776             : 
     777         450 :         case RTE_NAMEDTUPLESTORE:
     778             : 
     779             :             /*
     780             :              * tuplestore cannot be shared, at least without more
     781             :              * infrastructure to support that.
     782             :              */
     783         450 :             return;
     784             : 
     785        3766 :         case RTE_RESULT:
     786             :             /* RESULT RTEs, in themselves, are no problem. */
     787        3766 :             break;
     788           0 :         case RTE_GROUP:
     789             :             /* Shouldn't happen; we're only considering baserels here. */
     790             :             Assert(false);
     791           0 :             return;
     792             :     }
     793             : 
     794             :     /*
     795             :      * If there's anything in baserestrictinfo that's parallel-restricted, we
     796             :      * give up on parallelizing access to this relation.  We could consider
     797             :      * instead postponing application of the restricted quals until we're
     798             :      * above all the parallelism in the plan tree, but it's not clear that
     799             :      * that would be a win in very many cases, and it might be tricky to make
     800             :      * outer join clauses work correctly.  It would likely break equivalence
     801             :      * classes, too.
     802             :      */
     803      385746 :     if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo))
     804       27162 :         return;
     805             : 
     806             :     /*
     807             :      * Likewise, if the relation's outputs are not parallel-safe, give up.
     808             :      * (Usually, they're just Vars, but sometimes they're not.)
     809             :      */
     810      358584 :     if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
     811          18 :         return;
     812             : 
     813             :     /* We have a winner. */
     814      358566 :     rel->consider_parallel = true;
     815             : }
     816             : 
     817             : /*
     818             :  * set_plain_rel_pathlist
     819             :  *    Build access paths for a plain relation (no subquery, no inheritance)
     820             :  */
     821             : static void
     822      405684 : set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     823             : {
     824             :     Relids      required_outer;
     825             : 
     826             :     /*
     827             :      * We don't support pushing join clauses into the quals of a seqscan, but
     828             :      * it could still have required parameterization due to LATERAL refs in
     829             :      * its tlist.
     830             :      */
     831      405684 :     required_outer = rel->lateral_relids;
     832             : 
     833             :     /*
     834             :      * Consider TID scans.
     835             :      *
     836             :      * If create_tidscan_paths returns true, then a TID scan path is forced.
     837             :      * This happens when rel->baserestrictinfo contains CurrentOfExpr, because
     838             :      * the executor can't handle any other type of path for such queries.
     839             :      * Hence, we return without adding any other paths.
     840             :      */
     841      405684 :     if (create_tidscan_paths(root, rel))
     842         404 :         return;
     843             : 
     844             :     /* Consider sequential scan */
     845      405280 :     add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
     846             : 
     847             :     /* If appropriate, consider parallel sequential scan */
     848      405280 :     if (rel->consider_parallel && required_outer == NULL)
     849      304294 :         create_plain_partial_paths(root, rel);
     850             : 
     851             :     /* Consider index scans */
     852      405280 :     create_index_paths(root, rel);
     853             : }
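The CurrentOfExpr case mentioned above can be recognized by inspecting baserestrictinfo; a sketch of that test (an illustration of what create_tidscan_paths checks internally, not its actual code):

    ListCell   *lc;

    foreach(lc, rel->baserestrictinfo)
    {
        RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);

        /* WHERE CURRENT OF cursor: only a TID scan can execute this. */
        if (IsA(rinfo->clause, CurrentOfExpr))
            break;
    }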
     854             : 
     855             : /*
     856             :  * create_plain_partial_paths
     857             :  *    Build partial access paths for parallel scan of a plain relation
     858             :  */
     859             : static void
     860      304294 : create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
     861             : {
     862             :     int         parallel_workers;
     863             : 
     864      304294 :     parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
     865             :                                                max_parallel_workers_per_gather);
     866             : 
     867             :     /* If any limit was set to zero, the user doesn't want a parallel scan. */
     868      304294 :     if (parallel_workers <= 0)
     869      276842 :         return;
     870             : 
     871             :     /* Add an unordered partial path based on a parallel sequential scan. */
     872       27452 :     add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
     873             : }
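compute_parallel_worker (defined later in this file) applies a size-based heuristic; roughly, and as an assumption-level paraphrase rather than the exact code: one worker once the relation reaches min_parallel_table_scan_size, plus one more each time the size triples, capped by the passed-in limit.

    static int
    workers_for_pages(double pages, double min_scan_size, int max_workers)
    {
        int     workers = 0;
        double  threshold = min_scan_size;

        if (max_workers > 0 && pages >= threshold)
        {
            workers = 1;
            while (pages >= threshold * 3 && workers < max_workers)
            {
                workers++;
                threshold *= 3;
            }
        }
        return workers;
    }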
     874             : 
     875             : /*
     876             :  * set_tablesample_rel_size
     877             :  *    Set size estimates for a sampled relation
     878             :  */
     879             : static void
     880         306 : set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     881             : {
     882         306 :     TableSampleClause *tsc = rte->tablesample;
     883             :     TsmRoutine *tsm;
     884             :     BlockNumber pages;
     885             :     double      tuples;
     886             : 
     887             :     /*
     888             :      * Test any partial indexes of rel for applicability.  We must do this
     889             :      * first since partial unique indexes can affect size estimates.
     890             :      */
     891         306 :     check_index_predicates(root, rel);
     892             : 
     893             :     /*
     894             :      * Call the sampling method's estimation function to estimate the number
     895             :      * of pages it will read and the number of tuples it will return.  (Note:
     896             :      * we assume the function returns sane values.)
     897             :      */
     898         306 :     tsm = GetTsmRoutine(tsc->tsmhandler);
     899         306 :     tsm->SampleScanGetSampleSize(root, rel, tsc->args,
     900             :                                  &pages, &tuples);
     901             : 
     902             :     /*
     903             :      * For the moment, because we will only consider a SampleScan path for the
     904             :      * rel, it's okay to just overwrite the pages and tuples estimates for the
     905             :      * whole relation.  If we ever consider multiple path types for sampled
     906             :      * rels, we'll need more complication.
     907             :      */
     908         306 :     rel->pages = pages;
     909         306 :     rel->tuples = tuples;
     910             : 
     911             :     /* Mark rel with estimated output rows, width, etc */
     912         306 :     set_baserel_size_estimates(root, rel);
     913         306 : }
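On the other side of the SampleScanGetSampleSize call, each table sampling method supplies its own estimator in its TsmRoutine. A hedged sketch of a hypothetical block sampler that always reads about 10% of the table (the mytsm_ name is invented):

    #include "postgres.h"
    #include <math.h>
    #include "access/tsmapi.h"
    #include "optimizer/optimizer.h"

    static void
    mytsm_samplescangetsamplesize(PlannerInfo *root, RelOptInfo *baserel,
                                  List *paramexprs, BlockNumber *pages,
                                  double *tuples)
    {
        /* Block-level sampling: both estimates scale with the fraction. */
        *pages = (BlockNumber) rint(baserel->pages * 0.10);
        *tuples = clamp_row_est(baserel->tuples * 0.10);
    }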
     914             : 
     915             : /*
     916             :  * set_tablesample_rel_pathlist
     917             :  *    Build access paths for a sampled relation
     918             :  */
     919             : static void
     920         306 : set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     921             : {
     922             :     Relids      required_outer;
     923             :     Path       *path;
     924             : 
     925             :     /*
     926             :      * We don't support pushing join clauses into the quals of a samplescan,
     927             :      * but it could still have required parameterization due to LATERAL refs
     928             :      * in its tlist or TABLESAMPLE arguments.
     929             :      */
     930         306 :     required_outer = rel->lateral_relids;
     931             : 
     932             :     /* Consider sampled scan */
     933         306 :     path = create_samplescan_path(root, rel, required_outer);
     934             : 
     935             :     /*
     936             :      * If the sampling method does not support repeatable scans, we must avoid
     937             :      * plans that would scan the rel multiple times.  Ideally, we'd simply
     938             :      * avoid putting the rel on the inside of a nestloop join; but adding such
     939             :      * a consideration to the planner seems like a great deal of complication
     940             :      * to support an uncommon usage of second-rate sampling methods.  Instead,
     941             :      * if there is a risk that the query might perform an unsafe join, just
     942             :      * wrap the SampleScan in a Materialize node.  We can check for joins by
     943             :      * counting the membership of all_query_rels (note that this correctly
     944             :      * counts inheritance trees as single rels).  If we're inside a subquery,
     945             :      * we can't easily check whether a join might occur in the outer query, so
     946             :      * just assume one is possible.
     947             :      *
     948             :      * GetTsmRoutine is relatively expensive compared to the other tests here,
     949             :      * so check repeatable_across_scans last, even though that's a bit odd.
     950             :      */
     951         586 :     if ((root->query_level > 1 ||
     952         280 :          bms_membership(root->all_query_rels) != BMS_SINGLETON) &&
     953          98 :         !(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
     954             :     {
     955           8 :         path = (Path *) create_material_path(rel, path);
     956             :     }
     957             : 
     958         306 :     add_path(rel, path);
     959             : 
     960             :     /* For the moment, at least, there are no other paths to consider */
     961         306 : }
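/*
 * Example (illustrative; table and column names are hypothetical): the
 * Materialize wrap above matters for queries such as
 *
 *     SELECT *
 *     FROM orders o
 *     JOIN customers c TABLESAMPLE SYSTEM_ROWS(100) ON c.id = o.cust_id;
 *
 * SYSTEM_ROWS (from contrib's tsm_system_rows) reports
 * repeatable_across_scans = false, so if the sampled rel were rescanned on
 * the inside of a nestloop it could produce a different sample each time.
 * Materializing the scan's output once makes every rescan see the same rows.
 */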
     962             : 
     963             : /*
     964             :  * set_foreign_size
     965             :  *    Set size estimates for a foreign table RTE
     966             :  */
     967             : static void
     968        2462 : set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     969             : {
     970             :     /* Mark rel with estimated output rows, width, etc */
     971        2462 :     set_foreign_size_estimates(root, rel);
     972             : 
     973             :     /* Let FDW adjust the size estimates, if it can */
     974        2462 :     rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid);
     975             : 
     976             :     /* ... but do not let it set the rows estimate to zero */
     977        2458 :     rel->rows = clamp_row_est(rel->rows);
     978             : 
     979             :     /*
     980             :      * Also, make sure rel->tuples is not insane relative to rel->rows.
     981             :      * Notably, this ensures sanity if pg_class.reltuples contains -1 and the
     982             :      * FDW doesn't do anything to replace that.
     983             :      */
     984        2458 :     rel->tuples = Max(rel->tuples, rel->rows);
     985        2458 : }
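/*
 * Worked example (illustrative numbers): a never-analyzed foreign table
 * comes in with pg_class.reltuples = -1, which plancat.c copies into
 * rel->tuples.  If the FDW's GetForeignRelSize sets rel->rows = 1000 but
 * never touches rel->tuples, the code above repairs the inconsistency:
 * clamp_row_est(1000) leaves rows at 1000, and Max(-1, 1000) raises tuples
 * to 1000, so later code never sees a negative tuple count.
 */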
     986             : 
     987             : /*
     988             :  * set_foreign_pathlist
     989             :  *    Build access paths for a foreign table RTE
     990             :  */
     991             : static void
     992        2458 : set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     993             : {
     994             :     /* Call the FDW's GetForeignPaths function to generate path(s) */
     995        2458 :     rel->fdwroutine->GetForeignPaths(root, rel, rte->relid);
     996        2458 : }
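/*
 * Illustrative sketch (not part of allpaths.c): the minimal shape of an
 * FDW's GetForeignPaths callback, which adds a single unparameterized,
 * unordered ForeignPath.  The cost figures here are invented, and the
 * create_foreignscan_path() argument list has changed across releases, so
 * treat this as a sketch of the pattern rather than copy-paste code.
 */
static void
example_get_foreign_paths(PlannerInfo *root, RelOptInfo *baserel,
                          Oid foreigntableid)
{
    Cost        startup_cost = 10.0;                        /* hypothetical */
    Cost        total_cost = startup_cost + baserel->rows;  /* hypothetical */

    add_path(baserel, (Path *)
             create_foreignscan_path(root, baserel,
                                     NULL,          /* default pathtarget */
                                     baserel->rows,
                                     0,             /* no disabled nodes */
                                     startup_cost,
                                     total_cost,
                                     NIL,           /* no pathkeys */
                                     NULL,          /* unparameterized */
                                     NULL,          /* no outer path */
                                     NIL,           /* no fdw_restrictinfo */
                                     NIL));         /* no fdw_private */
}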
     997             : 
     998             : /*
     999             :  * set_append_rel_size
    1000             :  *    Set size estimates for a simple "append relation"
    1001             :  *
    1002             :  * The passed-in rel and RTE represent the entire append relation.  The
    1003             :  * relation's contents are computed by appending together the output of the
    1004             :  * individual member relations.  Note that in the non-partitioned inheritance
    1005             :  * case, the first member relation is actually the same table as is mentioned
    1006             :  * in the parent RTE ... but it has a different RTE and RelOptInfo.  This is
    1007             :  * a good thing because their outputs are not the same size.
    1008             :  */
    1009             : static void
    1010       25692 : set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
    1011             :                     Index rti, RangeTblEntry *rte)
    1012             : {
    1013       25692 :     int         parentRTindex = rti;
    1014             :     bool        has_live_children;
    1015             :     double      parent_tuples;
    1016             :     double      parent_rows;
    1017             :     double      parent_size;
    1018             :     double     *parent_attrsizes;
    1019             :     int         nattrs;
    1020             :     ListCell   *l;
    1021             : 
    1022             :     /* Guard against stack overflow due to overly deep inheritance tree. */
    1023       25692 :     check_stack_depth();
    1024             : 
    1025             :     Assert(IS_SIMPLE_REL(rel));
    1026             : 
    1027             :     /*
    1028             :      * If this is a partitioned baserel, set the consider_partitionwise_join
    1029             :      * flag; currently, we only consider partitionwise joins with the baserel
    1030             :      * if its targetlist doesn't contain a whole-row Var.
    1031             :      */
    1032       25692 :     if (enable_partitionwise_join &&
    1033        4994 :         rel->reloptkind == RELOPT_BASEREL &&
    1034        3974 :         rte->relkind == RELKIND_PARTITIONED_TABLE &&
    1035        3974 :         bms_is_empty(rel->attr_needed[InvalidAttrNumber - rel->min_attr]))
    1036        3898 :         rel->consider_partitionwise_join = true;
    1037             : 
    1038             :     /*
    1039             :      * Initialize to compute size estimates for whole append relation.
    1040             :      *
    1041             :      * We handle tuples estimates by setting "tuples" to the total number of
    1042             :      * tuples accumulated from each live child, rather than using "rows".
    1043             :      * Although an appendrel itself doesn't directly enforce any quals, its
    1044             :      * child relations may.  Therefore, setting "tuples" equal to "rows" for
    1045             :      * an appendrel isn't always appropriate, and can lead to inaccurate cost
    1046             :      * estimates.  For example, when estimating the number of distinct values
    1047             :      * from an appendrel, we would be unable to adjust the estimate based on
    1048             :      * the restriction selectivity (see estimate_num_groups).
    1049             :      *
    1050             :      * We handle width estimates by weighting the widths of different child
    1051             :      * rels proportionally to their number of rows.  This is sensible because
    1052             :      * the use of width estimates is mainly to compute the total relation
    1053             :      * "footprint" if we have to sort or hash it.  To do this, we sum the
    1054             :      * total equivalent size (in "double" arithmetic) and then divide by the
    1055             :      * total rowcount estimate.  This is done separately for the total rel
    1056             :      * width and each attribute.
    1057             :      *
    1058             :      * Note: if you consider changing this logic, beware that child rels could
    1059             :      * have zero rows and/or width, if they were excluded by constraints.
    1060             :      */
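    /*
     * Worked example (illustrative numbers): with two live children, one of
     * 1000 rows at width 40 and one of 3000 rows at width 8, the loop below
     * accumulates parent_rows = 4000 and parent_size = 1000*40 + 3000*8 =
     * 64000, so the parent gets rel->reltarget->width = rint(64000/4000) =
     * 16.  The same row-weighted averaging is applied per attribute.
     */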
    1061       25692 :     has_live_children = false;
    1062       25692 :     parent_tuples = 0;
    1063       25692 :     parent_rows = 0;
    1064       25692 :     parent_size = 0;
    1065       25692 :     nattrs = rel->max_attr - rel->min_attr + 1;
    1066       25692 :     parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));
    1067             : 
    1068      135102 :     foreach(l, root->append_rel_list)
    1069             :     {
    1070      109412 :         AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
    1071             :         int         childRTindex;
    1072             :         RangeTblEntry *childRTE;
    1073             :         RelOptInfo *childrel;
    1074             :         List       *childrinfos;
    1075             :         ListCell   *parentvars;
    1076             :         ListCell   *childvars;
    1077             :         ListCell   *lc;
    1078             : 
    1079             :         /* append_rel_list contains all append rels; ignore others */
    1080      109412 :         if (appinfo->parent_relid != parentRTindex)
    1081       51780 :             continue;
    1082             : 
    1083       57974 :         childRTindex = appinfo->child_relid;
    1084       57974 :         childRTE = root->simple_rte_array[childRTindex];
    1085             : 
    1086             :         /*
    1087             :          * The child rel's RelOptInfo was already created during
    1088             :          * add_other_rels_to_query.
    1089             :          */
    1090       57974 :         childrel = find_base_rel(root, childRTindex);
    1091             :         Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);
    1092             : 
    1093             :         /* We may have already proven the child to be dummy. */
    1094       57974 :         if (IS_DUMMY_REL(childrel))
    1095          18 :             continue;
    1096             : 
    1097             :         /*
    1098             :          * We have to copy the parent's targetlist and quals to the child,
    1099             :          * with appropriate substitution of variables.  However, the
    1100             :          * baserestrictinfo quals were already copied/substituted when the
    1101             :          * child RelOptInfo was built.  So we don't need any additional setup
    1102             :          * before applying constraint exclusion.
    1103             :          */
    1104       57956 :         if (relation_excluded_by_constraints(root, childrel, childRTE))
    1105             :         {
    1106             :             /*
    1107             :              * This child need not be scanned, so we can omit it from the
    1108             :              * appendrel.
    1109             :              */
    1110         186 :             set_dummy_rel_pathlist(childrel);
    1111         186 :             continue;
    1112             :         }
    1113             : 
    1114             :         /*
    1115             :          * Constraint exclusion failed, so copy the parent's join quals and
    1116             :          * targetlist to the child, with appropriate variable substitutions.
    1117             :          *
    1118             :          * We skip join quals that came from above outer joins that can null
    1119             :          * this rel, since they would be of no value while generating paths
    1120             :          * for the child.  This saves some effort while processing the child
    1121             :          * rel, and it also avoids an implementation restriction in
    1122             :          * adjust_appendrel_attrs (it can't apply nullingrels to a non-Var).
    1123             :          */
    1124       57770 :         childrinfos = NIL;
    1125       70958 :         foreach(lc, rel->joininfo)
    1126             :         {
    1127       13188 :             RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
    1128             : 
    1129       13188 :             if (!bms_overlap(rinfo->clause_relids, rel->nulling_relids))
    1130       10866 :                 childrinfos = lappend(childrinfos,
    1131       10866 :                                       adjust_appendrel_attrs(root,
    1132             :                                                              (Node *) rinfo,
    1133             :                                                              1, &appinfo));
    1134             :         }
    1135       57770 :         childrel->joininfo = childrinfos;
    1136             : 
    1137             :         /*
    1138             :          * Now for the child's targetlist.
    1139             :          *
    1140             :          * NB: the resulting childrel->reltarget->exprs may contain arbitrary
    1141             :          * expressions, which otherwise would not occur in a rel's targetlist.
    1142             :          * Code that might be looking at an appendrel child must cope with
    1143             :          * such.  (Normally, a rel's targetlist would only include Vars and
    1144             :          * PlaceHolderVars.)  XXX we do not bother to update the cost or width
    1145             :          * fields of childrel->reltarget; not clear if that would be useful.
    1146             :          */
    1147      115540 :         childrel->reltarget->exprs = (List *)
    1148       57770 :             adjust_appendrel_attrs(root,
    1149       57770 :                                    (Node *) rel->reltarget->exprs,
    1150             :                                    1, &appinfo);
    1151             : 
    1152             :         /*
    1153             :          * We have to make child entries in the EquivalenceClass data
    1154             :          * structures as well.  This is needed either if the parent
    1155             :          * participates in some eclass joins (because we will want to consider
    1156             :          * inner-indexscan joins on the individual children) or if the parent
    1157             :          * has useful pathkeys (because we should try to build MergeAppend
    1158             :          * paths that produce those sort orderings).
    1159             :          */
    1160       57770 :         if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
    1161       35370 :             add_child_rel_equivalences(root, appinfo, rel, childrel);
    1162       57770 :         childrel->has_eclass_joins = rel->has_eclass_joins;
    1163             : 
    1164             :         /*
    1165             :          * Note: we could compute appropriate attr_needed data for the child's
    1166             :          * variables, by transforming the parent's attr_needed through the
    1167             :          * translated_vars mapping.  However, currently there's no need
    1168             :          * because attr_needed is only examined for base relations not
    1169             :          * otherrels.  So we just leave the child's attr_needed empty.
    1170             :          */
    1171             : 
    1172             :         /*
    1173             :          * If we consider partitionwise joins with the parent rel, do the same
    1174             :          * for partitioned child rels.
    1175             :          *
    1176             :          * Note: here we abuse the consider_partitionwise_join flag by setting
    1177             :          * it for child rels that are not themselves partitioned.  We do so to
    1178             :          * tell try_partitionwise_join() that the child rel is sufficiently
    1179             :          * valid to be used as a per-partition input, even if it later gets
    1180             :          * proven to be dummy.  (It's not usable until we've set up the
    1181             :          * reltarget and EC entries, which we just did.)
    1182             :          */
    1183       57770 :         if (rel->consider_partitionwise_join)
    1184       13184 :             childrel->consider_partitionwise_join = true;
    1185             : 
    1186             :         /*
    1187             :          * If parallelism is allowable for this query in general, see whether
    1188             :          * it's allowable for this childrel in particular.  But if we've
    1189             :          * already decided the appendrel is not parallel-safe as a whole,
    1190             :          * there's no point in considering parallelism for this child.  For
    1191             :          * consistency, do this before calling set_rel_size() for the child.
    1192             :          */
    1193       57770 :         if (root->glob->parallelModeOK && rel->consider_parallel)
    1194       43160 :             set_rel_consider_parallel(root, childrel, childRTE);
    1195             : 
    1196             :         /*
    1197             :          * Compute the child's size.
    1198             :          */
    1199       57770 :         set_rel_size(root, childrel, childRTindex, childRTE);
    1200             : 
    1201             :         /*
    1202             :          * It is possible that constraint exclusion detected a contradiction
    1203             :          * within a child subquery, even though we didn't prove one above. If
    1204             :          * so, we can skip this child.
    1205             :          */
    1206       57768 :         if (IS_DUMMY_REL(childrel))
    1207         138 :             continue;
    1208             : 
    1209             :         /* We have at least one live child. */
    1210       57630 :         has_live_children = true;
    1211             : 
    1212             :         /*
    1213             :          * If any live child is not parallel-safe, treat the whole appendrel
    1214             :          * as not parallel-safe.  In future we might be able to generate plans
    1215             :          * in which some children are farmed out to workers while others are
    1216             :          * not; but we don't have that today, so it's a waste to consider
    1217             :          * partial paths anywhere in the appendrel unless it's all safe.
    1218             :          * (Child rels visited before this one will be unmarked in
    1219             :          * set_append_rel_pathlist().)
    1220             :          */
    1221       57630 :         if (!childrel->consider_parallel)
    1222       15302 :             rel->consider_parallel = false;
    1223             : 
    1224             :         /*
    1225             :          * Accumulate size information from each live child.
    1226             :          */
    1227             :         Assert(childrel->rows > 0);
    1228             : 
    1229       57630 :         parent_tuples += childrel->tuples;
    1230       57630 :         parent_rows += childrel->rows;
    1231       57630 :         parent_size += childrel->reltarget->width * childrel->rows;
    1232             : 
    1233             :         /*
    1234             :          * Accumulate per-column estimates too.  We need not do anything for
    1235             :          * PlaceHolderVars in the parent list.  If the child expression isn't a
    1236             :          * Var, or we didn't record a width estimate for it, we have to fall
    1237             :          * back on a datatype-based estimate.
    1238             :          *
    1239             :          * By construction, child's targetlist is 1-to-1 with parent's.
    1240             :          */
    1241      189472 :         forboth(parentvars, rel->reltarget->exprs,
    1242             :                 childvars, childrel->reltarget->exprs)
    1243             :         {
    1244      131842 :             Var        *parentvar = (Var *) lfirst(parentvars);
    1245      131842 :             Node       *childvar = (Node *) lfirst(childvars);
    1246             : 
    1247      131842 :             if (IsA(parentvar, Var) && parentvar->varno == parentRTindex)
    1248             :             {
    1249      118914 :                 int         pndx = parentvar->varattno - rel->min_attr;
    1250      118914 :                 int32       child_width = 0;
    1251             : 
    1252      118914 :                 if (IsA(childvar, Var) &&
    1253      114134 :                     ((Var *) childvar)->varno == childrel->relid)
    1254             :                 {
    1255      114068 :                     int         cndx = ((Var *) childvar)->varattno - childrel->min_attr;
    1256             : 
    1257      114068 :                     child_width = childrel->attr_widths[cndx];
    1258             :                 }
    1259      118914 :                 if (child_width <= 0)
    1260        4846 :                     child_width = get_typavgwidth(exprType(childvar),
    1261             :                                                   exprTypmod(childvar));
    1262             :                 Assert(child_width > 0);
    1263      118914 :                 parent_attrsizes[pndx] += child_width * childrel->rows;
    1264             :             }
    1265             :         }
    1266             :     }
    1267             : 
    1268       25690 :     if (has_live_children)
    1269             :     {
    1270             :         /*
    1271             :          * Save the finished size estimates.
    1272             :          */
    1273             :         int         i;
    1274             : 
    1275             :         Assert(parent_rows > 0);
    1276       25396 :         rel->tuples = parent_tuples;
    1277       25396 :         rel->rows = parent_rows;
    1278       25396 :         rel->reltarget->width = rint(parent_size / parent_rows);
    1279      236436 :         for (i = 0; i < nattrs; i++)
    1280      211040 :             rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);
    1281             : 
    1282             :         /*
    1283             :          * Note that we leave rel->pages as zero; this is important to avoid
    1284             :          * double-counting the appendrel tree in total_table_pages.
    1285             :          */
    1286             :     }
    1287             :     else
    1288             :     {
    1289             :         /*
    1290             :          * All children were excluded by constraints, so mark the whole
    1291             :          * appendrel dummy.  We must do this in this phase so that the rel's
    1292             :          * dummy-ness is visible when we generate paths for other rels.
    1293             :          */
    1294         294 :         set_dummy_rel_pathlist(rel);
    1295             :     }
    1296             : 
    1297       25690 :     pfree(parent_attrsizes);
    1298       25690 : }
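/*
 * Self-contained sketch (illustrative, not part of allpaths.c): the
 * row-weighted averaging used above for rel->reltarget->width, extracted
 * into a standalone helper.
 */
static int32
example_weighted_width(const double *child_rows, const int32 *child_widths,
                       int nchildren)
{
    double      total_rows = 0.0;
    double      total_size = 0.0;

    for (int i = 0; i < nchildren; i++)
    {
        total_rows += child_rows[i];
        total_size += (double) child_widths[i] * child_rows[i];
    }

    /* caller must guarantee at least one live child, hence total_rows > 0 */
    return (int32) rint(total_size / total_rows);
}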
    1299             : 
    1300             : /*
    1301             :  * set_append_rel_pathlist
    1302             :  *    Build access paths for an "append relation"
    1303             :  */
    1304             : static void
    1305       25396 : set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
    1306             :                         Index rti, RangeTblEntry *rte)
    1307             : {
    1308       25396 :     int         parentRTindex = rti;
    1309       25396 :     List       *live_childrels = NIL;
    1310             :     ListCell   *l;
    1311             : 
    1312             :     /*
    1313             :      * Generate access paths for each member relation, and remember the
    1314             :      * non-dummy children.
    1315             :      */
    1316      134392 :     foreach(l, root->append_rel_list)
    1317             :     {
    1318      108996 :         AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
    1319             :         int         childRTindex;
    1320             :         RangeTblEntry *childRTE;
    1321             :         RelOptInfo *childrel;
    1322             : 
    1323             :         /* append_rel_list contains all append rels; ignore others */
    1324      108996 :         if (appinfo->parent_relid != parentRTindex)
    1325       51090 :             continue;
    1326             : 
    1327             :         /* Re-locate the child RTE and RelOptInfo */
    1328       57906 :         childRTindex = appinfo->child_relid;
    1329       57906 :         childRTE = root->simple_rte_array[childRTindex];
    1330       57906 :         childrel = root->simple_rel_array[childRTindex];
    1331             : 
    1332             :         /*
    1333             :          * If set_append_rel_size() decided the parent appendrel was
    1334             :          * parallel-unsafe at some point after visiting this child rel, we
    1335             :          * need to propagate the unsafety marking down to the child, so that
    1336             :          * we don't generate useless partial paths for it.
    1337             :          */
    1338       57906 :         if (!rel->consider_parallel)
    1339       15498 :             childrel->consider_parallel = false;
    1340             : 
    1341             :         /*
    1342             :          * Compute the child's access paths.
    1343             :          */
    1344       57906 :         set_rel_pathlist(root, childrel, childRTindex, childRTE);
    1345             : 
    1346             :         /*
    1347             :          * If child is dummy, ignore it.
    1348             :          */
    1349       57906 :         if (IS_DUMMY_REL(childrel))
    1350         276 :             continue;
    1351             : 
    1352             :         /*
    1353             :          * Child is live, so add it to the live_childrels list for use below.
    1354             :          */
    1355       57630 :         live_childrels = lappend(live_childrels, childrel);
    1356             :     }
    1357             : 
    1358             :     /* Add paths to the append relation. */
    1359       25396 :     add_paths_to_append_rel(root, rel, live_childrels);
    1360       25396 : }
    1361             : 
    1362             : /*
    1363             :  * set_grouped_rel_pathlist
    1364             :  *    If a grouped relation for the given 'rel' exists, build partial
    1365             :  *    aggregation paths for it.
    1366             :  */
    1367             : static void
    1368      523086 : set_grouped_rel_pathlist(PlannerInfo *root, RelOptInfo *rel)
    1369             : {
    1370             :     RelOptInfo *grouped_rel;
    1371             : 
    1372             :     /*
    1373             :      * If there are no aggregate expressions or grouping expressions, eager
    1374             :      * aggregation is not possible.
    1375             :      */
    1376      523086 :     if (root->agg_clause_list == NIL ||
    1377        3332 :         root->group_expr_list == NIL)
    1378      520018 :         return;
    1379             : 
    1380             :     /* Add paths to the grouped base relation if one exists. */
    1381        3068 :     grouped_rel = rel->grouped_rel;
    1382        3068 :     if (grouped_rel)
    1383             :     {
    1384             :         Assert(IS_GROUPED_REL(grouped_rel));
    1385             : 
    1386         586 :         generate_grouped_paths(root, grouped_rel, rel);
    1387         586 :         set_cheapest(grouped_rel);
    1388             :     }
    1389             : }
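/*
 * Example (illustrative; table names are hypothetical): eager aggregation
 * builds grouped relations so partial aggregation can happen below a join.
 * For
 *
 *     SELECT a.x, sum(b.v)
 *     FROM a JOIN b ON a.x = b.x
 *     GROUP BY a.x;
 *
 * the grouped relation for "b" can partially compute sum(b.v) per b.x
 * before the join, shrinking the join's input; a finalize step above the
 * join completes the aggregation.
 */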
    1390             : 
    1391             : 
    1392             : /*
    1393             :  * add_paths_to_append_rel
    1394             :  *    Generate paths for the given append relation, using the supplied set
    1395             :  *    of non-dummy child rels.
    1396             :  *
    1397             :  * The function collects all parameterizations and orderings supported by the
    1398             :  * non-dummy children. For every such parameterization or ordering, it creates
    1399             :  * an append path collecting one path from each non-dummy child with the
    1400             :  * given parameterization or ordering.  Similarly, it collects partial paths from
    1401             :  * non-dummy children to create partial append paths.
    1402             :  */
    1403             : void
    1404       46520 : add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
    1405             :                         List *live_childrels)
    1406             : {
    1407       46520 :     List       *subpaths = NIL;
    1408       46520 :     bool        subpaths_valid = true;
    1409       46520 :     List       *startup_subpaths = NIL;
    1410       46520 :     bool        startup_subpaths_valid = true;
    1411       46520 :     List       *partial_subpaths = NIL;
    1412       46520 :     List       *pa_partial_subpaths = NIL;
    1413       46520 :     List       *pa_nonpartial_subpaths = NIL;
    1414       46520 :     bool        partial_subpaths_valid = true;
    1415             :     bool        pa_subpaths_valid;
    1416       46520 :     List       *all_child_pathkeys = NIL;
    1417       46520 :     List       *all_child_outers = NIL;
    1418             :     ListCell   *l;
    1419       46520 :     double      partial_rows = -1;
    1420             : 
    1421             :     /* If appropriate, consider parallel append */
    1422       46520 :     pa_subpaths_valid = enable_parallel_append && rel->consider_parallel;
    1423             : 
    1424             :     /*
    1425             :      * For every non-dummy child, remember the cheapest path.  Also, identify
    1426             :      * all pathkeys (orderings) and parameterizations (required_outer sets)
    1427             :      * available for the non-dummy member relations.
    1428             :      */
    1429      148878 :     foreach(l, live_childrels)
    1430             :     {
    1431      102358 :         RelOptInfo *childrel = lfirst(l);
    1432             :         ListCell   *lcp;
    1433      102358 :         Path       *cheapest_partial_path = NULL;
    1434             : 
    1435             :         /*
    1436             :          * If child has an unparameterized cheapest-total path, add that to
    1437             :          * the unparameterized Append path we are constructing for the parent.
    1438             :          * If not, there's no workable unparameterized path.
    1439             :          *
    1440             :          * With partitionwise aggregates, the child rel's pathlist may be
    1441             :          * empty, so don't assume that a path exists here.
    1442             :          */
    1443      102358 :         if (childrel->pathlist != NIL &&
    1444      102358 :             childrel->cheapest_total_path->param_info == NULL)
    1445      101626 :             accumulate_append_subpath(childrel->cheapest_total_path,
    1446             :                                       &subpaths, NULL);
    1447             :         else
    1448         732 :             subpaths_valid = false;
    1449             : 
    1450             :         /*
    1451             :          * When the planner is considering cheap startup plans, we'll also
    1452             :          * collect all the cheapest_startup_paths (if set) and build an
    1453             :          * AppendPath containing those as subpaths.
    1454             :          */
    1455      102358 :         if (rel->consider_startup && childrel->cheapest_startup_path != NULL)
    1456        1730 :         {
    1457             :             Path       *cheapest_path;
    1458             : 
    1459             :             /*
    1460             :              * If the query gives an indication of how many tuples it will
    1461             :              * actually fetch, try to choose the path that is cheapest at
    1462             :              * producing that number of tuples.
    1463             :              */
    1464        1730 :             if (root->tuple_fraction > 0.0)
    1465             :                 cheapest_path =
    1466        1730 :                     get_cheapest_fractional_path(childrel,
    1467             :                                                  root->tuple_fraction);
    1468             :             else
    1469           0 :                 cheapest_path = childrel->cheapest_startup_path;
    1470             : 
    1471             :             /* cheapest_startup_path must not be a parameterized path. */
    1472             :             Assert(cheapest_path->param_info == NULL);
    1473        1730 :             accumulate_append_subpath(cheapest_path,
    1474             :                                       &startup_subpaths,
    1475             :                                       NULL);
    1476             :         }
    1477             :         else
    1478      100628 :             startup_subpaths_valid = false;
    1479             : 
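        /*
         * Note on root->tuple_fraction (illustrative): when it is >= 1.0 it
         * is an absolute row count, and when 0 < tuple_fraction < 1.0 it is
         * a fraction of the total rows.  For example, "SELECT ... LIMIT 10"
         * yields tuple_fraction = 10, and get_cheapest_fractional_path()
         * then favors a child path that can produce its first 10 rows
         * cheaply even if its total cost is higher.
         */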
    1480             : 
    1481             :         /* Same idea, but for a partial plan. */
    1482      102358 :         if (childrel->partial_pathlist != NIL)
    1483             :         {
    1484       63282 :             cheapest_partial_path = linitial(childrel->partial_pathlist);
    1485       63282 :             accumulate_append_subpath(cheapest_partial_path,
    1486             :                                       &partial_subpaths, NULL);
    1487             :         }
    1488             :         else
    1489       39076 :             partial_subpaths_valid = false;
    1490             : 
    1491             :         /*
    1492             :          * Same idea, but for a parallel append mixing partial and non-partial
    1493             :          * paths.
    1494             :          */
    1495      102358 :         if (pa_subpaths_valid)
    1496             :         {
    1497       77110 :             Path       *nppath = NULL;
    1498             : 
    1499             :             nppath =
    1500       77110 :                 get_cheapest_parallel_safe_total_inner(childrel->pathlist);
    1501             : 
    1502       77110 :             if (cheapest_partial_path == NULL && nppath == NULL)
    1503             :             {
    1504             :                 /* Neither a partial nor a parallel-safe path?  Forget it. */
    1505         546 :                 pa_subpaths_valid = false;
    1506             :             }
    1507       76564 :             else if (nppath == NULL ||
    1508       62832 :                      (cheapest_partial_path != NULL &&
    1509       62832 :                       cheapest_partial_path->total_cost < nppath->total_cost))
    1510             :             {
    1511             :                 /* Partial path is cheaper or the only option. */
    1512             :                 Assert(cheapest_partial_path != NULL);
    1513       62692 :                 accumulate_append_subpath(cheapest_partial_path,
    1514             :                                           &pa_partial_subpaths,
    1515             :                                           &pa_nonpartial_subpaths);
    1516             :             }
    1517             :             else
    1518             :             {
    1519             :                 /*
    1520             :                  * Either we've got only a non-partial path, or we think that
    1521             :                  * a single backend can execute the best non-partial path
    1522             :                  * faster than all the parallel backends working together can
    1523             :                  * execute the best partial path.
    1524             :                  *
    1525             :                  * It might make sense to be more aggressive here.  Even if
    1526             :                  * the best non-partial path is more expensive than the best
    1527             :                  * partial path, it could still be better to choose the
    1528             :                  * non-partial path if there are several such paths that can
    1529             :                  * be given to different workers.  For now, we don't try to
    1530             :                  * figure that out.
    1531             :                  */
    1532       13872 :                 accumulate_append_subpath(nppath,
    1533             :                                           &pa_nonpartial_subpaths,
    1534             :                                           NULL);
    1535             :             }
    1536             :         }
    1537             : 
    1538             :         /*
    1539             :          * Collect lists of all the available path orderings and
    1540             :          * parameterizations for all the children.  We use these as a
    1541             :          * heuristic to indicate which sort orderings and parameterizations we
    1542             :          * should build Append and MergeAppend paths for.
    1543             :          */
    1544      241916 :         foreach(lcp, childrel->pathlist)
    1545             :         {
    1546      139558 :             Path       *childpath = (Path *) lfirst(lcp);
    1547      139558 :             List       *childkeys = childpath->pathkeys;
    1548      139558 :             Relids      childouter = PATH_REQ_OUTER(childpath);
    1549             : 
    1550             :             /* Unsorted paths don't contribute to pathkey list */
    1551      139558 :             if (childkeys != NIL)
    1552             :             {
    1553             :                 ListCell   *lpk;
    1554       36854 :                 bool        found = false;
    1555             : 
    1556             :                 /* Have we already seen this ordering? */
    1557       37084 :                 foreach(lpk, all_child_pathkeys)
    1558             :                 {
    1559       24916 :                     List       *existing_pathkeys = (List *) lfirst(lpk);
    1560             : 
    1561       24916 :                     if (compare_pathkeys(existing_pathkeys,
    1562             :                                          childkeys) == PATHKEYS_EQUAL)
    1563             :                     {
    1564       24686 :                         found = true;
    1565       24686 :                         break;
    1566             :                     }
    1567             :                 }
    1568       36854 :                 if (!found)
    1569             :                 {
    1570             :                     /* No, so add it to all_child_pathkeys */
    1571       12168 :                     all_child_pathkeys = lappend(all_child_pathkeys,
    1572             :                                                  childkeys);
    1573             :                 }
    1574             :             }
    1575             : 
    1576             :             /* Unparameterized paths don't contribute to param-set list */
    1577      139558 :             if (childouter)
    1578             :             {
    1579             :                 ListCell   *lco;
    1580        6596 :                 bool        found = false;
    1581             : 
    1582             :                 /* Have we already seen this param set? */
    1583        7316 :                 foreach(lco, all_child_outers)
    1584             :                 {
    1585        4810 :                     Relids      existing_outers = (Relids) lfirst(lco);
    1586             : 
    1587        4810 :                     if (bms_equal(existing_outers, childouter))
    1588             :                     {
    1589        4090 :                         found = true;
    1590        4090 :                         break;
    1591             :                     }
    1592             :                 }
    1593        6596 :                 if (!found)
    1594             :                 {
    1595             :                     /* No, so add it to all_child_outers */
    1596        2506 :                     all_child_outers = lappend(all_child_outers,
    1597             :                                                childouter);
    1598             :                 }
    1599             :             }
    1600             :         }
    1601             :     }
    1602             : 
    1603             :     /*
    1604             :      * If we found unparameterized paths for all children, build an unordered,
    1605             :      * unparameterized Append path for the rel.  (Note: this is correct even
    1606             :      * if we have zero or one live subpath due to constraint exclusion.)
    1607             :      */
    1608       46520 :     if (subpaths_valid)
    1609       46208 :         add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL,
    1610             :                                                   NIL, NULL, 0, false,
    1611             :                                                   -1));
    1612             : 
    1613             :     /* build an AppendPath for the cheap startup paths, if valid */
    1614       46520 :     if (startup_subpaths_valid)
    1615         700 :         add_path(rel, (Path *) create_append_path(root, rel, startup_subpaths,
    1616             :                                                   NIL, NIL, NULL, 0, false, -1));
    1617             : 
    1618             :     /*
    1619             :      * Consider an append of unordered, unparameterized partial paths.  Make
    1620             :      * it parallel-aware if possible.
    1621             :      */
    1622       46520 :     if (partial_subpaths_valid && partial_subpaths != NIL)
    1623             :     {
    1624             :         AppendPath *appendpath;
    1625             :         ListCell   *lc;
    1626       27738 :         int         parallel_workers = 0;
    1627             : 
    1628             :         /* Find the highest number of workers requested for any subpath. */
    1629       95326 :         foreach(lc, partial_subpaths)
    1630             :         {
    1631       67588 :             Path       *path = lfirst(lc);
    1632             : 
    1633       67588 :             parallel_workers = Max(parallel_workers, path->parallel_workers);
    1634             :         }
    1635             :         Assert(parallel_workers > 0);
    1636             : 
    1637             :         /*
    1638             :          * If the use of parallel append is permitted, always request at least
    1639             :          * log2(# of children) workers.  We assume it can be useful to have
    1640             :          * extra workers in this case because they will be spread out across
    1641             :          * the children.  The precise formula is just a guess, but we don't
    1642             :          * want to end up with a radically different answer for a table with N
    1643             :          * partitions vs. an unpartitioned table with the same data, so the
    1644             :          * use of some kind of log-scaling here seems to make some sense.
    1645             :          */
    1646       27738 :         if (enable_parallel_append)
    1647             :         {
    1648       27690 :             parallel_workers = Max(parallel_workers,
    1649             :                                    pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
    1650       27690 :             parallel_workers = Min(parallel_workers,
    1651             :                                    max_parallel_workers_per_gather);
    1652             :         }
    1653             :         Assert(parallel_workers > 0);
    1654             : 
    1655             :         /* Generate a partial append path. */
    1656       27738 :         appendpath = create_append_path(root, rel, NIL, partial_subpaths,
    1657             :                                         NIL, NULL, parallel_workers,
    1658             :                                         enable_parallel_append,
    1659             :                                         -1);
    1660             : 
    1661             :         /*
    1662             :          * Make sure any subsequent partial paths use the same row count
    1663             :          * estimate.
    1664             :          */
    1665       27738 :         partial_rows = appendpath->path.rows;
    1666             : 
    1667             :         /* Add the path. */
    1668       27738 :         add_partial_path(rel, (Path *) appendpath);
    1669             :     }
    1670             : 
    1671             :     /*
    1672             :      * Consider a parallel-aware append using a mix of partial and non-partial
    1673             :      * paths.  (This only makes sense if there's at least one child which has
    1674             :      * a non-partial path that is substantially cheaper than any partial path;
    1675             :      * otherwise, we should use the append path added in the previous step.)
    1676             :      */
    1677       46520 :     if (pa_subpaths_valid && pa_nonpartial_subpaths != NIL)
    1678             :     {
    1679             :         AppendPath *appendpath;
    1680             :         ListCell   *lc;
    1681        4966 :         int         parallel_workers = 0;
    1682             : 
    1683             :         /*
    1684             :          * Find the highest number of workers requested for any partial
    1685             :          * subpath.
    1686             :          */
    1687        5868 :         foreach(lc, pa_partial_subpaths)
    1688             :         {
    1689         902 :             Path       *path = lfirst(lc);
    1690             : 
    1691         902 :             parallel_workers = Max(parallel_workers, path->parallel_workers);
    1692             :         }
    1693             : 
    1694             :         /*
    1695             :          * Same formula here as above.  It's even more important in this
    1696             :          * instance because the non-partial paths won't contribute anything to
    1697             :          * the planned number of parallel workers.
    1698             :          */
    1699        4966 :         parallel_workers = Max(parallel_workers,
    1700             :                                pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
    1701        4966 :         parallel_workers = Min(parallel_workers,
    1702             :                                max_parallel_workers_per_gather);
    1703             :         Assert(parallel_workers > 0);
    1704             : 
    1705        4966 :         appendpath = create_append_path(root, rel, pa_nonpartial_subpaths,
    1706             :                                         pa_partial_subpaths,
    1707             :                                         NIL, NULL, parallel_workers, true,
    1708             :                                         partial_rows);
    1709        4966 :         add_partial_path(rel, (Path *) appendpath);
    1710             :     }
    1711             : 
    1712             :     /*
    1713             :      * Also build unparameterized ordered append paths based on the collected
    1714             :      * list of child pathkeys.
    1715             :      */
    1716       46520 :     if (subpaths_valid)
    1717       46208 :         generate_orderedappend_paths(root, rel, live_childrels,
    1718             :                                      all_child_pathkeys);
    1719             : 
    1720             :     /*
    1721             :      * Build Append paths for each parameterization seen among the child rels.
    1722             :      * (This may look pretty expensive, but in most cases of practical
    1723             :      * interest, the child rels will expose mostly the same parameterizations,
    1724             :      * so that not that many cases actually get considered here.)
    1725             :      *
    1726             :      * The Append node itself cannot enforce quals, so all qual checking must
    1727             :      * be done in the child paths.  This means that to have a parameterized
    1728             :      * Append path, we must have the exact same parameterization for each
    1729             :      * child path; otherwise some children might be failing to check the
    1730             :      * moved-down quals.  To make them match up, we can try to increase the
    1731             :      * parameterization of lesser-parameterized paths.
    1732             :      */
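    /*
     * Example (illustrative; table names are hypothetical): in a nestloop
     * such as
     *
     *     SELECT ... FROM t1 JOIN parted p ON p.key = t1.key
     *
     * each child of "parted" may offer an index path parameterized by t1.
     * Because Append cannot re-check p.key = t1.key itself, the loop below
     * builds a parameterized Append only if every child can supply a path
     * parameterized by exactly {t1}.
     */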
    1733       49026 :     foreach(l, all_child_outers)
    1734             :     {
    1735        2506 :         Relids      required_outer = (Relids) lfirst(l);
    1736             :         ListCell   *lcr;
    1737             : 
    1738             :         /* Select the child paths for an Append with this parameterization */
    1739        2506 :         subpaths = NIL;
    1740        2506 :         subpaths_valid = true;
    1741        9198 :         foreach(lcr, live_childrels)
    1742             :         {
    1743        6704 :             RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
    1744             :             Path       *subpath;
    1745             : 
    1746        6704 :             if (childrel->pathlist == NIL)
    1747             :             {
    1748             :                 /* failed to make a suitable path for this child */
    1749           0 :                 subpaths_valid = false;
    1750           0 :                 break;
    1751             :             }
    1752             : 
    1753        6704 :             subpath = get_cheapest_parameterized_child_path(root,
    1754             :                                                             childrel,
    1755             :                                                             required_outer);
    1756        6704 :             if (subpath == NULL)
    1757             :             {
    1758             :                 /* failed to make a suitable path for this child */
    1759          12 :                 subpaths_valid = false;
    1760          12 :                 break;
    1761             :             }
    1762        6692 :             accumulate_append_subpath(subpath, &subpaths, NULL);
    1763             :         }
    1764             : 
    1765        2506 :         if (subpaths_valid)
    1766        2494 :             add_path(rel, (Path *)
    1767        2494 :                      create_append_path(root, rel, subpaths, NIL,
    1768             :                                         NIL, required_outer, 0, false,
    1769             :                                         -1));
    1770             :     }
    1771             : 
    1772             :     /*
    1773             :      * When there is only a single child relation, the Append path can inherit
    1774             :      * any ordering available for the child rel's path, so that it's useful to
    1775             :      * consider ordered partial paths.  Above we only considered the cheapest
    1776             :      * partial path for each child, but let's also make paths using any
    1777             :      * partial paths that have pathkeys.
    1778             :      */
    1779       46520 :     if (list_length(live_childrels) == 1)
    1780             :     {
    1781       14380 :         RelOptInfo *childrel = (RelOptInfo *) linitial(live_childrels);
    1782             : 
    1783             :         /* skip the cheapest partial path, since we already used that above */
    1784       14584 :         for_each_from(l, childrel->partial_pathlist, 1)
    1785             :         {
    1786         204 :             Path       *path = (Path *) lfirst(l);
    1787             :             AppendPath *appendpath;
    1788             : 
    1789             :             /* skip paths with no pathkeys. */
    1790         204 :             if (path->pathkeys == NIL)
    1791           0 :                 continue;
    1792             : 
    1793         204 :             appendpath = create_append_path(root, rel, NIL, list_make1(path),
    1794             :                                             NIL, NULL,
    1795             :                                             path->parallel_workers, true,
    1796             :                                             partial_rows);
    1797         204 :             add_partial_path(rel, (Path *) appendpath);
    1798             :         }
    1799             :     }
    1800       46520 : }
    1801             : 
    1802             : /*
    1803             :  * generate_orderedappend_paths
    1804             :  *      Generate ordered append paths for an append relation
    1805             :  *
    1806             :  * Usually we generate MergeAppend paths here, but there are some special
    1807             :  * cases where we can generate simple Append paths, because the subpaths
    1808             :  * can provide tuples in the required order already.
    1809             :  *
    1810             :  * We generate a path for each ordering (pathkey list) appearing in
    1811             :  * all_child_pathkeys.
    1812             :  *
    1813             :  * We consider the cheapest-startup and cheapest-total cases, and also the
    1814             :  * cheapest-fractional case when not all tuples need to be retrieved.  For each
    1815             :  * interesting ordering, we collect all the cheapest startup subpaths, all the
    1816             :  * cheapest total paths, and, if applicable, all the cheapest fractional paths,
    1817             :  * and build a suitable path for each case.
    1818             :  *
    1819             :  * We don't currently generate any parameterized ordered paths here.  While
    1820             :  * it would not take much more code here to do so, it's very unclear that it
    1821             :  * is worth the planning cycles to investigate such paths: there's little
    1822             :  * use for an ordered path on the inside of a nestloop.  In fact, it's likely
    1823             :  * that the current coding of add_path would reject such paths out of hand,
    1824             :  * because add_path gives no credit for sort ordering of parameterized paths,
    1825             :  * and a parameterized MergeAppend is going to be more expensive than the
    1826             :  * corresponding parameterized Append path.  If we ever try harder to support
    1827             :  * parameterized mergejoin plans, it might be worth adding support for
    1828             :  * parameterized paths here to feed such joins.  (See notes in
    1829             :  * optimizer/README for why that might not ever happen, though.)
    1830             :  */
    1831             : static void
    1832       46208 : generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
    1833             :                              List *live_childrels,
    1834             :                              List *all_child_pathkeys)
    1835             : {
    1836             :     ListCell   *lcp;
    1837       46208 :     List       *partition_pathkeys = NIL;
    1838       46208 :     List       *partition_pathkeys_desc = NIL;
    1839       46208 :     bool        partition_pathkeys_partial = true;
    1840       46208 :     bool        partition_pathkeys_desc_partial = true;
    1841             : 
    1842             :     /*
    1843             :      * Some partitioned table setups may allow us to use an Append node
    1844             :      * instead of a MergeAppend.  This is possible in cases such as RANGE
    1845             :      * partitioned tables where it's guaranteed that an earlier partition must
    1846             :      * contain rows which come earlier in the sort order.  To detect whether
    1847             :      * this is relevant, build pathkey descriptions of the partition ordering,
    1848             :      * for both forward and reverse scans.
    1849             :      */
    1850       74594 :     if (rel->part_scheme != NULL && IS_SIMPLE_REL(rel) &&
    1851       28386 :         partitions_are_ordered(rel->boundinfo, rel->live_parts))
    1852             :     {
    1853       23814 :         partition_pathkeys = build_partition_pathkeys(root, rel,
    1854             :                                                       ForwardScanDirection,
    1855             :                                                       &partition_pathkeys_partial);
    1856             : 
    1857       23814 :         partition_pathkeys_desc = build_partition_pathkeys(root, rel,
    1858             :                                                            BackwardScanDirection,
    1859             :                                                            &partition_pathkeys_desc_partial);
    1860             : 
    1861             :         /*
    1862             :          * You might think we should truncate_useless_pathkeys here, but
    1863             :          * allowing partition keys which are a subset of the query's pathkeys
    1864             :          * can often be useful.  For example, consider a table partitioned by
    1865             :          * RANGE (a, b), and a query with ORDER BY a, b, c.  If we have child
    1866             :          * paths that can produce the a, b, c ordering (perhaps via indexes on
    1867             :          * (a, b, c)) then it works to consider the appendrel output as
    1868             :          * ordered by a, b, c.
    1869             :          */
    1870             :     }
    1871             : 
    1872             :     /* Now consider each interesting sort ordering */
    1873       58316 :     foreach(lcp, all_child_pathkeys)
    1874             :     {
    1875       12108 :         List       *pathkeys = (List *) lfirst(lcp);
    1876       12108 :         List       *startup_subpaths = NIL;
    1877       12108 :         List       *total_subpaths = NIL;
    1878       12108 :         List       *fractional_subpaths = NIL;
    1879       12108 :         bool        startup_neq_total = false;
    1880       12108 :         bool        fraction_neq_total = false;
    1881             :         bool        match_partition_order;
    1882             :         bool        match_partition_order_desc;
    1883             :         int         end_index;
    1884             :         int         first_index;
    1885             :         int         direction;
    1886             : 
    1887             :         /*
    1888             :          * Determine if this sort ordering matches any partition pathkeys we
    1889             :          * have, for both ascending and descending partition order.  If the
    1890             :          * partition pathkeys happen to be contained in pathkeys then it still
    1891             :          * works, as described above, providing that the partition pathkeys
    1892             :          * are complete and not just a prefix of the partition keys.  (In such
    1893             :          * cases we'll be relying on the child paths to have sorted the
    1894             :          * lower-order columns of the required pathkeys.)
    1895             :          */
    1896       12108 :         match_partition_order =
    1897       21948 :             pathkeys_contained_in(pathkeys, partition_pathkeys) ||
    1898       10048 :             (!partition_pathkeys_partial &&
    1899         208 :              pathkeys_contained_in(partition_pathkeys, pathkeys));
    1900             : 
    1901       31536 :         match_partition_order_desc = !match_partition_order &&
    1902        9732 :             (pathkeys_contained_in(pathkeys, partition_pathkeys_desc) ||
    1903        9760 :              (!partition_pathkeys_desc_partial &&
    1904          64 :               pathkeys_contained_in(partition_pathkeys_desc, pathkeys)));
    1905             : 
    1906             :         /*
    1907             :          * When the required pathkeys match the reverse of the partition
    1908             :          * order, we must build the list of paths in reverse starting with the
    1909             :          * last matching partition first.  We can get away without making any
    1910             :          * special cases for this in the loop below by just looping backward
    1911             :          * over the child relations in this case.
    1912             :          */
    1913       12108 :         if (match_partition_order_desc)
    1914             :         {
    1915             :             /* loop backward */
    1916          48 :             first_index = list_length(live_childrels) - 1;
    1917          48 :             end_index = -1;
    1918          48 :             direction = -1;
    1919             : 
    1920             :             /*
    1921             :              * Set this to true to save us having to check for
    1922             :              * match_partition_order_desc in the loop below.
    1923             :              */
    1924          48 :             match_partition_order = true;
    1925             :         }
    1926             :         else
    1927             :         {
    1928             :             /* for all other cases, loop forward */
    1929       12060 :             first_index = 0;
    1930       12060 :             end_index = list_length(live_childrels);
    1931       12060 :             direction = 1;
    1932             :         }
    1933             : 
    1934             :         /* Select the child paths for this ordering... */
    1935       43482 :         for (int i = first_index; i != end_index; i += direction)
    1936             :         {
    1937       31374 :             RelOptInfo *childrel = list_nth_node(RelOptInfo, live_childrels, i);
    1938             :             Path       *cheapest_startup,
    1939             :                        *cheapest_total,
    1940       31374 :                        *cheapest_fractional = NULL;
    1941             : 
    1942             :             /* Locate the right paths, if they are available. */
    1943             :             cheapest_startup =
    1944       31374 :                 get_cheapest_path_for_pathkeys(childrel->pathlist,
    1945             :                                                pathkeys,
    1946             :                                                NULL,
    1947             :                                                STARTUP_COST,
    1948             :                                                false);
    1949             :             cheapest_total =
    1950       31374 :                 get_cheapest_path_for_pathkeys(childrel->pathlist,
    1951             :                                                pathkeys,
    1952             :                                                NULL,
    1953             :                                                TOTAL_COST,
    1954             :                                                false);
    1955             : 
    1956             :             /*
    1957             :              * If we can't find any paths with the right order, just use the
    1958             :              * cheapest-total path; we'll have to sort it later.
    1959             :              */
    1960       31374 :             if (cheapest_startup == NULL || cheapest_total == NULL)
    1961             :             {
    1962         340 :                 cheapest_startup = cheapest_total =
    1963             :                     childrel->cheapest_total_path;
    1964             :                 /* Assert we do have an unparameterized path for this child */
    1965             :                 Assert(cheapest_total->param_info == NULL);
    1966             :             }
    1967             : 
    1968             :             /*
    1969             :              * When building a fractional path, determine a cheapest
    1970             :              * fractional path for each child relation too. Looking at startup
    1971             :              * and total costs is not enough, because the cheapest fractional
    1972             :              * path may be dominated by two separate paths (one for startup,
    1973             :              * one for total).
    1977             :              */
    1978       31374 :             if (root->tuple_fraction > 0)
    1979             :             {
    1980         896 :                 double      path_fraction = root->tuple_fraction;
    1981             : 
    1982             :                 /*
    1983             :                  * We should not have a dummy child relation here.  However,
    1984             :                  * we cannot use childrel->rows to compute the tuple fraction,
    1985             :                  * as childrel can be an upper relation with an unset row
    1986             :                  * estimate.  Instead, we use the row estimate from the
    1987             :                  * cheapest_total path, which should already have been forced
    1988             :                  * to a sane value.
    1989             :                  */
    1990             :                 Assert(cheapest_total->rows > 0);
    1991             : 
    1992             :                 /* Convert absolute limit to a path fraction */
    1993         896 :                 if (path_fraction >= 1.0)
    1994         896 :                     path_fraction /= cheapest_total->rows;
    1995             : 
    1996             :                 cheapest_fractional =
    1997         896 :                     get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
    1998             :                                                               pathkeys,
    1999             :                                                               NULL,
    2000             :                                                               path_fraction);
    2001             : 
    2002             :                 /*
    2003             :                  * If we found no path with matching pathkeys, use the
    2004             :                  * cheapest total path instead.
    2005             :                  *
    2006             :                  * XXX We might consider partially sorted paths too (with an
    2007             :                  * incremental sort on top). But we'd have to build all the
    2008             :                  * incremental paths, do the costing etc.
    2009             :                  *
    2010             :                  * Also, notice whether we actually have different paths for
    2011             :                  * the "fractional" and "total" cases.  This helps avoid
    2012             :                  * generating two identical ordered append paths.
    2013             :                  */
    2014         896 :                 if (cheapest_fractional == NULL)
    2015          44 :                     cheapest_fractional = cheapest_total;
    2016         852 :                 else if (cheapest_fractional != cheapest_total)
    2017           0 :                     fraction_neq_total = true;
    2018             :             }
    2019             : 
    2020             :             /*
    2021             :              * Notice whether we actually have different paths for the
    2022             :              * "startup" and "total" cases.  This helps avoid generating two
    2023             :              * identical ordered append paths.
    2024             :              */
    2025       31374 :             if (cheapest_startup != cheapest_total)
    2026          96 :                 startup_neq_total = true;
    2027             : 
    2028             :             /*
    2029             :              * Collect the appropriate child paths.  The required logic varies
    2030             :              * for the Append and MergeAppend cases.
    2031             :              */
    2032       31374 :             if (match_partition_order)
    2033             :             {
    2034             :                 /*
    2035             :                  * We're going to make a plain Append path.  We don't need
    2036             :                  * most of what accumulate_append_subpath would do, but we do
    2037             :                  * want to cut out child Appends or MergeAppends if they have
    2038             :                  * just a single subpath (and hence aren't doing anything
    2039             :                  * useful).
    2040             :                  */
    2041        6476 :                 cheapest_startup = get_singleton_append_subpath(cheapest_startup);
    2042        6476 :                 cheapest_total = get_singleton_append_subpath(cheapest_total);
    2043             : 
    2044        6476 :                 startup_subpaths = lappend(startup_subpaths, cheapest_startup);
    2045        6476 :                 total_subpaths = lappend(total_subpaths, cheapest_total);
    2046             : 
    2047        6476 :                 if (cheapest_fractional)
    2048             :                 {
    2049         144 :                     cheapest_fractional = get_singleton_append_subpath(cheapest_fractional);
    2050         144 :                     fractional_subpaths = lappend(fractional_subpaths, cheapest_fractional);
    2051             :                 }
    2052             :             }
    2053             :             else
    2054             :             {
    2055             :                 /*
    2056             :                  * Otherwise, rely on accumulate_append_subpath to collect the
    2057             :                  * child paths for the MergeAppend.
    2058             :                  */
    2059       24898 :                 accumulate_append_subpath(cheapest_startup,
    2060             :                                           &startup_subpaths, NULL);
    2061       24898 :                 accumulate_append_subpath(cheapest_total,
    2062             :                                           &total_subpaths, NULL);
    2063             : 
    2064       24898 :                 if (cheapest_fractional)
    2065         752 :                     accumulate_append_subpath(cheapest_fractional,
    2066             :                                               &fractional_subpaths, NULL);
    2067             :             }
    2068             :         }
    2069             : 
    2070             :         /* ... and build the Append or MergeAppend paths */
    2071       12108 :         if (match_partition_order)
    2072             :         {
    2073             :             /* We only need Append */
    2074        2424 :             add_path(rel, (Path *) create_append_path(root,
    2075             :                                                       rel,
    2076             :                                                       startup_subpaths,
    2077             :                                                       NIL,
    2078             :                                                       pathkeys,
    2079             :                                                       NULL,
    2080             :                                                       0,
    2081             :                                                       false,
    2082             :                                                       -1));
    2083        2424 :             if (startup_neq_total)
    2084           0 :                 add_path(rel, (Path *) create_append_path(root,
    2085             :                                                           rel,
    2086             :                                                           total_subpaths,
    2087             :                                                           NIL,
    2088             :                                                           pathkeys,
    2089             :                                                           NULL,
    2090             :                                                           0,
    2091             :                                                           false,
    2092             :                                                           -1));
    2093             : 
    2094        2424 :             if (fractional_subpaths && fraction_neq_total)
    2095           0 :                 add_path(rel, (Path *) create_append_path(root,
    2096             :                                                           rel,
    2097             :                                                           fractional_subpaths,
    2098             :                                                           NIL,
    2099             :                                                           pathkeys,
    2100             :                                                           NULL,
    2101             :                                                           0,
    2102             :                                                           false,
    2103             :                                                           -1));
    2104             :         }
    2105             :         else
    2106             :         {
    2107             :             /* We need MergeAppend */
    2108        9684 :             add_path(rel, (Path *) create_merge_append_path(root,
    2109             :                                                             rel,
    2110             :                                                             startup_subpaths,
    2111             :                                                             pathkeys,
    2112             :                                                             NULL));
    2113        9684 :             if (startup_neq_total)
    2114          60 :                 add_path(rel, (Path *) create_merge_append_path(root,
    2115             :                                                                 rel,
    2116             :                                                                 total_subpaths,
    2117             :                                                                 pathkeys,
    2118             :                                                                 NULL));
    2119             : 
    2120        9684 :             if (fractional_subpaths && fraction_neq_total)
    2121           0 :                 add_path(rel, (Path *) create_merge_append_path(root,
    2122             :                                                                 rel,
    2123             :                                                                 fractional_subpaths,
    2124             :                                                                 pathkeys,
    2125             :                                                                 NULL));
    2126             :         }
    2127             :     }
    2128       46208 : }
    2129             : 
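/*
 * Editor's sketch for generate_orderedappend_paths() above; standalone C
 * with hypothetical names, not PostgreSQL code.  It shows two details of
 * the function in isolation: converting an absolute LIMIT into a path
 * fraction (e.g. LIMIT 100 over an estimated 10000 rows gives
 * 100.0 / 10000 = 0.01), and the first_index/end_index/direction trick
 * that lets one loop body walk the children forward or backward.
 */
#include <stdio.h>

/* a tuple_fraction >= 1.0 means an absolute row count; convert it */
static double
as_fraction(double tuple_fraction, double est_rows)
{
    if (tuple_fraction >= 1.0)
        tuple_fraction /= est_rows;
    return tuple_fraction;
}

static void
visit_children(int nchildren, int reverse)
{
    int         first_index,
                end_index,
                direction;

    if (reverse)
    {
        /* loop backward, as for a descending partition-order match */
        first_index = nchildren - 1;
        end_index = -1;
        direction = -1;
    }
    else
    {
        first_index = 0;
        end_index = nchildren;
        direction = 1;
    }

    for (int i = first_index; i != end_index; i += direction)
        printf("child %d\n", i);
}

int
main(void)
{
    printf("fraction: %g\n", as_fraction(100.0, 10000.0));  /* 0.01 */
    visit_children(3, 0);       /* children 0, 1, 2 */
    visit_children(3, 1);       /* children 2, 1, 0 */
    return 0;
}
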
    2130             : /*
    2131             :  * get_cheapest_parameterized_child_path
    2132             :  *      Get cheapest path for this relation that has exactly the requested
    2133             :  *      parameterization.
    2134             :  *
    2135             :  * Returns NULL if unable to create such a path.
    2136             :  */
    2137             : static Path *
    2138        6704 : get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
    2139             :                                       Relids required_outer)
    2140             : {
    2141             :     Path       *cheapest;
    2142             :     ListCell   *lc;
    2143             : 
    2144             :     /*
    2145             :      * Look up the cheapest existing path with no more than the needed
    2146             :      * parameterization.  If it has exactly the needed parameterization, we're
    2147             :      * done.
    2148             :      */
    2149        6704 :     cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
    2150             :                                               NIL,
    2151             :                                               required_outer,
    2152             :                                               TOTAL_COST,
    2153             :                                               false);
    2154             :     Assert(cheapest != NULL);
    2155        6704 :     if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
    2156        6364 :         return cheapest;
    2157             : 
    2158             :     /*
    2159             :      * Otherwise, we can "reparameterize" an existing path to match the given
    2160             :      * parameterization, which effectively means pushing down additional
    2161             :      * joinquals to be checked within the path's scan.  However, some existing
    2162             :      * paths might check the available joinquals already while others don't;
    2163             :      * therefore, it's not clear which existing path will be cheapest after
    2164             :      * reparameterization.  We have to go through them all and find out.
    2165             :      */
    2166         340 :     cheapest = NULL;
    2167        1180 :     foreach(lc, rel->pathlist)
    2168             :     {
    2169         840 :         Path       *path = (Path *) lfirst(lc);
    2170             : 
    2171             :         /* Can't use it if it needs more than requested parameterization */
    2172         840 :         if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
    2173          24 :             continue;
    2174             : 
    2175             :         /*
    2176             :          * Reparameterization can only increase the path's cost, so if it's
    2177             :          * already more expensive than the current cheapest, forget it.
    2178             :          */
    2179        1272 :         if (cheapest != NULL &&
    2180         456 :             compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
    2181         384 :             continue;
    2182             : 
    2183             :         /* Reparameterize if needed, then recheck cost */
    2184         432 :         if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
    2185             :         {
    2186         356 :             path = reparameterize_path(root, path, required_outer, 1.0);
    2187         356 :             if (path == NULL)
    2188          32 :                 continue;       /* failed to reparameterize this one */
    2189             :             Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
    2190             : 
    2191         324 :             if (cheapest != NULL &&
    2192           0 :                 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
    2193           0 :                 continue;
    2194             :         }
    2195             : 
    2196             :         /* We have a new best path */
    2197         400 :         cheapest = path;
    2198             :     }
    2199             : 
    2200             :     /* Return the best path, or NULL if we found no suitable candidate */
    2201         340 :     return cheapest;
    2202             : }
    2203             : 
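/*
 * Editor's sketch of the pruning pattern used by
 * get_cheapest_parameterized_child_path() above: compare costs before paying
 * for the expensive transformation, and again afterward, since the transform
 * can only increase cost.  Standalone C with hypothetical types, not the
 * PostgreSQL API.
 */
#include <stddef.h>

typedef struct Cand
{
    double      cost;
    int         needs_transform;
} Cand;

/* stand-in for reparameterize_path(): may raise the cost, may fail (NULL) */
static Cand *
transform(Cand *c)
{
    c->cost += 1.0;
    return c;
}

static Cand *
cheapest_candidate(Cand *cands, int ncands)
{
    Cand       *cheapest = NULL;

    for (int i = 0; i < ncands; i++)
    {
        Cand       *c = &cands[i];

        /* transforming only adds cost: skip already-losing candidates */
        if (cheapest != NULL && cheapest->cost <= c->cost)
            continue;

        if (c->needs_transform)
        {
            c = transform(c);
            if (c == NULL)
                continue;       /* transformation failed */
            /* recheck: the transform may have pushed the cost back up */
            if (cheapest != NULL && cheapest->cost <= c->cost)
                continue;
        }

        cheapest = c;
    }
    return cheapest;
}
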
    2204             : /*
    2205             :  * accumulate_append_subpath
    2206             :  *      Add a subpath to the list being built for an Append or MergeAppend.
    2207             :  *
    2208             :  * It's possible that the child is itself an Append or MergeAppend path, in
    2209             :  * which case we can "cut out the middleman" and just add its child paths to
    2210             :  * our own list.  (We don't try to do this earlier because we need to apply
    2211             :  * both levels of transformation to the quals.)
    2212             :  *
    2213             :  * Note that if we omit a child MergeAppend in this way, we are effectively
    2214             :  * omitting a sort step, which seems fine: if the parent is to be an Append,
    2215             :  * its result would be unsorted anyway, while if the parent is to be a
    2216             :  * MergeAppend, there's no point in a separate sort on a child.
    2217             :  *
    2218             :  * Normally, either path is a partial path and subpaths is a list of partial
    2219             :  * paths, or else path is a non-partial path and subpaths is a list of those.
    2220             :  * However, if path is a parallel-aware Append, then we add its partial path
    2221             :  * children to subpaths and the rest to special_subpaths.  If the latter is
    2222             :  * NULL, we don't flatten the path at all (unless it contains only partial
    2223             :  * paths).
    2224             :  */
    2225             : static void
    2226      300442 : accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths)
    2227             : {
    2228      300442 :     if (IsA(path, AppendPath))
    2229             :     {
    2230       15404 :         AppendPath *apath = (AppendPath *) path;
    2231             : 
    2232       15404 :         if (!apath->path.parallel_aware || apath->first_partial_path == 0)
    2233             :         {
    2234       15068 :             *subpaths = list_concat(*subpaths, apath->subpaths);
    2235       15068 :             return;
    2236             :         }
    2237         336 :         else if (special_subpaths != NULL)
    2238             :         {
    2239             :             List       *new_special_subpaths;
    2240             : 
    2241             :             /* Split Parallel Append into partial and non-partial subpaths */
    2242         168 :             *subpaths = list_concat(*subpaths,
    2243         168 :                                     list_copy_tail(apath->subpaths,
    2244             :                                                    apath->first_partial_path));
    2245         168 :             new_special_subpaths = list_copy_head(apath->subpaths,
    2246             :                                                   apath->first_partial_path);
    2247         168 :             *special_subpaths = list_concat(*special_subpaths,
    2248             :                                             new_special_subpaths);
    2249         168 :             return;
    2250             :         }
    2251             :     }
    2252      285038 :     else if (IsA(path, MergeAppendPath))
    2253             :     {
    2254        1076 :         MergeAppendPath *mpath = (MergeAppendPath *) path;
    2255             : 
    2256        1076 :         *subpaths = list_concat(*subpaths, mpath->subpaths);
    2257        1076 :         return;
    2258             :     }
    2259             : 
    2260      284130 :     *subpaths = lappend(*subpaths, path);
    2261             : }
    2262             : 
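/*
 * Editor's sketch of the "cut out the middleman" flattening performed by
 * accumulate_append_subpath() above, minus the partial/parallel-aware
 * bookkeeping: if the child is itself an append-style container, splice its
 * children in rather than nesting containers.  Hypothetical types; the
 * PostgreSQL List API is not used here.
 */
typedef enum NodeKind
{
    KIND_LEAF,
    KIND_APPEND
} NodeKind;

typedef struct PathNode
{
    NodeKind    kind;
    struct PathNode **children;
    int         nchildren;
} PathNode;

/* add 'n' to out[]; returns the new element count */
static int
accumulate(PathNode *n, PathNode **out, int count)
{
    if (n->kind == KIND_APPEND)
    {
        /* splice the container's children instead of the container */
        for (int i = 0; i < n->nchildren; i++)
            out[count++] = n->children[i];
        return count;
    }
    out[count++] = n;
    return count;
}
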
    2263             : /*
    2264             :  * get_singleton_append_subpath
    2265             :  *      Returns the single subpath of an Append/MergeAppend, or just
    2266             :  *      returns 'path' if it's not a single sub-path Append/MergeAppend.
    2267             :  *
    2268             :  * Note: 'path' must not be a parallel-aware path.
    2269             :  */
    2270             : static Path *
    2271       13096 : get_singleton_append_subpath(Path *path)
    2272             : {
    2273             :     Assert(!path->parallel_aware);
    2274             : 
    2275       13096 :     if (IsA(path, AppendPath))
    2276             :     {
    2277         388 :         AppendPath *apath = (AppendPath *) path;
    2278             : 
    2279         388 :         if (list_length(apath->subpaths) == 1)
    2280         192 :             return (Path *) linitial(apath->subpaths);
    2281             :     }
    2282       12708 :     else if (IsA(path, MergeAppendPath))
    2283             :     {
    2284         348 :         MergeAppendPath *mpath = (MergeAppendPath *) path;
    2285             : 
    2286         348 :         if (list_length(mpath->subpaths) == 1)
    2287           0 :             return (Path *) linitial(mpath->subpaths);
    2288             :     }
    2289             : 
    2290       12904 :     return path;
    2291             : }
    2292             : 
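/*
 * Editor's sketch of the single-level unwrap in
 * get_singleton_append_subpath() above: unlike the flattening sketch
 * earlier, this only strips a container that has exactly one child.
 * Hypothetical node type, not PostgreSQL code.
 */
typedef struct WrapNode
{
    int         is_append;      /* Append/MergeAppend-style container? */
    struct WrapNode **children;
    int         nchildren;
} WrapNode;

static WrapNode *
unwrap_singleton(WrapNode *p)
{
    if (p->is_append && p->nchildren == 1)
        return p->children[0];  /* drop the useless single-child container */
    return p;
}
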
    2293             : /*
    2294             :  * set_dummy_rel_pathlist
    2295             :  *    Build a dummy path for a relation that's been excluded by constraints
    2296             :  *
    2297             :  * Rather than inventing a special "dummy" path type, we represent this as an
    2298             :  * AppendPath with no members (see also IS_DUMMY_APPEND/IS_DUMMY_REL macros).
    2299             :  *
    2300             :  * (See also mark_dummy_rel, which does basically the same thing, but is
    2301             :  * typically used to change a rel into dummy state after we already made
    2302             :  * paths for it.)
    2303             :  */
    2304             : static void
    2305        1304 : set_dummy_rel_pathlist(RelOptInfo *rel)
    2306             : {
    2307             :     /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
    2308        1304 :     rel->rows = 0;
    2309        1304 :     rel->reltarget->width = 0;
    2310             : 
    2311             :     /* Discard any pre-existing paths; no further need for them */
    2312        1304 :     rel->pathlist = NIL;
    2313        1304 :     rel->partial_pathlist = NIL;
    2314             : 
    2315             :     /* Set up the dummy path */
    2316        1304 :     add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL,
    2317             :                                               NIL, rel->lateral_relids,
    2318             :                                               0, false, -1));
    2319             : 
    2320             :     /*
    2321             :      * We set the cheapest-path fields immediately, just in case they were
    2322             :      * pointing at some discarded path.  This is redundant in current usage
    2323             :      * because set_rel_pathlist will do it later, but it's cheap so we keep it
    2324             :      * for safety and consistency with mark_dummy_rel.
    2325             :      */
    2326        1304 :     set_cheapest(rel);
    2327        1304 : }
    2328             : 
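/*
 * Editor's sketch of why a zero-member Append works as the "dummy" marker
 * described above: any code that already iterates an Append's children
 * handles the empty case for free, yielding zero rows with no special node
 * type.  Hypothetical struct, not PostgreSQL code.
 */
typedef struct ApxAppend
{
    int         nchildren;
    double     *child_rows;
} ApxAppend;

static double
total_rows(const ApxAppend *a)
{
    double      rows = 0.0;

    for (int i = 0; i < a->nchildren; i++)
        rows += a->child_rows[i];
    return rows;                /* 0.0 for the empty (dummy) Append */
}
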
    2329             : /*
    2330             :  * find_window_run_conditions
    2331             :  *      Determine if 'wfunc' is really a WindowFunc and call its prosupport
    2332             :  *      function to discover the function's monotonic properties.  We then
    2333             :  *      see if 'opexpr' can be used to short-circuit execution.
    2334             :  *
    2335             :  * For example, row_number() over (order by ...) always produces a value one
    2336             :  * higher than the previous row's.  If someone has a window function in a subquery
    2337             :  * and has a WHERE clause in the outer query to filter rows <= 10, then we may
    2338             :  * as well stop processing the windowagg once the row number reaches 11.  Here
    2339             :  * we check if 'opexpr' might help us to stop doing needless extra processing
    2340             :  * in WindowAgg nodes.
    2341             :  *
    2342             :  * '*keep_original' is set to true if the caller should also use 'opexpr' for
    2343             :  * its original purpose.  This is set to false if the caller can assume that
    2344             :  * the run condition will handle all of the required filtering.
    2345             :  *
    2346             :  * Returns true if 'opexpr' was found to be useful and was added to the
    2347             :  * WindowFunc's runCondition.  We also set *keep_original accordingly and add
    2348             :  * 'attno' to *run_cond_attrs offset by FirstLowInvalidHeapAttributeNumber.
    2349             :  * If the 'opexpr' cannot be used then we set *keep_original to true and
    2350             :  * return false.
    2351             :  */
    2352             : static bool
    2353         240 : find_window_run_conditions(Query *subquery, AttrNumber attno,
    2354             :                            WindowFunc *wfunc, OpExpr *opexpr, bool wfunc_left,
    2355             :                            bool *keep_original, Bitmapset **run_cond_attrs)
    2356             : {
    2357             :     Oid         prosupport;
    2358             :     Expr       *otherexpr;
    2359             :     SupportRequestWFuncMonotonic req;
    2360             :     SupportRequestWFuncMonotonic *res;
    2361             :     WindowClause *wclause;
    2362             :     List       *opinfos;
    2363             :     OpExpr     *runopexpr;
    2364             :     Oid         runoperator;
    2365             :     ListCell   *lc;
    2366             : 
    2367         240 :     *keep_original = true;
    2368             : 
    2369         240 :     while (IsA(wfunc, RelabelType))
    2370           0 :         wfunc = (WindowFunc *) ((RelabelType *) wfunc)->arg;
    2371             : 
    2372             :     /* we can only work with window functions */
    2373         240 :     if (!IsA(wfunc, WindowFunc))
    2374          24 :         return false;
    2375             : 
    2376             :     /* can't use it if there are subplans in the WindowFunc */
    2377         216 :     if (contain_subplans((Node *) wfunc))
    2378           6 :         return false;
    2379             : 
    2380         210 :     prosupport = get_func_support(wfunc->winfnoid);
    2381             : 
    2382             :     /* Check if there's a support function for 'wfunc' */
    2383         210 :     if (!OidIsValid(prosupport))
    2384          18 :         return false;
    2385             : 
    2386             :     /* get the Expr from the other side of the OpExpr */
    2387         192 :     if (wfunc_left)
    2388         168 :         otherexpr = lsecond(opexpr->args);
    2389             :     else
    2390          24 :         otherexpr = linitial(opexpr->args);
    2391             : 
    2392             :     /*
    2393             :      * The value being compared must not change during the evaluation of the
    2394             :      * window partition.
    2395             :      */
    2396         192 :     if (!is_pseudo_constant_clause((Node *) otherexpr))
    2397           0 :         return false;
    2398             : 
    2399             :     /* find the window clause belonging to the window function */
    2400         192 :     wclause = (WindowClause *) list_nth(subquery->windowClause,
    2401         192 :                                         wfunc->winref - 1);
    2402             : 
    2403         192 :     req.type = T_SupportRequestWFuncMonotonic;
    2404         192 :     req.window_func = wfunc;
    2405         192 :     req.window_clause = wclause;
    2406             : 
    2407             :     /* call the support function */
    2408             :     res = (SupportRequestWFuncMonotonic *)
    2409         192 :         DatumGetPointer(OidFunctionCall1(prosupport,
    2410             :                                          PointerGetDatum(&req)));
    2411             : 
    2412             :     /*
    2413             :      * Nothing to do if the function is neither monotonically increasing nor
    2414             :      * monotonically decreasing.
    2415             :      */
    2416         192 :     if (res == NULL || res->monotonic == MONOTONICFUNC_NONE)
    2417           0 :         return false;
    2418             : 
    2419         192 :     runopexpr = NULL;
    2420         192 :     runoperator = InvalidOid;
    2421         192 :     opinfos = get_op_index_interpretation(opexpr->opno);
    2422             : 
    2423         192 :     foreach(lc, opinfos)
    2424             :     {
    2425         192 :         OpIndexInterpretation *opinfo = (OpIndexInterpretation *) lfirst(lc);
    2426         192 :         CompareType cmptype = opinfo->cmptype;
    2427             : 
    2428             :         /* handle < / <= */
    2429         192 :         if (cmptype == COMPARE_LT || cmptype == COMPARE_LE)
    2430             :         {
    2431             :             /*
    2432             :              * < / <= is supported in the form <wfunc> op <pseudoconst> for
    2433             :              * monotonically increasing functions, and in the form
    2434             :              * <pseudoconst> op <wfunc> for monotonically decreasing ones.
    2435             :              */
    2436         138 :             if ((wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)) ||
    2437          18 :                 (!wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)))
    2438             :             {
    2439         126 :                 *keep_original = false;
    2440         126 :                 runopexpr = opexpr;
    2441         126 :                 runoperator = opexpr->opno;
    2442             :             }
    2443         138 :             break;
    2444             :         }
    2445             :         /* handle > / >= */
    2446          54 :         else if (cmptype == COMPARE_GT || cmptype == COMPARE_GE)
    2447             :         {
    2448             :             /*
    2449             :              * > / >= is supported in the form <wfunc> op <pseudoconst> for
    2450             :              * monotonically decreasing functions, and in the form
    2451             :              * <pseudoconst> op <wfunc> for monotonically increasing ones.
    2452             :              */
    2453          18 :             if ((wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)) ||
    2454          12 :                 (!wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)))
    2455             :             {
    2456          18 :                 *keep_original = false;
    2457          18 :                 runopexpr = opexpr;
    2458          18 :                 runoperator = opexpr->opno;
    2459             :             }
    2460          18 :             break;
    2461             :         }
    2462             :         /* handle = */
    2463          36 :         else if (cmptype == COMPARE_EQ)
    2464             :         {
    2465             :             CompareType newcmptype;
    2466             : 
    2467             :             /*
    2468             :              * When both monotonically increasing and decreasing then the
    2469             :              * return value of the window function will be the same each time.
    2470             :              * We can simply use 'opexpr' as the run condition without
    2471             :              * modifying it.
    2472             :              */
    2473          36 :             if ((res->monotonic & MONOTONICFUNC_BOTH) == MONOTONICFUNC_BOTH)
    2474             :             {
    2475           6 :                 *keep_original = false;
    2476           6 :                 runopexpr = opexpr;
    2477           6 :                 runoperator = opexpr->opno;
    2478           6 :                 break;
    2479             :             }
    2480             : 
    2481             :             /*
    2482             :              * When monotonically increasing we make a qual with <wfunc> <=
    2483             :              * <value> or <value> >= <wfunc> in order to filter out values
    2484             :              * which are above the value in the equality condition.  For
    2485             :              * monotonically decreasing functions we want to filter values
    2486             :              * below the value in the equality condition.
    2487             :              */
    2488          30 :             if (res->monotonic & MONOTONICFUNC_INCREASING)
    2489          30 :                 newcmptype = wfunc_left ? COMPARE_LE : COMPARE_GE;
    2490             :             else
    2491           0 :                 newcmptype = wfunc_left ? COMPARE_GE : COMPARE_LE;
    2492             : 
    2493             :             /* We must keep the original equality qual */
    2494          30 :             *keep_original = true;
    2495          30 :             runopexpr = opexpr;
    2496             : 
    2497             :             /* determine the operator to use for the WindowFuncRunCondition */
    2498          30 :             runoperator = get_opfamily_member_for_cmptype(opinfo->opfamily_id,
    2499             :                                                           opinfo->oplefttype,
    2500             :                                                           opinfo->oprighttype,
    2501             :                                                           newcmptype);
    2502          30 :             break;
    2503             :         }
    2504             :     }
    2505             : 
    2506         192 :     if (runopexpr != NULL)
    2507             :     {
    2508             :         WindowFuncRunCondition *wfuncrc;
    2509             : 
    2510         180 :         wfuncrc = makeNode(WindowFuncRunCondition);
    2511         180 :         wfuncrc->opno = runoperator;
    2512         180 :         wfuncrc->inputcollid = runopexpr->inputcollid;
    2513         180 :         wfuncrc->wfunc_left = wfunc_left;
    2514         180 :         wfuncrc->arg = copyObject(otherexpr);
    2515             : 
    2516         180 :         wfunc->runCondition = lappend(wfunc->runCondition, wfuncrc);
    2517             : 
    2518             :         /* record that this attno was used in a run condition */
    2519         180 :         *run_cond_attrs = bms_add_member(*run_cond_attrs,
    2520             :                                          attno - FirstLowInvalidHeapAttributeNumber);
    2521         180 :         return true;
    2522             :     }
    2523             : 
    2524             :     /* unsupported OpExpr */
    2525          12 :     return false;
    2526             : }
    2527             : 
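/*
 * Editor's sketch of the monotonicity argument behind the run condition
 * machinery above: once a monotonically increasing value such as
 * row_number() fails "value <= bound", every later value fails too, so
 * evaluation can stop rather than filter row by row.  Standalone C with
 * hypothetical names; row_number() is modeled as a plain counter.
 */
#include <stdio.h>

int
main(void)
{
    const int   bound = 10;     /* e.g. WHERE rn <= 10 in the outer query */

    for (int rn = 1; rn <= 1000000; rn++)
    {
        if (!(rn <= bound))
            break;              /* run condition now false: stop entirely */
        printf("emit row %d\n", rn);
    }
    return 0;
}
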
    2528             : /*
    2529             :  * check_and_push_window_quals
    2530             :  *      Check if 'clause' is a qual that can be pushed into a WindowFunc
    2531             :  *      as a 'runCondition' qual.  These, when present, allow some unnecessary
    2532             :  *      work to be skipped during execution.
    2533             :  *
    2534             :  * 'run_cond_attrs' will be populated with all targetlist resnos of subquery
    2535             :  * targets (offset by FirstLowInvalidHeapAttributeNumber) that we pushed
    2536             :  * window quals for.
    2537             :  *
    2538             :  * Returns true if the caller still must keep the original qual or false if
    2539             :  * the caller can safely ignore the original qual because the WindowAgg node
    2540             :  * will use the runCondition to stop returning tuples.
    2541             :  */
    2542             : static bool
    2543         252 : check_and_push_window_quals(Query *subquery, Node *clause,
    2544             :                             Bitmapset **run_cond_attrs)
    2545             : {
    2546         252 :     OpExpr     *opexpr = (OpExpr *) clause;
    2547         252 :     bool        keep_original = true;
    2548             :     Var        *var1;
    2549             :     Var        *var2;
    2550             : 
    2551             :     /* We're only able to use OpExprs with 2 operands */
    2552         252 :     if (!IsA(opexpr, OpExpr))
    2553          18 :         return true;
    2554             : 
    2555         234 :     if (list_length(opexpr->args) != 2)
    2556           0 :         return true;
    2557             : 
    2558             :     /*
    2559             :      * Currently, we restrict this optimization to strict OpExprs.  The reason
    2560             :      * for this is that during execution, once the runcondition becomes false,
    2561             :      * we stop evaluating WindowFuncs.  To avoid leaving around stale window
    2562             :      * function result values, we set them to NULL.  Having only strict
    2563             :      * OpExprs here ensures that we properly filter out the tuples with NULLs
    2564             :      * in the top-level WindowAgg.
    2565             :      */
    2566         234 :     set_opfuncid(opexpr);
    2567         234 :     if (!func_strict(opexpr->opfuncid))
    2568           0 :         return true;
    2569             : 
    2570             :     /*
    2571             :      * Check for plain Vars that reference window functions in the subquery.
    2572             :      * If we find any, we'll ask find_window_run_conditions() if 'opexpr' can
    2573             :      * be used as part of the run condition.
    2574             :      */
    2575             : 
    2576             :     /* Check the left side of the OpExpr */
    2577         234 :     var1 = linitial(opexpr->args);
    2578         234 :     if (IsA(var1, Var) && var1->varattno > 0)
    2579             :     {
    2580         198 :         TargetEntry *tle = list_nth(subquery->targetList, var1->varattno - 1);
    2581         198 :         WindowFunc *wfunc = (WindowFunc *) tle->expr;
    2582             : 
    2583         198 :         if (find_window_run_conditions(subquery, tle->resno, wfunc, opexpr,
    2584             :                                        true, &keep_original, run_cond_attrs))
    2585         162 :             return keep_original;
    2586             :     }
    2587             : 
    2588             :     /* and check the right side */
    2589          72 :     var2 = lsecond(opexpr->args);
    2590          72 :     if (IsA(var2, Var) && var2->varattno > 0)
    2591             :     {
    2592          42 :         TargetEntry *tle = list_nth(subquery->targetList, var2->varattno - 1);
    2593          42 :         WindowFunc *wfunc = (WindowFunc *) tle->expr;
    2594             : 
    2595          42 :         if (find_window_run_conditions(subquery, tle->resno, wfunc, opexpr,
    2596             :                                        false, &keep_original, run_cond_attrs))
    2597          18 :             return keep_original;
    2598             :     }
    2599             : 
    2600          54 :     return true;
    2601             : }
    2602             : 
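/*
 * Editor's sketch of the strictness requirement discussed above: "strict"
 * means NULL in, NULL out.  Once the run condition stops evaluation, the
 * executor nulls out stale window-function results, so a strict comparison
 * yields NULL rather than true and the tuple is filtered.  Three-valued
 * model with hypothetical names, not PostgreSQL code.
 */
#include <stdbool.h>

typedef enum Tri
{
    TRI_FALSE,
    TRI_TRUE,
    TRI_NULL
} Tri;

/* a strict "<=": any NULL input yields NULL */
static Tri
strict_le(bool lnull, int l, bool rnull, int r)
{
    if (lnull || rnull)
        return TRI_NULL;
    return (l <= r) ? TRI_TRUE : TRI_FALSE;
}

/* a WHERE-style qual keeps a tuple only when the result is TRI_TRUE */
static bool
qual_passes(Tri t)
{
    return t == TRI_TRUE;
}
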
    2603             : /*
    2604             :  * set_subquery_pathlist
    2605             :  *      Generate SubqueryScan access paths for a subquery RTE
    2606             :  *
    2607             :  * We don't currently support generating parameterized paths for subqueries
    2608             :  * by pushing join clauses down into them; it seems too expensive to re-plan
    2609             :  * the subquery multiple times to consider different alternatives.
    2610             :  * (XXX that could stand to be reconsidered, now that we use Paths.)
    2611             :  * So the paths made here will be parameterized if the subquery contains
    2612             :  * LATERAL references, otherwise not.  As long as that's true, there's no need
    2613             :  * for a separate set_subquery_size phase: just make the paths right away.
    2614             :  */
    2615             : static void
    2616       17532 : set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
    2617             :                       Index rti, RangeTblEntry *rte)
    2618             : {
    2619       17532 :     Query      *parse = root->parse;
    2620       17532 :     Query      *subquery = rte->subquery;
    2621             :     bool        trivial_pathtarget;
    2622             :     Relids      required_outer;
    2623             :     pushdown_safety_info safetyInfo;
    2624             :     double      tuple_fraction;
    2625             :     RelOptInfo *sub_final_rel;
    2626       17532 :     Bitmapset  *run_cond_attrs = NULL;
    2627             :     ListCell   *lc;
    2628             :     char       *plan_name;
    2629             : 
    2630             :     /*
    2631             :      * Must copy the Query so that planning doesn't mess up the RTE contents
    2632             :      * (really really need to fix the planner to not scribble on its input,
    2633             :      * someday ... but see remove_unused_subquery_outputs to start with).
    2634             :      */
    2635       17532 :     subquery = copyObject(subquery);
    2636             : 
    2637             :     /*
    2638             :      * If it's a LATERAL subquery, it might contain some Vars of the current
    2639             :      * query level, requiring it to be treated as parameterized, even though
    2640             :      * we don't support pushing down join quals into subqueries.
    2641             :      */
    2642       17532 :     required_outer = rel->lateral_relids;
    2643             : 
    2644             :     /*
    2645             :      * Zero out result area for subquery_is_pushdown_safe, so that it can set
    2646             :      * flags as needed while recursing.  In particular, we need a workspace
    2647             :      * for keeping track of the reasons why columns are unsafe to reference.
    2648             :      * These reasons are stored in the bits inside unsafeFlags[i] when we
    2649             :      * discover reasons that column i of the subquery is unsafe to be used in
    2650             :      * a pushed-down qual.
    2651             :      */
    2652       17532 :     memset(&safetyInfo, 0, sizeof(safetyInfo));
    2653       17532 :     safetyInfo.unsafeFlags = (unsigned char *)
    2654       17532 :         palloc0((list_length(subquery->targetList) + 1) * sizeof(unsigned char));
    2655             : 
    2656             :     /*
    2657             :      * If the subquery has the "security_barrier" flag, it means the subquery
    2658             :      * originated from a view that must enforce row-level security.  Then we
    2659             :      * must not push down quals that contain leaky functions.  (Ideally this
    2660             :      * would be checked inside subquery_is_pushdown_safe, but since we don't
    2661             :      * currently pass the RTE to that function, we must do it here.)
    2662             :      */
    2663       17532 :     safetyInfo.unsafeLeaky = rte->security_barrier;
    2664             : 
    2665             :     /*
    2666             :      * If there are any restriction clauses that have been attached to the
    2667             :      * subquery relation, consider pushing them down to become WHERE or HAVING
    2668             :      * quals of the subquery itself.  This transformation is useful because it
    2669             :      * may allow us to generate a better plan for the subquery than evaluating
    2670             :      * all the subquery output rows and then filtering them.
    2671             :      *
    2672             :      * There are several cases where we cannot push down clauses. Restrictions
    2673             :      * involving the subquery are checked by subquery_is_pushdown_safe().
    2674             :      * Restrictions on individual clauses are checked by
    2675             :      * qual_is_pushdown_safe().  Also, we don't want to push down
    2676             :      * pseudoconstant clauses; better to have the gating node above the
    2677             :      * subquery.
    2678             :      *
    2679             :      * Non-pushed-down clauses will get evaluated as qpquals of the
    2680             :      * SubqueryScan node.
    2681             :      *
    2682             :      * XXX Are there any cases where we want to make a policy decision not to
    2683             :      * push down a pushable qual, because it'd result in a worse plan?
    2684             :      */
    2685       19492 :     if (rel->baserestrictinfo != NIL &&
    2686        1960 :         subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
    2687             :     {
    2688             :         /* OK to consider pushing down individual quals */
    2689        1814 :         List       *upperrestrictlist = NIL;
    2690             :         ListCell   *l;
    2691             : 
    2692        4580 :         foreach(l, rel->baserestrictinfo)
    2693             :         {
    2694        2766 :             RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
    2695        2766 :             Node       *clause = (Node *) rinfo->clause;
    2696             : 
    2697        2766 :             if (rinfo->pseudoconstant)
    2698             :             {
    2699           4 :                 upperrestrictlist = lappend(upperrestrictlist, rinfo);
    2700           4 :                 continue;
    2701             :             }
    2702             : 
    2703        2762 :             switch (qual_is_pushdown_safe(subquery, rti, rinfo, &safetyInfo))
    2704             :             {
    2705        2056 :                 case PUSHDOWN_SAFE:
    2706             :                     /* Push it down */
    2707        2056 :                     subquery_push_qual(subquery, rte, rti, clause);
    2708        2056 :                     break;
    2709             : 
    2710         252 :                 case PUSHDOWN_WINDOWCLAUSE_RUNCOND:
    2711             : 
    2712             :                     /*
    2713             :                      * Since we can't push the qual down into the subquery,
    2714             :                      * check if it happens to reference a window function.  If
    2715             :                      * so then it might be useful to use for the WindowAgg's
    2716             :                      * runCondition.
    2717             :                      */
    2718         504 :                     if (!subquery->hasWindowFuncs ||
    2719         252 :                         check_and_push_window_quals(subquery, clause,
    2720             :                                                     &run_cond_attrs))
    2721             :                     {
    2722             :                         /*
    2723             :                          * subquery has no window funcs, or the clause is not a
    2724             :                          * suitable window run condition qual, or it is but
    2725             :                          * the original must also be kept in the upper query.
    2726             :                          */
    2727         102 :                         upperrestrictlist = lappend(upperrestrictlist, rinfo);
    2728             :                     }
    2729         252 :                     break;
    2730             : 
    2731         454 :                 case PUSHDOWN_UNSAFE:
    2732         454 :                     upperrestrictlist = lappend(upperrestrictlist, rinfo);
    2733         454 :                     break;
    2734             :             }
    2735             :         }
    2736        1814 :         rel->baserestrictinfo = upperrestrictlist;
    2737             :         /* We don't bother recomputing baserestrict_min_security */
    2738             :     }
    2739             : 
    2740       17532 :     pfree(safetyInfo.unsafeFlags);
    2741             : 
    2742             :     /*
    2743             :      * The upper query might not use all the subquery's output columns; if
    2744             :      * not, we can simplify.  Pass the attributes that were pushed down into
    2745             :      * WindowAgg run conditions to ensure we don't accidentally think those
    2746             :      * are unused.
    2747             :      */
    2748       17532 :     remove_unused_subquery_outputs(subquery, rel, run_cond_attrs);
    2749             : 
    2750             :     /*
    2751             :      * We can safely pass the outer tuple_fraction down to the subquery if the
    2752             :      * outer level has no joining, aggregation, or sorting to do. Otherwise
    2753             :      * we'd better tell the subquery to plan for full retrieval. (XXX This
    2754             :      * could probably be made more intelligent ...)
    2755             :      */
    2756       17532 :     if (parse->hasAggs ||
    2757       16286 :         parse->groupClause ||
    2758       16268 :         parse->groupingSets ||
    2759       16268 :         root->hasHavingQual ||
    2760       16268 :         parse->distinctClause ||
    2761       22060 :         parse->sortClause ||
    2762        6308 :         bms_membership(root->all_baserels) == BMS_MULTIPLE)
    2763       12558 :         tuple_fraction = 0.0;   /* default case */
    2764             :     else
    2765        4974 :         tuple_fraction = root->tuple_fraction;
    2766             : 
    2767             :     /* plan_params should not be in use in current query level */
    2768             :     Assert(root->plan_params == NIL);
    2769             : 
    2770             :     /* Generate a subroot and Paths for the subquery */
    2771       17532 :     plan_name = choose_plan_name(root->glob, rte->eref->aliasname, false);
    2772       17532 :     rel->subroot = subquery_planner(root->glob, subquery, plan_name,
    2773             :                                     root, false, tuple_fraction, NULL);
    2774             : 
    2775             :     /* Isolate the params needed by this specific subplan */
    2776       17532 :     rel->subplan_params = root->plan_params;
    2777       17532 :     root->plan_params = NIL;
    2778             : 
    2779             :     /*
    2780             :      * It's possible that constraint exclusion proved the subquery empty. If
    2781             :      * so, it's desirable to produce an unadorned dummy path so that we will
    2782             :      * recognize appropriate optimizations at this query level.
    2783             :      */
    2784       17532 :     sub_final_rel = fetch_upper_rel(rel->subroot, UPPERREL_FINAL, NULL);
    2785             : 
    2786       17532 :     if (IS_DUMMY_REL(sub_final_rel))
    2787             :     {
    2788         126 :         set_dummy_rel_pathlist(rel);
    2789         126 :         return;
    2790             :     }
    2791             : 
    2792             :     /*
    2793             :      * Mark rel with estimated output rows, width, etc.  Note that we have to
    2794             :      * do this before generating outer-query paths, else cost_subqueryscan is
    2795             :      * not happy.
    2796             :      */
    2797       17406 :     set_subquery_size_estimates(root, rel);
    2798             : 
    2799             :     /*
    2800             :      * Also detect whether the reltarget is trivial, so that we can pass that
    2801             :      * info to cost_subqueryscan (rather than re-deriving it multiple times).
    2802             :      * It's trivial if it fetches all the subplan output columns in order.
    2803             :      */
    2804       17406 :     if (list_length(rel->reltarget->exprs) != list_length(subquery->targetList))
    2805        7714 :         trivial_pathtarget = false;
    2806             :     else
    2807             :     {
    2808        9692 :         trivial_pathtarget = true;
    2809       31640 :         foreach(lc, rel->reltarget->exprs)
    2810             :         {
    2811       22246 :             Node       *node = (Node *) lfirst(lc);
    2812             :             Var        *var;
    2813             : 
    2814       22246 :             if (!IsA(node, Var))
    2815             :             {
    2816           0 :                 trivial_pathtarget = false;
    2817           0 :                 break;
    2818             :             }
    2819       22246 :             var = (Var *) node;
    2820       22246 :             if (var->varno != rti ||
    2821       22246 :                 var->varattno != foreach_current_index(lc) + 1)
    2822             :             {
    2823         298 :                 trivial_pathtarget = false;
    2824         298 :                 break;
    2825             :             }
    2826             :         }
    2827             :     }
    2828             : 
    2829             :     /*
    2830             :      * For each Path that subquery_planner produced, make a SubqueryScanPath
    2831             :      * in the outer query.
    2832             :      */
    2833       36836 :     foreach(lc, sub_final_rel->pathlist)
    2834             :     {
    2835       19430 :         Path       *subpath = (Path *) lfirst(lc);
    2836             :         List       *pathkeys;
    2837             : 
    2838             :         /* Convert subpath's pathkeys to outer representation */
    2839       19430 :         pathkeys = convert_subquery_pathkeys(root,
    2840             :                                              rel,
    2841             :                                              subpath->pathkeys,
    2842             :                                              make_tlist_from_pathtarget(subpath->pathtarget));
    2843             : 
    2844             :         /* Generate outer path using this subpath */
    2845       19430 :         add_path(rel, (Path *)
    2846       19430 :                  create_subqueryscan_path(root, rel, subpath,
    2847             :                                           trivial_pathtarget,
    2848             :                                           pathkeys, required_outer));
    2849             :     }
    2850             : 
    2851             :     /* If outer rel allows parallelism, do same for partial paths. */
    2852       17406 :     if (rel->consider_parallel && bms_is_empty(required_outer))
    2853             :     {
    2854             :         /* If consider_parallel is false, there should be no partial paths. */
    2855             :         Assert(sub_final_rel->consider_parallel ||
    2856             :                sub_final_rel->partial_pathlist == NIL);
    2857             : 
    2858             :         /* Same for partial paths. */
    2859       13206 :         foreach(lc, sub_final_rel->partial_pathlist)
    2860             :         {
    2861          42 :             Path       *subpath = (Path *) lfirst(lc);
    2862             :             List       *pathkeys;
    2863             : 
    2864             :             /* Convert subpath's pathkeys to outer representation */
    2865          42 :             pathkeys = convert_subquery_pathkeys(root,
    2866             :                                                  rel,
    2867             :                                                  subpath->pathkeys,
    2868             :                                                  make_tlist_from_pathtarget(subpath->pathtarget));
    2869             : 
    2870             :             /* Generate outer path using this subpath */
    2871          42 :             add_partial_path(rel, (Path *)
    2872          42 :                              create_subqueryscan_path(root, rel, subpath,
    2873             :                                                       trivial_pathtarget,
    2874             :                                                       pathkeys,
    2875             :                                                       required_outer));
    2876             :         }
    2877             :     }
    2878             : }
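
The trivial-pathtarget test above reduces to a simple rule: the scan must fetch every subplan output column as a plain Var of the scanned relation, in attribute-number order. A minimal standalone sketch of that rule (not PostgreSQL code; MiniVar and tlist_is_trivial are hypothetical names, with Vars flattened to (varno, varattno) pairs and non-Vars encoded as varattno == 0):

    #include <stdbool.h>
    #include <stdio.h>

    /* A Var reduced to the two fields the check inspects; varattno == 0
     * encodes "this expression is not a simple Var". */
    typedef struct MiniVar { int varno; int varattno; } MiniVar;

    static bool
    tlist_is_trivial(const MiniVar *exprs, int nexprs, int nsubquerycols, int rti)
    {
        /* Must fetch all of the subplan's output columns... */
        if (nexprs != nsubquerycols)
            return false;
        /* ...as plain Vars of this relation, in order. */
        for (int i = 0; i < nexprs; i++)
        {
            if (exprs[i].varattno == 0)
                return false;           /* not a Var at all */
            if (exprs[i].varno != rti || exprs[i].varattno != i + 1)
                return false;           /* wrong rel or out of order */
        }
        return true;
    }

    int
    main(void)
    {
        MiniVar in_order[] = {{1, 1}, {1, 2}, {1, 3}};
        MiniVar permuted[] = {{1, 2}, {1, 1}, {1, 3}};

        printf("%d %d\n",
               tlist_is_trivial(in_order, 3, 3, 1),    /* 1: trivial */
               tlist_is_trivial(permuted, 3, 3, 1));   /* 0: reordered */
        return 0;
    }
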
    2879             : 
    2880             : /*
    2881             :  * set_function_pathlist
    2882             :  *      Build the (single) access path for a function RTE
    2883             :  */
    2884             : static void
    2885       51798 : set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    2886             : {
    2887             :     Relids      required_outer;
    2888       51798 :     List       *pathkeys = NIL;
    2889             : 
    2890             :     /*
    2891             :      * We don't support pushing join clauses into the quals of a function
    2892             :      * scan, but it could still have required parameterization due to LATERAL
    2893             :      * refs in the function expression.
    2894             :      */
    2895       51798 :     required_outer = rel->lateral_relids;
    2896             : 
    2897             :     /*
    2898             :      * The result is considered unordered unless ORDINALITY was used, in which
    2899             :      * case it is ordered by the ordinal column (the last one).  See if we
    2900             :      * care, by checking for uses of that Var in equivalence classes.
    2901             :      */
    2902       51798 :     if (rte->funcordinality)
    2903             :     {
    2904         922 :         AttrNumber  ordattno = rel->max_attr;
    2905         922 :         Var        *var = NULL;
    2906             :         ListCell   *lc;
    2907             : 
    2908             :         /*
    2909             :          * Is there a Var for it in rel's targetlist?  If not, the query did
    2910             :          * not reference the ordinality column, or at least not in any way
    2911             :          * that would be interesting for sorting.
    2912             :          */
    2913        2106 :         foreach(lc, rel->reltarget->exprs)
    2914             :         {
    2915        2100 :             Var        *node = (Var *) lfirst(lc);
    2916             : 
    2917             :             /* checking varno/varlevelsup is just paranoia */
    2918        2100 :             if (IsA(node, Var) &&
    2919        2100 :                 node->varattno == ordattno &&
    2920         916 :                 node->varno == rel->relid &&
    2921         916 :                 node->varlevelsup == 0)
    2922             :             {
    2923         916 :                 var = node;
    2924         916 :                 break;
    2925             :             }
    2926             :         }
    2927             : 
    2928             :         /*
    2929             :          * Try to build pathkeys for this Var with int8 sorting.  We tell
    2930             :          * build_expression_pathkey not to build any new equivalence class; if
    2931             :          * the Var isn't already mentioned in some EC, it means that nothing
    2932             :          * cares about the ordering.
    2933             :          */
    2934         922 :         if (var)
    2935         916 :             pathkeys = build_expression_pathkey(root,
    2936             :                                                 (Expr *) var,
    2937             :                                                 Int8LessOperator,
    2938             :                                                 rel->relids,
    2939             :                                                 false);
    2940             :     }
    2941             : 
    2942             :     /* Generate appropriate path */
    2943       51798 :     add_path(rel, create_functionscan_path(root, rel,
    2944             :                                            pathkeys, required_outer));
    2945       51798 : }
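
A point worth noting above: pathkeys are built only when some reltarget expression is a Var bearing the relation's maximum attribute number, i.e. the ordinality column. A standalone sketch of that search, under the same flattened-Var assumption as before (find_ordinality_var is a hypothetical name):

    #include <stdio.h>

    typedef struct MiniVar { int varno; int varattno; } MiniVar;

    /* Return the index of the Var for attribute 'ordattno' of relation
     * 'relid', or -1 if the target list never references it (in which
     * case nothing can care about ordering by ordinality). */
    static int
    find_ordinality_var(const MiniVar *exprs, int nexprs, int relid, int ordattno)
    {
        for (int i = 0; i < nexprs; i++)
            if (exprs[i].varno == relid && exprs[i].varattno == ordattno)
                return i;
        return -1;
    }

    int
    main(void)
    {
        MiniVar tlist[] = {{1, 1}, {1, 3}};     /* column 3 is the ordinal */

        printf("%d\n", find_ordinality_var(tlist, 2, 1, 3));    /* prints 1 */
        printf("%d\n", find_ordinality_var(tlist, 2, 1, 2));    /* prints -1 */
        return 0;
    }
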
    2946             : 
    2947             : /*
    2948             :  * set_values_pathlist
    2949             :  *      Build the (single) access path for a VALUES RTE
    2950             :  */
    2951             : static void
    2952        8294 : set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    2953             : {
    2954             :     Relids      required_outer;
    2955             : 
    2956             :     /*
    2957             :      * We don't support pushing join clauses into the quals of a values scan,
    2958             :      * but it could still have required parameterization due to LATERAL refs
    2959             :      * in the values expressions.
    2960             :      */
    2961        8294 :     required_outer = rel->lateral_relids;
    2962             : 
    2963             :     /* Generate appropriate path */
    2964        8294 :     add_path(rel, create_valuesscan_path(root, rel, required_outer));
    2965        8294 : }
    2966             : 
    2967             : /*
    2968             :  * set_tablefunc_pathlist
    2969             :  *      Build the (single) access path for a table func RTE
    2970             :  */
    2971             : static void
    2972         626 : set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    2973             : {
    2974             :     Relids      required_outer;
    2975             : 
    2976             :     /*
    2977             :      * We don't support pushing join clauses into the quals of a tablefunc
    2978             :      * scan, but it could still have required parameterization due to LATERAL
    2979             :      * refs in the function expression.
    2980             :      */
    2981         626 :     required_outer = rel->lateral_relids;
    2982             : 
    2983             :     /* Generate appropriate path */
    2984         626 :     add_path(rel, create_tablefuncscan_path(root, rel,
    2985             :                                             required_outer));
    2986         626 : }
    2987             : 
    2988             : /*
    2989             :  * set_cte_pathlist
    2990             :  *      Build the (single) access path for a non-self-reference CTE RTE
    2991             :  *
    2992             :  * There's no need for a separate set_cte_size phase, since we don't
    2993             :  * support join-qual-parameterized paths for CTEs.
    2994             :  */
    2995             : static void
    2996        4242 : set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    2997             : {
    2998             :     Path       *ctepath;
    2999             :     Plan       *cteplan;
    3000             :     PlannerInfo *cteroot;
    3001             :     Index       levelsup;
    3002             :     List       *pathkeys;
    3003             :     int         ndx;
    3004             :     ListCell   *lc;
    3005             :     int         plan_id;
    3006             :     Relids      required_outer;
    3007             : 
    3008             :     /*
    3009             :      * Find the referenced CTE, and locate the path and plan previously made
    3010             :      * for it.
    3011             :      */
    3012        4242 :     levelsup = rte->ctelevelsup;
    3013        4242 :     cteroot = root;
    3014        7406 :     while (levelsup-- > 0)
    3015             :     {
    3016        3164 :         cteroot = cteroot->parent_root;
    3017        3164 :         if (!cteroot)           /* shouldn't happen */
    3018           0 :             elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
    3019             :     }
    3020             : 
    3021             :     /*
    3022             :      * Note: cte_plan_ids can be shorter than cteList, if we are still working
    3023             :      * on planning the CTEs (ie, this is a side-reference from another CTE).
    3024             :      * So we mustn't use forboth here.
    3025             :      */
    3026        4242 :     ndx = 0;
    3027        5824 :     foreach(lc, cteroot->parse->cteList)
    3028             :     {
    3029        5824 :         CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
    3030             : 
    3031        5824 :         if (strcmp(cte->ctename, rte->ctename) == 0)
    3032        4242 :             break;
    3033        1582 :         ndx++;
    3034             :     }
    3035        4242 :     if (lc == NULL)             /* shouldn't happen */
    3036           0 :         elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
    3037        4242 :     if (ndx >= list_length(cteroot->cte_plan_ids))
    3038           0 :         elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
    3039        4242 :     plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
    3040        4242 :     if (plan_id <= 0)
    3041           0 :         elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
    3042             : 
    3043             :     Assert(list_length(root->glob->subpaths) == list_length(root->glob->subplans));
    3044        4242 :     ctepath = (Path *) list_nth(root->glob->subpaths, plan_id - 1);
    3045        4242 :     cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
    3046             : 
    3047             :     /* Mark rel with estimated output rows, width, etc */
    3048        4242 :     set_cte_size_estimates(root, rel, cteplan->plan_rows);
    3049             : 
    3050             :     /* Convert the ctepath's pathkeys to outer query's representation */
    3051        4242 :     pathkeys = convert_subquery_pathkeys(root,
    3052             :                                          rel,
    3053             :                                          ctepath->pathkeys,
    3054             :                                          cteplan->targetlist);
    3055             : 
    3056             :     /*
    3057             :      * We don't support pushing join clauses into the quals of a CTE scan, but
    3058             :      * it could still have required parameterization due to LATERAL refs in
    3059             :      * its tlist.
    3060             :      */
    3061        4242 :     required_outer = rel->lateral_relids;
    3062             : 
    3063             :     /* Generate appropriate path */
    3064        4242 :     add_path(rel, create_ctescan_path(root, rel, pathkeys, required_outer));
    3065        4242 : }
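
Because cte_plan_ids can be shorter than cteList while CTEs are still being planned, the lookup above resolves the CTE's index by name first and only then indexes into the plan-id list, with explicit bounds checks rather than a paired iteration. A standalone sketch of the pattern (hypothetical names; strings stand in for CommonTableExpr nodes):

    #include <stdio.h>
    #include <string.h>

    /* Find 'name' in ctenames, then map that index into plan_ids, which
     * may be shorter while other CTEs are still being planned. */
    static int
    lookup_cte_plan_id(const char *const *ctenames, int ncte,
                       const int *plan_ids, int nplanids,
                       const char *name)
    {
        int ndx;

        for (ndx = 0; ndx < ncte; ndx++)
            if (strcmp(ctenames[ndx], name) == 0)
                break;
        if (ndx >= ncte)
            return -1;          /* "could not find CTE" */
        if (ndx >= nplanids)
            return -1;          /* "could not find plan for CTE" */
        return plan_ids[ndx];
    }

    int
    main(void)
    {
        const char *const names[] = {"a", "b", "c"};
        int plan_ids[] = {7, 9};    /* "c" not planned yet */

        printf("%d\n", lookup_cte_plan_id(names, 3, plan_ids, 2, "b")); /* 9 */
        printf("%d\n", lookup_cte_plan_id(names, 3, plan_ids, 2, "c")); /* -1 */
        return 0;
    }
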
    3066             : 
    3067             : /*
    3068             :  * set_namedtuplestore_pathlist
    3069             :  *      Build the (single) access path for a named tuplestore RTE
    3070             :  *
    3071             :  * There's no need for a separate set_namedtuplestore_size phase, since we
    3072             :  * don't support join-qual-parameterized paths for tuplestores.
    3073             :  */
    3074             : static void
    3075         478 : set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
    3076             :                              RangeTblEntry *rte)
    3077             : {
    3078             :     Relids      required_outer;
    3079             : 
    3080             :     /* Mark rel with estimated output rows, width, etc */
    3081         478 :     set_namedtuplestore_size_estimates(root, rel);
    3082             : 
    3083             :     /*
    3084             :      * We don't support pushing join clauses into the quals of a tuplestore
    3085             :      * scan, but it could still have required parameterization due to LATERAL
    3086             :      * refs in its tlist.
    3087             :      */
    3088         478 :     required_outer = rel->lateral_relids;
    3089             : 
    3090             :     /* Generate appropriate path */
    3091         478 :     add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
    3092         478 : }
    3093             : 
    3094             : /*
    3095             :  * set_result_pathlist
    3096             :  *      Build the (single) access path for an RTE_RESULT RTE
    3097             :  *
    3098             :  * There's no need for a separate set_result_size phase, since we
    3099             :  * don't support join-qual-parameterized paths for these RTEs.
    3100             :  */
    3101             : static void
    3102        4208 : set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
    3103             :                     RangeTblEntry *rte)
    3104             : {
    3105             :     Relids      required_outer;
    3106             : 
    3107             :     /* Mark rel with estimated output rows, width, etc */
    3108        4208 :     set_result_size_estimates(root, rel);
    3109             : 
    3110             :     /*
    3111             :      * We don't support pushing join clauses into the quals of a Result scan,
    3112             :      * but it could still have required parameterization due to LATERAL refs
    3113             :      * in its tlist.
    3114             :      */
    3115        4208 :     required_outer = rel->lateral_relids;
    3116             : 
    3117             :     /* Generate appropriate path */
    3118        4208 :     add_path(rel, create_resultscan_path(root, rel, required_outer));
    3119        4208 : }
    3120             : 
    3121             : /*
    3122             :  * set_worktable_pathlist
    3123             :  *      Build the (single) access path for a self-reference CTE RTE
    3124             :  *
    3125             :  * There's no need for a separate set_worktable_size phase, since we don't
    3126             :  * support join-qual-parameterized paths for CTEs.
    3127             :  */
    3128             : static void
    3129         934 : set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    3130             : {
    3131             :     Path       *ctepath;
    3132             :     PlannerInfo *cteroot;
    3133             :     Index       levelsup;
    3134             :     Relids      required_outer;
    3135             : 
    3136             :     /*
    3137             :      * We need to find the non-recursive term's path, which is in the plan
    3138             :      * level that's processing the recursive UNION, one level *below* where
    3139             :      * the CTE comes from.
    3140             :      */
    3141         934 :     levelsup = rte->ctelevelsup;
    3142         934 :     if (levelsup == 0)          /* shouldn't happen */
    3143           0 :         elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
    3144         934 :     levelsup--;
    3145         934 :     cteroot = root;
    3146        2280 :     while (levelsup-- > 0)
    3147             :     {
    3148        1346 :         cteroot = cteroot->parent_root;
    3149        1346 :         if (!cteroot)           /* shouldn't happen */
    3150           0 :             elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
    3151             :     }
    3152         934 :     ctepath = cteroot->non_recursive_path;
    3153         934 :     if (!ctepath)               /* shouldn't happen */
    3154           0 :         elog(ERROR, "could not find path for CTE \"%s\"", rte->ctename);
    3155             : 
    3156             :     /* Mark rel with estimated output rows, width, etc */
    3157         934 :     set_cte_size_estimates(root, rel, ctepath->rows);
    3158             : 
    3159             :     /*
    3160             :      * We don't support pushing join clauses into the quals of a worktable
    3161             :      * scan, but it could still have required parameterization due to LATERAL
    3162             :      * refs in its tlist.  (I'm not sure this is actually possible given the
    3163             :      * restrictions on recursive references, but it's easy enough to support.)
    3164             :      */
    3165         934 :     required_outer = rel->lateral_relids;
    3166             : 
    3167             :     /* Generate appropriate path */
    3168         934 :     add_path(rel, create_worktablescan_path(root, rel, required_outer));
    3169         934 : }
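
Both CTE functions share the same parent-root walk: ctelevelsup counts how many query levels up the CTE is defined, and for a worktable the interesting level is one below that, hence the levelsup-- before the loop. A standalone sketch over a hypothetical parent chain:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Level { int id; struct Level *parent; } Level;

    /* Walk 'levelsup' parents; a NULL parent mid-walk mirrors the
     * "bad levelsup" elog(ERROR) cases above. */
    static Level *
    walk_up(Level *node, int levelsup)
    {
        while (levelsup-- > 0)
        {
            node = node->parent;
            if (node == NULL)
            {
                fprintf(stderr, "bad levelsup\n");
                exit(1);
            }
        }
        return node;
    }

    int
    main(void)
    {
        Level top = {0, NULL}, mid = {1, &top}, leaf = {2, &mid};

        printf("%d\n", walk_up(&leaf, 2)->id);      /* 0: outermost level, as for a plain CTE */
        printf("%d\n", walk_up(&leaf, 2 - 1)->id);  /* 1: one level below, as for a worktable */
        return 0;
    }
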
    3170             : 
    3171             : /*
    3172             :  * generate_gather_paths
    3173             :  *      Generate parallel access paths for a relation by pushing a Gather or
    3174             :  *      Gather Merge on top of a partial path.
    3175             :  *
    3176             :  * This must not be called until after we're done creating all partial paths
    3177             :  * for the specified relation.  (Otherwise, add_partial_path might delete a
    3178             :  * path that some GatherPath or GatherMergePath has a reference to.)
    3179             :  *
    3180             :  * If we're generating paths for a scan or join relation, override_rows will
    3181             :  * be false, and we'll just use the relation's size estimate.  When we're
    3182             :  * being called for a partially-grouped or partially-distinct path, though, we
    3183             :  * need to override the rowcount estimate.  (It's not clear that the
    3184             :  * particular value we're using here is actually best, but the underlying rel
    3185             :  * has no estimate so we must do something.)
    3186             :  */
    3187             : void
    3188       24854 : generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
    3189             : {
    3190             :     Path       *cheapest_partial_path;
    3191             :     Path       *simple_gather_path;
    3192             :     ListCell   *lc;
    3193             :     double      rows;
    3194       24854 :     double     *rowsp = NULL;
    3195             : 
    3196             :     /* If there are no partial paths, there's nothing to do here. */
    3197       24854 :     if (rel->partial_pathlist == NIL)
    3198           0 :         return;
    3199             : 
    3200             :     /* Should we override the rel's rowcount estimate? */
    3201       24854 :     if (override_rows)
    3202        6118 :         rowsp = &rows;
    3203             : 
    3204             :     /*
    3205             :      * The output of Gather is always unsorted, so there's only one partial
    3206             :      * path of interest: the cheapest one.  That will be the one at the front
    3207             :      * of partial_pathlist because of the way add_partial_path works.
    3208             :      */
    3209       24854 :     cheapest_partial_path = linitial(rel->partial_pathlist);
    3210       24854 :     rows = compute_gather_rows(cheapest_partial_path);
    3211             :     simple_gather_path = (Path *)
    3212       24854 :         create_gather_path(root, rel, cheapest_partial_path, rel->reltarget,
    3213             :                            NULL, rowsp);
    3214       24854 :     add_path(rel, simple_gather_path);
    3215             : 
    3216             :     /*
    3217             :      * For each useful ordering, we can consider an order-preserving Gather
    3218             :      * Merge.
    3219             :      */
    3220       55746 :     foreach(lc, rel->partial_pathlist)
    3221             :     {
    3222       30892 :         Path       *subpath = (Path *) lfirst(lc);
    3223             :         GatherMergePath *path;
    3224             : 
    3225       30892 :         if (subpath->pathkeys == NIL)
    3226       24124 :             continue;
    3227             : 
    3228        6768 :         rows = compute_gather_rows(subpath);
    3229        6768 :         path = create_gather_merge_path(root, rel, subpath, rel->reltarget,
    3230             :                                         subpath->pathkeys, NULL, rowsp);
    3231        6768 :         add_path(rel, &path->path);
    3232             :     }
    3233             : }
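
The shape of the paths generated above: exactly one plain Gather, fed by the cheapest (front) partial path, plus one order-preserving Gather Merge per partial path that has pathkeys. A standalone sketch of that selection, modeling partial paths as a cost-ordered array (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct MiniPath { double cost; bool has_pathkeys; } MiniPath;

    /* Report which gather-style paths the loop above would generate for
     * a partial_pathlist kept in increasing-cost order. */
    static void
    generate_gather_shapes(const MiniPath *partial, int npaths)
    {
        /* Gather output is unsorted, so only the cheapest input matters. */
        printf("Gather over path 0 (cost %.1f)\n", partial[0].cost);

        /* Each sorted partial path can feed an order-preserving Gather Merge. */
        for (int i = 0; i < npaths; i++)
            if (partial[i].has_pathkeys)
                printf("Gather Merge over path %d (cost %.1f)\n", i, partial[i].cost);
    }

    int
    main(void)
    {
        MiniPath partial[] = {{10.0, false}, {12.5, true}, {20.0, true}};

        generate_gather_shapes(partial, 3);
        return 0;
    }
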
    3234             : 
    3235             : /*
    3236             :  * get_useful_pathkeys_for_relation
    3237             :  *      Determine which orderings of a relation might be useful.
    3238             :  *
    3239             :  * Getting data in sorted order can be useful either because the requested
    3240             :  * order matches the final output ordering for the overall query we're
    3241             :  * planning, or because it enables an efficient merge join.  Here, we try
    3242             :  * to figure out which pathkeys to consider.
    3243             :  *
    3244             :  * This allows us to do incremental sort on top of an index scan under a gather
    3245             :  * merge node, i.e. to run the sort in parallel.
    3246             :  *
    3247             :  * If 'require_parallel_safe' is true, we also require the expressions to
    3248             :  * be parallel safe (which allows pushing the sort below Gather Merge).
    3249             :  *
    3250             :  * XXX At the moment this can only ever return a list with a single element,
    3251             :  * because it looks at query_pathkeys only. So we might return the pathkeys
    3252             :  * directly, but it seems plausible we'll want to consider other orderings
    3253             :  * in the future. For example, we might want to consider pathkeys useful for
    3254             :  * merge joins.
    3255             :  */
    3256             : static List *
    3257       24854 : get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel,
    3258             :                                  bool require_parallel_safe)
    3259             : {
    3260       24854 :     List       *useful_pathkeys_list = NIL;
    3261             : 
    3262             :     /*
    3263             :      * Considering query_pathkeys is always worth it, because it might allow
    3264             :      * us to avoid a total sort when we have a partially presorted path
    3265             :      * available or to push the total sort into the parallel portion of the
    3266             :      * query.
    3267             :      */
    3268       24854 :     if (root->query_pathkeys)
    3269             :     {
    3270             :         ListCell   *lc;
    3271       14918 :         int         npathkeys = 0;  /* useful pathkeys */
    3272             : 
    3273       26062 :         foreach(lc, root->query_pathkeys)
    3274             :         {
    3275       18850 :             PathKey    *pathkey = (PathKey *) lfirst(lc);
    3276       18850 :             EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
    3277             : 
    3278             :             /*
    3279             :              * We can only build a sort for pathkeys that contain a
    3280             :              * safe-to-compute-early EC member computable from the current
    3281             :              * relation's reltarget, so ignore the remainder of the list as
    3282             :              * soon as we find a pathkey without such a member.
    3283             :              *
    3284             :              * It's still worthwhile to return any prefix of the pathkeys list
    3285             :              * that meets this requirement, as we may be able to do an
    3286             :              * incremental sort.
    3287             :              *
    3288             :              * If requested, ensure the sort expression is parallel-safe too.
    3289             :              */
    3290       18850 :             if (!relation_can_be_sorted_early(root, rel, pathkey_ec,
    3291             :                                               require_parallel_safe))
    3292        7706 :                 break;
    3293             : 
    3294       11144 :             npathkeys++;
    3295             :         }
    3296             : 
    3297             :         /*
    3298             :          * The whole query_pathkeys list matches, so append it directly, to
    3299             :          * have to truncate the pathkeys, we must make a copy, though.
    3300             :          * have to truncate the pathkeys, we gotta do a copy though.
    3301             :          */
    3302       14918 :         if (npathkeys == list_length(root->query_pathkeys))
    3303        7212 :             useful_pathkeys_list = lappend(useful_pathkeys_list,
    3304        7212 :                                            root->query_pathkeys);
    3305        7706 :         else if (npathkeys > 0)
    3306         474 :             useful_pathkeys_list = lappend(useful_pathkeys_list,
    3307         474 :                                            list_copy_head(root->query_pathkeys,
    3308             :                                                           npathkeys));
    3309             :     }
    3310             : 
    3311       24854 :     return useful_pathkeys_list;
    3312             : }
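
The truncation logic above also preserves a micro-optimization: when the entire query_pathkeys list is usable it is appended as-is, so callers can later detect identical orderings by pointer equality; a copy is made only for a proper prefix. A standalone sketch over integer arrays (hypothetical names; usable() stands in for relation_can_be_sorted_early):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* A pathkey is "usable" here if the rel can compute it early; we
     * fake that with a simple threshold. */
    static int
    usable(int pathkey)
    {
        return pathkey < 100;
    }

    /* Return query_pathkeys itself if the whole list is usable (enabling
     * cheap pointer-equality comparisons later), a malloc'd copy of the
     * usable prefix if only part is, or NULL if none is. */
    static int *
    useful_prefix(int *query_pathkeys, int nkeys, int *nout)
    {
        int npathkeys = 0;

        while (npathkeys < nkeys && usable(query_pathkeys[npathkeys]))
            npathkeys++;

        *nout = npathkeys;
        if (npathkeys == nkeys)
            return query_pathkeys;              /* whole list: share it */
        if (npathkeys > 0)
        {
            int *copy = malloc(npathkeys * sizeof(int));

            memcpy(copy, query_pathkeys, npathkeys * sizeof(int));
            return copy;                        /* proper prefix: copy */
        }
        return NULL;
    }

    int
    main(void)
    {
        int keys[] = {1, 2, 300};
        int n;
        int *result = useful_prefix(keys, 3, &n);

        printf("%d usable, shared=%d\n", n, result == keys);    /* 2 usable, shared=0 */
        if (result != keys)
            free(result);
        return 0;
    }
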
    3313             : 
    3314             : /*
    3315             :  * generate_useful_gather_paths
    3316             :  *      Generate parallel access paths for a relation by pushing a Gather or
    3317             :  *      Gather Merge on top of a partial path.
    3318             :  *
    3319             :  * Unlike plain generate_gather_paths, this not only looks at the pathkeys of
    3320             :  * input paths (aiming to preserve the ordering) but also considers orderings
    3321             :  * that might be useful to nodes above the gather merge node, and tries to add
    3322             :  * a sort (regular or incremental) to provide that.
    3323             :  */
    3324             : void
    3325      616466 : generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
    3326             : {
    3327             :     ListCell   *lc;
    3328             :     double      rows;
    3329      616466 :     double     *rowsp = NULL;
    3330      616466 :     List       *useful_pathkeys_list = NIL;
    3331      616466 :     Path       *cheapest_partial_path = NULL;
    3332             : 
    3333             :     /* If there are no partial paths, there's nothing to do here. */
    3334      616466 :     if (rel->partial_pathlist == NIL)
    3335      591612 :         return;
    3336             : 
    3337             :     /* Should we override the rel's rowcount estimate? */
    3338       24854 :     if (override_rows)
    3339        6118 :         rowsp = &rows;
    3340             : 
    3341             :     /* generate the regular gather (merge) paths */
    3342       24854 :     generate_gather_paths(root, rel, override_rows);
    3343             : 
    3344             :     /* consider incremental sort for interesting orderings */
    3345       24854 :     useful_pathkeys_list = get_useful_pathkeys_for_relation(root, rel, true);
    3346             : 
    3347             :     /* used for explicit (full) sort paths */
    3348       24854 :     cheapest_partial_path = linitial(rel->partial_pathlist);
    3349             : 
    3350             :     /*
    3351             :      * Consider sorted paths for each interesting ordering. We generate both
    3352             :      * incremental and full sort.
    3353             :      */
    3354       32540 :     foreach(lc, useful_pathkeys_list)
    3355             :     {
    3356        7686 :         List       *useful_pathkeys = lfirst(lc);
    3357             :         ListCell   *lc2;
    3358             :         bool        is_sorted;
    3359             :         int         presorted_keys;
    3360             : 
    3361       18194 :         foreach(lc2, rel->partial_pathlist)
    3362             :         {
    3363       10508 :             Path       *subpath = (Path *) lfirst(lc2);
    3364             :             GatherMergePath *path;
    3365             : 
    3366       10508 :             is_sorted = pathkeys_count_contained_in(useful_pathkeys,
    3367             :                                                     subpath->pathkeys,
    3368             :                                                     &presorted_keys);
    3369             : 
    3370             :             /*
    3371             :              * We don't need to consider the case where a subpath is already
    3372             :              * fully sorted because generate_gather_paths already creates a
    3373             :              * gather merge path for every subpath that has pathkeys present.
    3374             :              *
    3375             :              * But since the subpath is already sorted, we know we don't need
    3376             :              * to consider adding a sort (full or incremental) on top of it,
    3377             :              * so we can continue here.
    3378             :              */
    3379       10508 :             if (is_sorted)
    3380        2980 :                 continue;
    3381             : 
    3382             :             /*
    3383             :              * Try at least sorting the cheapest path and also try
    3384             :              * incrementally sorting any path which is partially sorted
    3385             :              * already (no need to deal with paths which have presorted keys
    3386             :              * when incremental sort is disabled unless it's the cheapest
    3387             :              * input path).
    3388             :              */
    3389        7528 :             if (subpath != cheapest_partial_path &&
    3390         378 :                 (presorted_keys == 0 || !enable_incremental_sort))
    3391         102 :                 continue;
    3392             : 
    3393             :             /*
    3394             :              * Consider regular sort for any path that's not presorted or if
    3395             :              * incremental sort is disabled.  We've no need to consider both
    3396             :              * sort and incremental sort on the same path.  We assume that
    3397             :              * incremental sort is always faster when there are presorted
    3398             :              * keys.
    3399             :              *
    3400             :              * This is not redundant with the gather paths created in
    3401             :              * generate_gather_paths, because that doesn't generate ordered
    3402             :              * output. Here we add an explicit sort to match the useful
    3403             :              * ordering.
    3404             :              */
    3405        7426 :             if (presorted_keys == 0 || !enable_incremental_sort)
    3406             :             {
    3407        7138 :                 subpath = (Path *) create_sort_path(root,
    3408             :                                                     rel,
    3409             :                                                     subpath,
    3410             :                                                     useful_pathkeys,
    3411             :                                                     -1.0);
    3412             :             }
    3413             :             else
    3414         288 :                 subpath = (Path *) create_incremental_sort_path(root,
    3415             :                                                                 rel,
    3416             :                                                                 subpath,
    3417             :                                                                 useful_pathkeys,
    3418             :                                                                 presorted_keys,
    3419             :                                                                 -1);
    3420        7426 :             rows = compute_gather_rows(subpath);
    3421        7426 :             path = create_gather_merge_path(root, rel,
    3422             :                                             subpath,
    3423        7426 :                                             rel->reltarget,
    3424             :                                             subpath->pathkeys,
    3425             :                                             NULL,
    3426             :                                             rowsp);
    3427             : 
    3428        7426 :             add_path(rel, &path->path);
    3429             :         }
    3430             :     }
    3431             : }
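
The per-subpath logic in the loop above is a three-way decision: skip subpaths that are already fully sorted (generate_gather_paths covered those), skip non-cheapest subpaths that could not benefit from an incremental sort, and otherwise choose a full or incremental sort from presorted_keys and enable_incremental_sort. A standalone sketch of just that decision (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { SKIP, FULL_SORT, INCREMENTAL_SORT } SortDecision;

    static SortDecision
    choose_sort(bool is_sorted, bool is_cheapest, int presorted_keys,
                bool enable_incremental_sort)
    {
        if (is_sorted)
            return SKIP;            /* gather merge already considered */
        if (!is_cheapest && (presorted_keys == 0 || !enable_incremental_sort))
            return SKIP;            /* not worth sorting this one */
        if (presorted_keys == 0 || !enable_incremental_sort)
            return FULL_SORT;
        return INCREMENTAL_SORT;    /* assumed faster given presorted keys */
    }

    int
    main(void)
    {
        static const char *names[] = {"skip", "full sort", "incremental sort"};

        printf("%s\n", names[choose_sort(false, true, 0, true)]);   /* full sort */
        printf("%s\n", names[choose_sort(false, true, 1, true)]);   /* incremental sort */
        printf("%s\n", names[choose_sort(false, false, 1, false)]); /* skip */
        return 0;
    }
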
    3432             : 
    3433             : /*
    3434             :  * generate_grouped_paths
    3435             :  *      Generate paths for a grouped relation by adding sorted and hashed
    3436             :  *      partial aggregation paths on top of paths of the ungrouped relation.
    3437             :  *
    3438             :  * The information needed is provided by the RelAggInfo structure stored in
    3439             :  * "grouped_rel".
    3440             :  */
    3441             : void
    3442         898 : generate_grouped_paths(PlannerInfo *root, RelOptInfo *grouped_rel,
    3443             :                        RelOptInfo *rel)
    3444             : {
    3445         898 :     RelAggInfo *agg_info = grouped_rel->agg_info;
    3446             :     AggClauseCosts agg_costs;
    3447             :     bool        can_hash;
    3448             :     bool        can_sort;
    3449         898 :     Path       *cheapest_total_path = NULL;
    3450         898 :     Path       *cheapest_partial_path = NULL;
    3451         898 :     double      dNumGroups = 0;
    3452         898 :     double      dNumPartialGroups = 0;
    3453         898 :     List       *group_pathkeys = NIL;
    3454             : 
    3455         898 :     if (IS_DUMMY_REL(rel))
    3456             :     {
    3457           0 :         mark_dummy_rel(grouped_rel);
    3458           0 :         return;
    3459             :     }
    3460             : 
    3461             :     /*
    3462             :      * We push partial aggregation only to the lowest possible level in the
    3463             :      * join tree that is deemed useful.
    3464             :      */
    3465         898 :     if (!bms_equal(agg_info->apply_agg_at, rel->relids) ||
    3466         898 :         !agg_info->agg_useful)
    3467           0 :         return;
    3468             : 
    3469        5388 :     MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
    3470         898 :     get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL, &agg_costs);
    3471             : 
    3472             :     /*
    3473             :      * Determine whether it's possible to perform sort-based implementations
    3474             :      * of grouping, and generate the pathkeys that represent the grouping
    3475             :      * requirements in that case.
    3476             :      */
    3477         898 :     can_sort = grouping_is_sortable(agg_info->group_clauses);
    3478         898 :     if (can_sort)
    3479             :     {
    3480             :         RelOptInfo *top_grouped_rel;
    3481             :         List       *top_group_tlist;
    3482             : 
    3483         502 :         top_grouped_rel = IS_OTHER_REL(rel) ?
    3484        1400 :             rel->top_parent->grouped_rel : grouped_rel;
    3485             :         top_group_tlist =
    3486         898 :             make_tlist_from_pathtarget(top_grouped_rel->agg_info->target);
    3487             : 
    3488             :         group_pathkeys =
    3489         898 :             make_pathkeys_for_sortclauses(root, agg_info->group_clauses,
    3490             :                                           top_group_tlist);
    3491             :     }
    3492             : 
    3493             :     /*
    3494             :      * Determine whether we should consider hash-based implementations of
    3495             :      * grouping.
    3496             :      */
    3497             :     Assert(root->numOrderedAggs == 0);
    3498        1796 :     can_hash = (agg_info->group_clauses != NIL &&
    3499         898 :                 grouping_is_hashable(agg_info->group_clauses));
    3500             : 
    3501             :     /*
    3502             :      * Consider whether we should generate partially aggregated non-partial
    3503             :      * paths.  We can only do this if we have a non-partial path.
    3504             :      */
    3505         898 :     if (rel->pathlist != NIL)
    3506             :     {
    3507         898 :         cheapest_total_path = rel->cheapest_total_path;
    3508             :         Assert(cheapest_total_path != NULL);
    3509             :     }
    3510             : 
    3511             :     /*
    3512             :      * If parallelism is possible for grouped_rel, then we should consider
    3513             :      * generating partially-grouped partial paths.  However, if the ungrouped
    3514             :      * rel has no partial paths, then we can't.
    3515             :      */
    3516         898 :     if (grouped_rel->consider_parallel && rel->partial_pathlist != NIL)
    3517             :     {
    3518         732 :         cheapest_partial_path = linitial(rel->partial_pathlist);
    3519             :         Assert(cheapest_partial_path != NULL);
    3520             :     }
    3521             : 
    3522             :     /* Estimate number of partial groups. */
    3523         898 :     if (cheapest_total_path != NULL)
    3524         898 :         dNumGroups = estimate_num_groups(root,
    3525             :                                          agg_info->group_exprs,
    3526             :                                          cheapest_total_path->rows,
    3527             :                                          NULL, NULL);
    3528         898 :     if (cheapest_partial_path != NULL)
    3529         732 :         dNumPartialGroups = estimate_num_groups(root,
    3530             :                                                 agg_info->group_exprs,
    3531             :                                                 cheapest_partial_path->rows,
    3532             :                                                 NULL, NULL);
    3533             : 
    3534         898 :     if (can_sort && cheapest_total_path != NULL)
    3535             :     {
    3536             :         ListCell   *lc;
    3537             : 
    3538             :         /*
    3539             :          * Use any available suitably-sorted path as input, and also consider
    3540             :          * sorting the cheapest-total path and incremental sort on any paths
    3541             :          * with presorted keys.
    3542             :          *
    3543             :          * To save planning time, we ignore parameterized input paths unless
    3544             :          * they are the cheapest-total path.
    3545             :          */
    3546        2174 :         foreach(lc, rel->pathlist)
    3547             :         {
    3548        1276 :             Path       *input_path = (Path *) lfirst(lc);
    3549             :             Path       *path;
    3550             :             bool        is_sorted;
    3551             :             int         presorted_keys;
    3552             : 
    3553             :             /*
    3554             :              * Ignore parameterized paths that are not the cheapest-total
    3555             :              * path.
    3556             :              */
    3557        1276 :             if (input_path->param_info &&
    3558             :                 input_path != cheapest_total_path)
    3559          30 :                 continue;
    3560             : 
    3561        1270 :             is_sorted = pathkeys_count_contained_in(group_pathkeys,
    3562             :                                                     input_path->pathkeys,
    3563             :                                                     &presorted_keys);
    3564             : 
    3565             :             /*
    3566             :              * Ignore paths that are not suitably or partially sorted, unless
    3567             :              * they are the cheapest total path (no need to deal with paths
    3568             :              * which have presorted keys when incremental sort is disabled).
    3569             :              */
    3570        1270 :             if (!is_sorted && input_path != cheapest_total_path &&
    3571         168 :                 (presorted_keys == 0 || !enable_incremental_sort))
    3572          24 :                 continue;
    3573             : 
    3574             :             /*
    3575             :              * Since the path originates from a non-grouped relation that is
    3576             :              * not aware of eager aggregation, we must ensure that it provides
    3577             :              * the correct input for partial aggregation.
    3578             :              */
    3579        1246 :             path = (Path *) create_projection_path(root,
    3580             :                                                    grouped_rel,
    3581             :                                                    input_path,
    3582        1246 :                                                    agg_info->agg_input);
    3583             : 
    3584        1246 :             if (!is_sorted)
    3585             :             {
    3586             :                 /*
    3587             :                  * We've no need to consider both a sort and incremental sort.
    3588             :                  * We'll just do a sort if there are no presorted keys and an
    3589             :                  * incremental sort when there are presorted keys.
    3590             :                  */
    3591        1036 :                 if (presorted_keys == 0 || !enable_incremental_sort)
    3592         892 :                     path = (Path *) create_sort_path(root,
    3593             :                                                      grouped_rel,
    3594             :                                                      path,
    3595             :                                                      group_pathkeys,
    3596             :                                                      -1.0);
    3597             :                 else
    3598         144 :                     path = (Path *) create_incremental_sort_path(root,
    3599             :                                                                  grouped_rel,
    3600             :                                                                  path,
    3601             :                                                                  group_pathkeys,
    3602             :                                                                  presorted_keys,
    3603             :                                                                  -1.0);
    3604             :             }
    3605             : 
    3606             :             /*
    3607             :              * qual is NIL because the HAVING clause cannot be evaluated until
    3608             :              * the final value of the aggregate is known.
    3609             :              */
    3610        1246 :             path = (Path *) create_agg_path(root,
    3611             :                                             grouped_rel,
    3612             :                                             path,
    3613        1246 :                                             agg_info->target,
    3614             :                                             AGG_SORTED,
    3615             :                                             AGGSPLIT_INITIAL_SERIAL,
    3616             :                                             agg_info->group_clauses,
    3617             :                                             NIL,
    3618             :                                             &agg_costs,
    3619             :                                             dNumGroups);
    3620             : 
    3621        1246 :             add_path(grouped_rel, path);
    3622             :         }
    3623             :     }
    3624             : 
    3625         898 :     if (can_sort && cheapest_partial_path != NULL)
    3626             :     {
    3627             :         ListCell   *lc;
    3628             : 
    3629             :         /* Similar to above logic, but for partial paths. */
    3630        1704 :         foreach(lc, rel->partial_pathlist)
    3631             :         {
    3632         972 :             Path       *input_path = (Path *) lfirst(lc);
    3633             :             Path       *path;
    3634             :             bool        is_sorted;
    3635             :             int         presorted_keys;
    3636             : 
    3637         972 :             is_sorted = pathkeys_count_contained_in(group_pathkeys,
    3638             :                                                     input_path->pathkeys,
    3639             :                                                     &presorted_keys);
    3640             : 
    3641             :             /*
    3642             :              * Ignore paths that are not suitably or partially sorted, unless
    3643             :              * they are the cheapest partial path (no need to deal with paths
    3644             :              * which have presorted keys when incremental sort is disabled).
    3645             :              */
    3646         972 :             if (!is_sorted && input_path != cheapest_partial_path &&
    3647          96 :                 (presorted_keys == 0 || !enable_incremental_sort))
    3648           0 :                 continue;
    3649             : 
    3650             :             /*
    3651             :              * Since the path originates from a non-grouped relation that is
    3652             :              * not aware of eager aggregation, we must ensure that it provides
    3653             :              * the correct input for partial aggregation.
    3654             :              */
    3655         972 :             path = (Path *) create_projection_path(root,
    3656             :                                                    grouped_rel,
    3657             :                                                    input_path,
    3658         972 :                                                    agg_info->agg_input);
    3659             : 
    3660         972 :             if (!is_sorted)
    3661             :             {
    3662             :                 /*
    3663             :                  * We've no need to consider both a sort and incremental sort.
    3664             :                  * We'll just do a sort if there are no presorted keys and an
    3665             :                  * incremental sort when there are presorted keys.
    3666             :                  */
    3667         828 :                 if (presorted_keys == 0 || !enable_incremental_sort)
    3668         732 :                     path = (Path *) create_sort_path(root,
    3669             :                                                      grouped_rel,
    3670             :                                                      path,
    3671             :                                                      group_pathkeys,
    3672             :                                                      -1.0);
    3673             :                 else
    3674          96 :                     path = (Path *) create_incremental_sort_path(root,
    3675             :                                                                  grouped_rel,
    3676             :                                                                  path,
    3677             :                                                                  group_pathkeys,
    3678             :                                                                  presorted_keys,
    3679             :                                                                  -1.0);
    3680             :             }
    3681             : 
    3682             :             /*
    3683             :              * qual is NIL because the HAVING clause cannot be evaluated until
    3684             :              * the final value of the aggregate is known.
    3685             :              */
    3686         972 :             path = (Path *) create_agg_path(root,
    3687             :                                             grouped_rel,
    3688             :                                             path,
    3689         972 :                                             agg_info->target,
    3690             :                                             AGG_SORTED,
    3691             :                                             AGGSPLIT_INITIAL_SERIAL,
    3692             :                                             agg_info->group_clauses,
    3693             :                                             NIL,
    3694             :                                             &agg_costs,
    3695             :                                             dNumPartialGroups);
    3696             : 
    3697         972 :             add_partial_path(grouped_rel, path);
    3698             :         }
    3699             :     }
    3700             : 
    3701             :     /*
    3702             :      * Add a partially-grouped HashAgg Path where possible
    3703             :      */
    3704         898 :     if (can_hash && cheapest_total_path != NULL)
    3705             :     {
    3706             :         Path       *path;
    3707             : 
    3708             :         /*
    3709             :          * Since the path originates from a non-grouped relation that is not
    3710             :          * aware of eager aggregation, we must ensure that it provides the
    3711             :          * correct input for partial aggregation.
    3712             :          */
    3713         898 :         path = (Path *) create_projection_path(root,
    3714             :                                                grouped_rel,
    3715             :                                                cheapest_total_path,
    3716         898 :                                                agg_info->agg_input);
    3717             : 
    3718             :         /*
    3719             :          * qual is NIL because the HAVING clause cannot be evaluated until the
    3720             :          * final value of the aggregate is known.
    3721             :          */
    3722         898 :         path = (Path *) create_agg_path(root,
    3723             :                                         grouped_rel,
    3724             :                                         path,
    3725         898 :                                         agg_info->target,
    3726             :                                         AGG_HASHED,
    3727             :                                         AGGSPLIT_INITIAL_SERIAL,
    3728             :                                         agg_info->group_clauses,
    3729             :                                         NIL,
    3730             :                                         &agg_costs,
    3731             :                                         dNumGroups);
    3732             : 
    3733         898 :         add_path(grouped_rel, path);
    3734             :     }
    3735             : 
    3736             :     /*
    3737             :      * Now add a partially-grouped HashAgg partial Path where possible
    3738             :      */
    3739         898 :     if (can_hash && cheapest_partial_path != NULL)
    3740             :     {
    3741             :         Path       *path;
    3742             : 
    3743             :         /*
    3744             :          * Since the path originates from a non-grouped relation that is not
    3745             :          * aware of eager aggregation, we must ensure that it provides the
    3746             :          * correct input for partial aggregation.
    3747             :          */
    3748         732 :         path = (Path *) create_projection_path(root,
    3749             :                                                grouped_rel,
    3750             :                                                cheapest_partial_path,
    3751         732 :                                                agg_info->agg_input);
    3752             : 
    3753             :         /*
    3754             :          * qual is NIL because the HAVING clause cannot be evaluated until the
    3755             :          * final value of the aggregate is known.
    3756             :          */
    3757         732 :         path = (Path *) create_agg_path(root,
    3758             :                                         grouped_rel,
    3759             :                                         path,
    3760         732 :                                         agg_info->target,
    3761             :                                         AGG_HASHED,
    3762             :                                         AGGSPLIT_INITIAL_SERIAL,
    3763             :                                         agg_info->group_clauses,
    3764             :                                         NIL,
    3765             :                                         &agg_costs,
    3766             :                                         dNumPartialGroups);
    3767             : 
    3768         732 :         add_partial_path(grouped_rel, path);
    3769             :     }
    3770             : }
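                     : 
                     : /*
                     :  * For illustration (hypothetical query and plan shape, shown only as a
                     :  * sketch): given a query such as
                     :  *
                     :  *     SELECT a.x, sum(b.y)
                     :  *     FROM a JOIN b ON a.id = b.a_id
                     :  *     GROUP BY a.x;
                     :  *
                     :  * the code above may build a partially aggregated path over "b" alone,
                     :  * yielding a plan of roughly the form
                     :  *
                     :  *     Finalize GroupAggregate
                     :  *       -> Join
                     :  *            -> Scan on a
                     :  *            -> Partial HashAggregate
                     :  *                 -> Scan on b
                     :  *
                     :  * Because each Agg path built here uses AGGSPLIT_INITIAL_SERIAL, it
                     :  * emits transition states rather than final values, which is why qual
                     :  * is NIL: the HAVING clause must wait for the Finalize step.
                     :  */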
    3771             : 
    3772             : /*
    3773             :  * make_rel_from_joinlist
    3774             :  *    Build access paths using a "joinlist" to guide the join path search.
    3775             :  *
    3776             :  * See comments for deconstruct_jointree() for definition of the joinlist
    3777             :  * data structure.
    3778             :  */
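                     : /*
                     :  * For illustration (a sketch, not tied to any particular test case):
                     :  * for "FROM a, b, c" the joinlist is normally a flat list of three
                     :  * RangeTblRef nodes, but when the collapse limits prevent flattening it
                     :  * can contain sub-Lists, e.g.
                     :  *
                     :  *     (RangeTblRef 1, (RangeTblRef 2, RangeTblRef 3))
                     :  *
                     :  * in which case rels 2 and 3 are joined to each other first (by the
                     :  * recursive call below) and the result participates in the outer-level
                     :  * search as a single item.
                     :  */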
    3779             : static RelOptInfo *
    3780      330018 : make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
    3781             : {
    3782             :     int         levels_needed;
    3783             :     List       *initial_rels;
    3784             :     ListCell   *jl;
    3785             : 
    3786             :     /*
    3787             :      * Count the number of child joinlist nodes.  This is the depth of the
    3788             :      * dynamic-programming algorithm we must employ to consider all ways of
    3789             :      * joining the child nodes.
    3790             :      */
    3791      330018 :     levels_needed = list_length(joinlist);
    3792             : 
    3793      330018 :     if (levels_needed <= 0)
    3794           0 :         return NULL;            /* nothing to do? */
    3795             : 
    3796             :     /*
    3797             :      * Construct a list of rels corresponding to the child joinlist nodes.
    3798             :      * This may contain both base rels and rels constructed according to
    3799             :      * sub-joinlists.
    3800             :      */
    3801      330018 :     initial_rels = NIL;
    3802      798594 :     foreach(jl, joinlist)
    3803             :     {
    3804      468576 :         Node       *jlnode = (Node *) lfirst(jl);
    3805             :         RelOptInfo *thisrel;
    3806             : 
    3807      468576 :         if (IsA(jlnode, RangeTblRef))
    3808             :         {
    3809      465180 :             int         varno = ((RangeTblRef *) jlnode)->rtindex;
    3810             : 
    3811      465180 :             thisrel = find_base_rel(root, varno);
    3812             :         }
    3813        3396 :         else if (IsA(jlnode, List))
    3814             :         {
    3815             :             /* Recurse to handle subproblem */
    3816        3396 :             thisrel = make_rel_from_joinlist(root, (List *) jlnode);
    3817             :         }
    3818             :         else
    3819             :         {
    3820           0 :             elog(ERROR, "unrecognized joinlist node type: %d",
    3821             :                  (int) nodeTag(jlnode));
    3822             :             thisrel = NULL;     /* keep compiler quiet */
    3823             :         }
    3824             : 
    3825      468576 :         initial_rels = lappend(initial_rels, thisrel);
    3826             :     }
    3827             : 
    3828      330018 :     if (levels_needed == 1)
    3829             :     {
    3830             :         /*
    3831             :          * Single joinlist node, so we're done.
    3832             :          */
    3833      229030 :         return (RelOptInfo *) linitial(initial_rels);
    3834             :     }
    3835             :     else
    3836             :     {
    3837             :         /*
    3838             :          * Consider the different orders in which we could join the rels,
    3839             :          * using a plugin, GEQO, or the regular join search code.
    3840             :          *
    3841             :          * We put the initial_rels list into a PlannerInfo field because
    3842             :          * has_legal_joinclause() needs to look at it (ugly :-().
    3843             :          */
    3844      100988 :         root->initial_rels = initial_rels;
    3845             : 
    3846      100988 :         if (join_search_hook)
    3847           0 :             return (*join_search_hook) (root, levels_needed, initial_rels);
    3848      100988 :         else if (enable_geqo && levels_needed >= geqo_threshold)
    3849          42 :             return geqo(root, levels_needed, initial_rels);
    3850             :         else
    3851      100946 :             return standard_join_search(root, levels_needed, initial_rels);
    3852             :     }
    3853             : }
    3854             : 
    3855             : /*
    3856             :  * standard_join_search
    3857             :  *    Find possible joinpaths for a query by successively finding ways
    3858             :  *    to join component relations into join relations.
    3859             :  *
    3860             :  * 'levels_needed' is the number of iterations needed, ie, the number of
    3861             :  *      independent jointree items in the query.  This is > 1.
    3862             :  *
    3863             :  * 'initial_rels' is a list of RelOptInfo nodes for each independent
    3864             :  *      jointree item.  These are the components to be joined together.
    3865             :  *      Note that levels_needed == list_length(initial_rels).
    3866             :  *
    3867             :  * Returns the final level of join relations, i.e., the relation that is
    3868             :  * the result of joining all the original relations together.
    3869             :  * At least one implementation path must be provided for this relation and
    3870             :  * all required sub-relations.
    3871             :  *
    3872             :  * To support loadable plugins that modify planner behavior by changing the
    3873             :  * join searching algorithm, we provide a hook variable that lets a plugin
    3874             :  * replace or supplement this function.  Any such hook must return the same
    3875             :  * final join relation as the standard code would, but it might have a
    3876             :  * different set of implementation paths attached, and only the sub-joinrels
    3877             :  * needed for these paths need have been instantiated.
    3878             :  *
    3879             :  * Note to plugin authors: the functions invoked during standard_join_search()
    3880             :  * modify root->join_rel_list and root->join_rel_hash.  If you want to do more
    3881             :  * than one join-order search, you'll probably need to save and restore the
    3882             :  * original states of those data structures.  See geqo_eval() for an example.
    3883             :  */
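                     : /*
                     :  * A minimal sketch of such a hook (hypothetical extension code, shown
                     :  * only for illustration; a real extension should also save and chain any
                     :  * previously installed hook):
                     :  *
                     :  *     static RelOptInfo *
                     :  *     my_join_search(PlannerInfo *root, int levels_needed,
                     :  *                    List *initial_rels)
                     :  *     {
                     :  *         ... try a custom search strategy, then fall back:
                     :  *         return standard_join_search(root, levels_needed, initial_rels);
                     :  *     }
                     :  *
                     :  * installed from the extension's _PG_init() with
                     :  * "join_search_hook = my_join_search;".
                     :  */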
    3884             : RelOptInfo *
    3885      100946 : standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
    3886             : {
    3887             :     int         lev;
    3888             :     RelOptInfo *rel;
    3889             : 
    3890             :     /*
    3891             :      * This function cannot be invoked recursively within any one planning
    3892             :      * problem, so join_rel_level[] can't be in use already.
    3893             :      */
    3894             :     Assert(root->join_rel_level == NULL);
    3895             : 
    3896             :     /*
    3897             :      * We employ a simple "dynamic programming" algorithm: we first find all
    3898             :      * ways to build joins of two jointree items, then all ways to build joins
    3899             :      * of three items (from two-item joins and single items), then four-item
    3900             :      * joins, and so on until we have considered all ways to join all the
    3901             :      * items into one rel.
    3902             :      *
    3903             :      * root->join_rel_level[j] is a list of all the j-item rels.  Initially we
    3904             :      * set root->join_rel_level[1] to represent all the single-jointree-item
    3905             :      * relations.
    3906             :      */
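                     :     /*
                     :      * For example (an illustrative sketch): with initial_rels = (A, B, C),
                     :      * level 2 builds joinrels for the joinable pairs {A B}, {A C}, {B C},
                     :      * and level 3 builds {A B C} by joining each two-item rel to the
                     :      * remaining single item.  join_rel_level[3] then contains the one
                     :      * three-item rel, which is what gets returned.
                     :      */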
    3907      100946 :     root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
    3908             : 
    3909      100946 :     root->join_rel_level[1] = initial_rels;
    3910             : 
    3911      239444 :     for (lev = 2; lev <= levels_needed; lev++)
    3912             :     {
    3913             :         ListCell   *lc;
    3914             : 
    3915             :         /*
    3916             :          * Determine all possible pairs of relations to be joined at this
    3917             :          * level, and build paths for making each one from every available
    3918             :          * pair of lower-level relations.
    3919             :          */
    3920      138498 :         join_search_one_level(root, lev);
    3921             : 
    3922             :         /*
    3923             :          * Run generate_partitionwise_join_paths() and
    3924             :          * generate_useful_gather_paths() for each just-processed joinrel.  We
    3925             :          * could not do this earlier because both regular and partial paths
    3926             :          * can get added to a particular joinrel at multiple times within
    3927             :          * join_search_one_level.
    3928             :          *
    3929             :          * After that, we're done creating paths for the joinrel, so run
    3930             :          * set_cheapest().
    3931             :          *
     3932             :          * In addition, we run generate_grouped_paths() for the grouped
    3933             :          * relation of each just-processed joinrel, and run set_cheapest() for
    3934             :          * the grouped relation afterwards.
    3935             :          */
    3936      350530 :         foreach(lc, root->join_rel_level[lev])
    3937             :         {
    3938             :             bool        is_top_rel;
    3939             : 
    3940      212032 :             rel = (RelOptInfo *) lfirst(lc);
    3941             : 
    3942      212032 :             is_top_rel = bms_equal(rel->relids, root->all_query_rels);
    3943             : 
    3944             :             /* Create paths for partitionwise joins. */
    3945      212032 :             generate_partitionwise_join_paths(root, rel);
    3946             : 
    3947             :             /*
    3948             :              * Except for the topmost scan/join rel, consider gathering
    3949             :              * partial paths.  We'll do the same for the topmost scan/join rel
     3950             :              * once we know the final targetlist (see grouping_planner's
     3951             :              * call to apply_scanjoin_target_to_paths).
    3952             :              */
    3953      212032 :             if (!is_top_rel)
    3954      111584 :                 generate_useful_gather_paths(root, rel, false);
    3955             : 
    3956             :             /* Find and save the cheapest paths for this rel */
    3957      212032 :             set_cheapest(rel);
    3958             : 
    3959             :             /*
    3960             :              * Except for the topmost scan/join rel, consider generating
    3961             :              * partial aggregation paths for the grouped relation on top of
    3962             :              * the paths of this rel.  After that, we're done creating paths
    3963             :              * for the grouped relation, so run set_cheapest().
    3964             :              */
    3965      212032 :             if (rel->grouped_rel != NULL && !is_top_rel)
    3966             :             {
    3967          72 :                 RelOptInfo *grouped_rel = rel->grouped_rel;
    3968             : 
    3969             :                 Assert(IS_GROUPED_REL(grouped_rel));
    3970             : 
    3971          72 :                 generate_grouped_paths(root, grouped_rel, rel);
    3972          72 :                 set_cheapest(grouped_rel);
    3973             :             }
    3974             : 
    3975             : #ifdef OPTIMIZER_DEBUG
    3976             :             pprint(rel);
    3977             : #endif
    3978             :         }
    3979             :     }
    3980             : 
    3981             :     /*
    3982             :      * We should have a single rel at the final level.
    3983             :      */
    3984      100946 :     if (root->join_rel_level[levels_needed] == NIL)
    3985           0 :         elog(ERROR, "failed to build any %d-way joins", levels_needed);
    3986             :     Assert(list_length(root->join_rel_level[levels_needed]) == 1);
    3987             : 
    3988      100946 :     rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
    3989             : 
    3990      100946 :     root->join_rel_level = NULL;
    3991             : 
    3992      100946 :     return rel;
    3993             : }
    3994             : 
    3995             : /*****************************************************************************
    3996             :  *          PUSHING QUALS DOWN INTO SUBQUERIES
    3997             :  *****************************************************************************/
    3998             : 
    3999             : /*
    4000             :  * subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
    4001             :  *
    4002             :  * subquery is the particular component query being checked.  topquery
    4003             :  * is the top component of a set-operations tree (the same Query if no
    4004             :  * set-op is involved).
    4005             :  *
    4006             :  * Conditions checked here:
    4007             :  *
    4008             :  * 1. If the subquery has a LIMIT clause, we must not push down any quals,
    4009             :  * since that could change the set of rows returned.
    4010             :  *
    4011             :  * 2. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
    4012             :  * quals into it, because that could change the results.
    4013             :  *
    4014             :  * 3. If the subquery uses DISTINCT, we cannot push volatile quals into it.
    4015             :  * This is because upper-level quals should semantically be evaluated only
    4016             :  * once per distinct row, not once per original row, and if the qual is
    4017             :  * volatile then extra evaluations could change the results.  (This issue
    4018             :  * does not apply to other forms of aggregation such as GROUP BY, because
    4019             :  * when those are present we push into HAVING not WHERE, so that the quals
    4020             :  * are still applied after aggregation.)
    4021             :  *
    4022             :  * 4. If the subquery contains window functions, we cannot push volatile quals
    4023             :  * into it.  The issue here is a bit different from DISTINCT: a volatile qual
    4024             :  * might succeed for some rows of a window partition and fail for others,
    4025             :  * thereby changing the partition contents and thus the window functions'
    4026             :  * results for rows that remain.
    4027             :  *
    4028             :  * 5. If the subquery contains any set-returning functions in its targetlist,
    4029             :  * we cannot push volatile quals into it.  That would push them below the SRFs
    4030             :  * and thereby change the number of times they are evaluated.  Also, a
    4031             :  * volatile qual could succeed for some SRF output rows and fail for others,
    4032             :  * a behavior that cannot occur if it's evaluated before SRF expansion.
    4033             :  *
    4034             :  * 6. If the subquery has nonempty grouping sets, we cannot push down any
    4035             :  * quals.  The concern here is that a qual referencing a "constant" grouping
    4036             :  * column could get constant-folded, which would be improper because the value
    4037             :  * is potentially nullable by grouping-set expansion.  This restriction could
    4038             :  * be removed if we had a parsetree representation that shows that such
    4039             :  * grouping columns are not really constant.  (There are other ideas that
    4040             :  * could be used to relax this restriction, but that's the approach most
    4041             :  * likely to get taken in the future.  Note that there's not much to be gained
    4042             :  * so long as subquery_planner can't move HAVING clauses to WHERE within such
    4043             :  * a subquery.)
    4044             :  *
    4045             :  * In addition, we make several checks on the subquery's output columns to see
    4046             :  * if it is safe to reference them in pushed-down quals.  If output column k
    4047             :  * is found to be unsafe to reference, we set the reason for that inside
    4048             :  * safetyInfo->unsafeFlags[k], but we don't reject the subquery overall since
    4049             :  * column k might not be referenced by some/all quals.  The unsafeFlags[]
    4050             :  * array will be consulted later by qual_is_pushdown_safe().  It's better to
    4051             :  * do it this way than to make the checks directly in qual_is_pushdown_safe(),
    4052             :  * because when the subquery involves set operations we have to check the
    4053             :  * output expressions in each arm of the set op.
    4054             :  *
    4055             :  * Note: pushing quals into a DISTINCT subquery is theoretically dubious:
    4056             :  * we're effectively assuming that the quals cannot distinguish values that
    4057             :  * the DISTINCT's equality operator sees as equal, yet there are many
    4058             :  * counterexamples to that assumption.  However use of such a qual with a
    4059             :  * DISTINCT subquery would be unsafe anyway, since there's no guarantee which
    4060             :  * "equal" value will be chosen as the output value by the DISTINCT operation.
    4061             :  * So we don't worry too much about that.  Another objection is that if the
    4062             :  * qual is expensive to evaluate, running it for each original row might cost
    4063             :  * more than we save by eliminating rows before the DISTINCT step.  But it
    4064             :  * would be very hard to estimate that at this stage, and in practice pushdown
    4065             :  * seldom seems to make things worse, so we ignore that problem too.
    4066             :  *
    4067             :  * Note: likewise, pushing quals into a subquery with window functions is a
    4068             :  * bit dubious: the quals might remove some rows of a window partition while
    4069             :  * leaving others, causing changes in the window functions' results for the
    4070             :  * surviving rows.  We insist that such a qual reference only partitioning
    4071             :  * columns, but again that only protects us if the qual does not distinguish
    4072             :  * values that the partitioning equality operator sees as equal.  The risks
    4073             :  * here are perhaps larger than for DISTINCT, since no de-duplication of rows
    4074             :  * occurs and thus there is no theoretical problem with such a qual.  But
    4075             :  * we'll do this anyway because the potential performance benefits are very
    4076             :  * large, and we've seen no field complaints about the longstanding comparable
    4077             :  * behavior with DISTINCT.
    4078             :  */
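                     : /*
                     :  * Illustration of point 1 (hypothetical query):
                     :  *
                     :  *     SELECT * FROM (SELECT x FROM t LIMIT 10) ss WHERE ss.x > 100;
                     :  *
                     :  * Pushing "ss.x > 100" below the LIMIT would filter rows before the
                     :  * first ten are chosen, so the subquery could return a different set of
                     :  * rows than it would have without the pushed-down qual.
                     :  */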
    4079             : static bool
    4080        2272 : subquery_is_pushdown_safe(Query *subquery, Query *topquery,
    4081             :                           pushdown_safety_info *safetyInfo)
    4082             : {
    4083             :     SetOperationStmt *topop;
    4084             : 
    4085             :     /* Check point 1 */
    4086        2272 :     if (subquery->limitOffset != NULL || subquery->limitCount != NULL)
    4087         134 :         return false;
    4088             : 
    4089             :     /* Check point 6 */
    4090        2138 :     if (subquery->groupClause && subquery->groupingSets)
    4091          12 :         return false;
    4092             : 
    4093             :     /* Check points 3, 4, and 5 */
    4094        2126 :     if (subquery->distinctClause ||
    4095        2042 :         subquery->hasWindowFuncs ||
    4096        1776 :         subquery->hasTargetSRFs)
    4097         546 :         safetyInfo->unsafeVolatile = true;
    4098             : 
    4099             :     /*
    4100             :      * If we're at a leaf query, check for unsafe expressions in its target
    4101             :      * list, and mark any reasons why they're unsafe in unsafeFlags[].
    4102             :      * (Non-leaf nodes in setop trees have only simple Vars in their tlists,
    4103             :      * so no need to check them.)
    4104             :      */
    4105        2126 :     if (subquery->setOperations == NULL)
    4106        1970 :         check_output_expressions(subquery, safetyInfo);
    4107             : 
    4108             :     /* Are we at top level, or looking at a setop component? */
    4109        2126 :     if (subquery == topquery)
    4110             :     {
    4111             :         /* Top level, so check any component queries */
    4112        1814 :         if (subquery->setOperations != NULL)
    4113         156 :             if (!recurse_pushdown_safe(subquery->setOperations, topquery,
    4114             :                                        safetyInfo))
    4115           0 :                 return false;
    4116             :     }
    4117             :     else
    4118             :     {
    4119             :         /* Setop component must not have more components (too weird) */
    4120         312 :         if (subquery->setOperations != NULL)
    4121           0 :             return false;
    4122             :         /* Check whether setop component output types match top level */
    4123         312 :         topop = castNode(SetOperationStmt, topquery->setOperations);
    4124             :         Assert(topop);
    4125         312 :         compare_tlist_datatypes(subquery->targetList,
    4126             :                                 topop->colTypes,
    4127             :                                 safetyInfo);
    4128             :     }
    4129        2126 :     return true;
    4130             : }
    4131             : 
    4132             : /*
    4133             :  * Helper routine to recurse through setOperations tree
    4134             :  */
    4135             : static bool
    4136         468 : recurse_pushdown_safe(Node *setOp, Query *topquery,
    4137             :                       pushdown_safety_info *safetyInfo)
    4138             : {
    4139         468 :     if (IsA(setOp, RangeTblRef))
    4140             :     {
    4141         312 :         RangeTblRef *rtr = (RangeTblRef *) setOp;
    4142         312 :         RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);
    4143         312 :         Query      *subquery = rte->subquery;
    4144             : 
    4145             :         Assert(subquery != NULL);
    4146         312 :         return subquery_is_pushdown_safe(subquery, topquery, safetyInfo);
    4147             :     }
    4148         156 :     else if (IsA(setOp, SetOperationStmt))
    4149             :     {
    4150         156 :         SetOperationStmt *op = (SetOperationStmt *) setOp;
    4151             : 
    4152             :         /* EXCEPT is no good (point 2 for subquery_is_pushdown_safe) */
    4153         156 :         if (op->op == SETOP_EXCEPT)
    4154           0 :             return false;
    4155             :         /* Else recurse */
    4156         156 :         if (!recurse_pushdown_safe(op->larg, topquery, safetyInfo))
    4157           0 :             return false;
    4158         156 :         if (!recurse_pushdown_safe(op->rarg, topquery, safetyInfo))
    4159           0 :             return false;
    4160             :     }
    4161             :     else
    4162             :     {
    4163           0 :         elog(ERROR, "unrecognized node type: %d",
    4164             :              (int) nodeTag(setOp));
    4165             :     }
    4166         156 :     return true;
    4167             : }
    4168             : 
    4169             : /*
    4170             :  * check_output_expressions - check subquery's output expressions for safety
    4171             :  *
    4172             :  * There are several cases in which it's unsafe to push down an upper-level
    4173             :  * qual if it references a particular output column of a subquery.  We check
    4174             :  * each output column of the subquery and set flags in unsafeFlags[k] when we
    4175             :  * see that column is unsafe for a pushed-down qual to reference.  The
    4176             :  * conditions checked here are:
    4177             :  *
    4178             :  * 1. We must not push down any quals that refer to subselect outputs that
    4179             :  * return sets, else we'd introduce functions-returning-sets into the
    4180             :  * subquery's WHERE/HAVING quals.
    4181             :  *
    4182             :  * 2. We must not push down any quals that refer to subselect outputs that
    4183             :  * contain volatile functions, for fear of introducing strange results due
    4184             :  * to multiple evaluation of a volatile function.
    4185             :  *
    4186             :  * 3. If the subquery uses DISTINCT ON, we must not push down any quals that
    4187             :  * refer to non-DISTINCT output columns, because that could change the set
    4188             :  * of rows returned.  (This condition is vacuous for DISTINCT, because then
    4189             :  * there are no non-DISTINCT output columns, so we needn't check.  Note that
    4190             :  * subquery_is_pushdown_safe already reported that we can't use volatile
    4191             :  * quals if there's DISTINCT or DISTINCT ON.)
    4192             :  *
    4193             :  * 4. If the subquery has any window functions, we must not push down quals
    4194             :  * that reference any output columns that are not listed in all the subquery's
    4195             :  * window PARTITION BY clauses.  We can push down quals that use only
    4196             :  * partitioning columns because they should succeed or fail identically for
    4197             :  * every row of any one window partition, and totally excluding some
    4198             :  * partitions will not change a window function's results for remaining
    4199             :  * partitions.  (Again, this also requires nonvolatile quals, but
     4200             :  * subquery_is_pushdown_safe handles that.)  Subquery columns marked as
    4201             :  * unsafe for this reason can still have WindowClause run conditions pushed
    4202             :  * down.
    4203             :  */
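                     : /*
                     :  * Illustration (hypothetical query): in
                     :  *
                     :  *     SELECT * FROM (SELECT a, random() AS r FROM t) ss
                     :  *     WHERE ss.r < 0.5;
                     :  *
                     :  * column "r" is flagged UNSAFE_HAS_VOLATILE_FUNC (point 2), so the qual
                     :  * on ss.r stays above the subquery, while a qual referencing only ss.a
                     :  * could still be pushed down.  Set-returning outputs (point 1) are
                     :  * flagged similarly via UNSAFE_HAS_SET_FUNC.
                     :  */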
    4204             : static void
    4205        1970 : check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
    4206             : {
    4207             :     ListCell   *lc;
    4208             : 
    4209       21106 :     foreach(lc, subquery->targetList)
    4210             :     {
    4211       19136 :         TargetEntry *tle = (TargetEntry *) lfirst(lc);
    4212             : 
    4213       19136 :         if (tle->resjunk)
    4214         134 :             continue;           /* ignore resjunk columns */
    4215             : 
    4216             :         /* Functions returning sets are unsafe (point 1) */
    4217       19002 :         if (subquery->hasTargetSRFs &&
    4218         668 :             (safetyInfo->unsafeFlags[tle->resno] &
    4219         668 :              UNSAFE_HAS_SET_FUNC) == 0 &&
    4220         668 :             expression_returns_set((Node *) tle->expr))
    4221             :         {
    4222         376 :             safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_SET_FUNC;
    4223         376 :             continue;
    4224             :         }
    4225             : 
    4226             :         /* Volatile functions are unsafe (point 2) */
    4227       18626 :         if ((safetyInfo->unsafeFlags[tle->resno] &
    4228       18614 :              UNSAFE_HAS_VOLATILE_FUNC) == 0 &&
    4229       18614 :             contain_volatile_functions((Node *) tle->expr))
    4230             :         {
    4231          78 :             safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;
    4232          78 :             continue;
    4233             :         }
    4234             : 
    4235             :         /* If subquery uses DISTINCT ON, check point 3 */
    4236       18548 :         if (subquery->hasDistinctOn &&
    4237           0 :             (safetyInfo->unsafeFlags[tle->resno] &
    4238           0 :              UNSAFE_NOTIN_DISTINCTON_CLAUSE) == 0 &&
    4239           0 :             !targetIsInSortList(tle, InvalidOid, subquery->distinctClause))
    4240             :         {
    4241             :             /* non-DISTINCT column, so mark it unsafe */
     4242        2092 :              UNSAFE_NOTIN_PARTITIONBY_CLAUSE) == 0 &&
    4243           0 :             continue;
    4244             :         }
    4245             : 
    4246             :         /* If subquery uses window functions, check point 4 */
    4247       18548 :         if (subquery->hasWindowFuncs &&
    4248        1090 :             (safetyInfo->unsafeFlags[tle->resno] &
    4249        2092 :              UNSAFE_NOTIN_DISTINCTON_CLAUSE) == 0 &&
    4250        1090 :             !targetIsInAllPartitionLists(tle, subquery))
    4251             :         {
    4252             :             /* not present in all PARTITION BY clauses, so mark it unsafe */
    4253        1002 :             safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_PARTITIONBY_CLAUSE;
    4254        1002 :             continue;
    4255             :         }
    4256             :     }
    4257        1970 : }
    4258             : 
    4259             : /*
    4260             :  * For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
    4261             :  * push quals into each component query, but the quals can only reference
    4262             :  * subquery columns that suffer no type coercions in the set operation.
    4263             :  * Otherwise there are possible semantic gotchas.  So, we check the
    4264             :  * component queries to see if any of them have output types different from
    4265             :  * the top-level setop outputs.  We set the UNSAFE_TYPE_MISMATCH bit in
    4266             :  * unsafeFlags[k] if column k has different type in any component.
    4267             :  *
    4268             :  * We don't have to care about typmods here: the only allowed difference
    4269             :  * between set-op input and output typmods is input is a specific typmod
    4270             :  * and output is -1, and that does not require a coercion.
    4271             :  *
    4272             :  * tlist is a subquery tlist.
    4273             :  * colTypes is an OID list of the top-level setop's output column types.
    4274             :  * safetyInfo is the pushdown_safety_info to set unsafeFlags[] for.
    4275             :  */
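                     : /*
                     :  * Illustration (hypothetical query): in
                     :  *
                     :  *     SELECT int_col FROM t1
                     :  *     UNION ALL
                     :  *     SELECT num_col FROM t2
                     :  *
                     :  * the setop's output column is numeric, so t1's integer column gets
                     :  * UNSAFE_TYPE_MISMATCH: a qual written against the numeric output might
                     :  * not behave identically when evaluated against the uncoerced integer
                     :  * values inside the first arm.
                     :  */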
    4276             : static void
    4277         312 : compare_tlist_datatypes(List *tlist, List *colTypes,
    4278             :                         pushdown_safety_info *safetyInfo)
    4279             : {
    4280             :     ListCell   *l;
    4281         312 :     ListCell   *colType = list_head(colTypes);
    4282             : 
    4283         984 :     foreach(l, tlist)
    4284             :     {
    4285         672 :         TargetEntry *tle = (TargetEntry *) lfirst(l);
    4286             : 
    4287         672 :         if (tle->resjunk)
    4288           0 :             continue;           /* ignore resjunk columns */
    4289         672 :         if (colType == NULL)
    4290           0 :             elog(ERROR, "wrong number of tlist entries");
    4291         672 :         if (exprType((Node *) tle->expr) != lfirst_oid(colType))
    4292         104 :             safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_TYPE_MISMATCH;
    4293         672 :         colType = lnext(colTypes, colType);
    4294             :     }
    4295         312 :     if (colType != NULL)
    4296           0 :         elog(ERROR, "wrong number of tlist entries");
    4297         312 : }
    4298             : 
    4299             : /*
    4300             :  * targetIsInAllPartitionLists
    4301             :  *      True if the TargetEntry is listed in the PARTITION BY clause
    4302             :  *      of every window defined in the query.
    4303             :  *
    4304             :  * It would be safe to ignore windows not actually used by any window
    4305             :  * function, but it's not easy to get that info at this stage; and it's
    4306             :  * unlikely to be useful to spend any extra cycles getting it, since
    4307             :  * unreferenced window definitions are probably infrequent in practice.
    4308             :  */
    4309             : static bool
    4310        1090 : targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
    4311             : {
    4312             :     ListCell   *lc;
    4313             : 
    4314        1202 :     foreach(lc, query->windowClause)
    4315             :     {
    4316        1114 :         WindowClause *wc = (WindowClause *) lfirst(lc);
    4317             : 
    4318        1114 :         if (!targetIsInSortList(tle, InvalidOid, wc->partitionClause))
    4319        1002 :             return false;
    4320             :     }
    4321          88 :     return true;
    4322             : }
    4323             : 
    4324             : /*
    4325             :  * qual_is_pushdown_safe - is a particular rinfo safe to push down?
    4326             :  *
    4327             :  * rinfo is a restriction clause applying to the given subquery (whose RTE
    4328             :  * has index rti in the parent query).
    4329             :  *
    4330             :  * Conditions checked here:
    4331             :  *
    4332             :  * 1. rinfo's clause must not contain any SubPlans (mainly because it's
    4333             :  * unclear that it will work correctly: SubLinks will already have been
    4334             :  * transformed into SubPlans in the qual, but not in the subquery).  Note that
    4335             :  * SubLinks that transform to initplans are safe, and will be accepted here
    4336             :  * because what we'll see in the qual is just a Param referencing the initplan
    4337             :  * output.
    4338             :  *
    4339             :  * 2. If unsafeVolatile is set, rinfo's clause must not contain any volatile
    4340             :  * functions.
    4341             :  *
    4342             :  * 3. If unsafeLeaky is set, rinfo's clause must not contain any leaky
    4343             :  * functions that are passed Var nodes, and therefore might reveal values from
    4344             :  * the subquery as side effects.
    4345             :  *
    4346             :  * 4. rinfo's clause must not refer to the whole-row output of the subquery
    4347             :  * (since there is no easy way to name that within the subquery itself).
    4348             :  *
    4349             :  * 5. rinfo's clause must not refer to any subquery output columns that were
    4350             :  * found to be unsafe to reference by subquery_is_pushdown_safe().
    4351             :  */
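                     : /*
                     :  * Illustration of point 1 (hypothetical): a qual such as
                     :  *
                     :  *     WHERE ss.x IN (SELECT y FROM other_tab)
                     :  *
                     :  * may reach here with the IN already planned as a SubPlan (e.g. when it
                     :  * could not be pulled up as a semijoin), in which case
                     :  * contain_subplans() returns true and the qual is kept at the upper
                     :  * level rather than pushed into the subquery.
                     :  */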
    4352             : static pushdown_safe_type
    4353        2762 : qual_is_pushdown_safe(Query *subquery, Index rti, RestrictInfo *rinfo,
    4354             :                       pushdown_safety_info *safetyInfo)
    4355             : {
    4356        2762 :     pushdown_safe_type safe = PUSHDOWN_SAFE;
    4357        2762 :     Node       *qual = (Node *) rinfo->clause;
    4358             :     List       *vars;
    4359             :     ListCell   *vl;
    4360             : 
    4361             :     /* Refuse subselects (point 1) */
    4362        2762 :     if (contain_subplans(qual))
    4363          66 :         return PUSHDOWN_UNSAFE;
    4364             : 
    4365             :     /* Refuse volatile quals if we found they'd be unsafe (point 2) */
    4366        3348 :     if (safetyInfo->unsafeVolatile &&
    4367         652 :         contain_volatile_functions((Node *) rinfo))
    4368          18 :         return PUSHDOWN_UNSAFE;
    4369             : 
    4370             :     /* Refuse leaky quals if told to (point 3) */
    4371        3850 :     if (safetyInfo->unsafeLeaky &&
    4372        1172 :         contain_leaked_vars(qual))
    4373         162 :         return PUSHDOWN_UNSAFE;
    4374             : 
    4375             :     /*
    4376             :      * Examine all Vars used in clause.  Since it's a restriction clause, all
    4377             :      * such Vars must refer to subselect output columns ... unless this is
    4378             :      * part of a LATERAL subquery, in which case there could be lateral
    4379             :      * references.
    4380             :      *
    4381             :      * By omitting the relevant flags, this also gives us a cheap sanity check
    4382             :      * that no aggregates or window functions appear in the qual.  Those would
    4383             :      * be unsafe to push down, but at least for the moment we could never see
    4384             :      * any in a qual anyhow.
    4385             :      */
    4386        2516 :     vars = pull_var_clause(qual, PVC_INCLUDE_PLACEHOLDERS);
    4387        4932 :     foreach(vl, vars)
    4388             :     {
    4389        2624 :         Var        *var = (Var *) lfirst(vl);
    4390             : 
    4391             :         /*
    4392             :          * XXX Punt if we find any PlaceHolderVars in the restriction clause.
    4393             :          * It's not clear whether a PHV could safely be pushed down, and even
    4394             :          * less clear whether such a situation could arise in any cases of
    4395             :          * practical interest anyway.  So for the moment, just refuse to push
    4396             :          * down.
    4397             :          */
    4398        2624 :         if (!IsA(var, Var))
    4399             :         {
    4400           0 :             safe = PUSHDOWN_UNSAFE;
    4401           0 :             break;
    4402             :         }
    4403             : 
    4404             :         /*
    4405             :          * Punt if we find any lateral references.  It would be safe to push
    4406             :          * these down, but we'd have to convert them into outer references,
    4407             :          * which subquery_push_qual lacks the infrastructure to do.  The case
    4408             :          * arises so seldom that it doesn't seem worth working hard on.
    4409             :          */
    4410        2624 :         if (var->varno != rti)
    4411             :         {
    4412          12 :             safe = PUSHDOWN_UNSAFE;
    4413          12 :             break;
    4414             :         }
    4415             : 
    4416             :         /* Subqueries have no system columns */
    4417             :         Assert(var->varattno >= 0);
    4418             : 
    4419             :         /* Check point 4 */
    4420        2612 :         if (var->varattno == 0)
    4421             :         {
    4422           0 :             safe = PUSHDOWN_UNSAFE;
    4423           0 :             break;
    4424             :         }
    4425             : 
    4426             :         /* Check point 5 */
    4427        2612 :         if (safetyInfo->unsafeFlags[var->varattno] != 0)
    4428             :         {
    4429         526 :             if (safetyInfo->unsafeFlags[var->varattno] &
    4430             :                 (UNSAFE_HAS_VOLATILE_FUNC | UNSAFE_HAS_SET_FUNC |
    4431             :                  UNSAFE_NOTIN_DISTINCTON_CLAUSE | UNSAFE_TYPE_MISMATCH))
    4432             :             {
    4433         196 :                 safe = PUSHDOWN_UNSAFE;
    4434         196 :                 break;
    4435             :             }
    4436             :             else
    4437             :             {
    4438             :                 /* UNSAFE_NOTIN_PARTITIONBY_CLAUSE is ok for run conditions */
    4439         330 :                 safe = PUSHDOWN_WINDOWCLAUSE_RUNCOND;
    4440             :                 /* don't break, we might find another Var that's unsafe */
    4441             :             }
    4442             :         }
    4443             :     }
    4444             : 
    4445        2516 :     list_free(vars);
    4446             : 
    4447        2516 :     return safe;
    4448             : }
    4449             : 
    4450             : /*
    4451             :  * subquery_push_qual - push down a qual that we have determined is safe
    4452             :  */
    4453             : static void
    4454        2320 : subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
    4455             : {
    4456        2320 :     if (subquery->setOperations != NULL)
    4457             :     {
    4458             :         /* Recurse to push it separately to each component query */
    4459         132 :         recurse_push_qual(subquery->setOperations, subquery,
    4460             :                           rte, rti, qual);
    4461             :     }
    4462             :     else
    4463             :     {
    4464             :         /*
    4465             :          * We need to replace Vars in the qual (which must refer to outputs of
    4466             :          * the subquery) with copies of the subquery's targetlist expressions.
    4467             :          * Note that at this point, any uplevel Vars in the qual should have
    4468             :          * been replaced with Params, so they need no work.
    4469             :          *
    4470             :          * This step also ensures that when we are pushing into a setop tree,
    4471             :          * each component query gets its own copy of the qual.
    4472             :          */
    4473        2188 :         qual = ReplaceVarsFromTargetList(qual, rti, 0, rte,
    4474             :                                          subquery->targetList,
    4475             :                                          subquery->resultRelation,
    4476             :                                          REPLACEVARS_REPORT_ERROR, 0,
    4477             :                                          &subquery->hasSubLinks);
    4478             : 
    4479             :         /*
    4480             :          * Now attach the qual to the proper place: normally WHERE, but if the
    4481             :          * subquery uses grouping or aggregation, put it in HAVING (since the
    4482             :          * qual really refers to the group-result rows).
    4483             :          */
    4484        2188 :         if (subquery->hasAggs || subquery->groupClause || subquery->groupingSets || subquery->havingQual)
    4485         380 :             subquery->havingQual = make_and_qual(subquery->havingQual, qual);
    4486             :         else
    4487        1808 :             subquery->jointree->quals =
    4488        1808 :                 make_and_qual(subquery->jointree->quals, qual);
    4489             : 
    4490             :         /*
    4491             :          * We need not change the subquery's hasAggs or hasSubLinks flags,
    4492             :          * since we can't be pushing down any aggregates that weren't there
    4493             :          * before, and we don't push down subselects at all.
    4494             :          */
    4495             :     }
    4496        2320 : }
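                     : 
                     : /*
                     :  * Illustration (hypothetical): if the subquery is
                     :  *
                     :  *     SELECT a + b AS s FROM t
                     :  *
                     :  * and the pushed-down qual is "ss.s > 10", ReplaceVarsFromTargetList
                     :  * replaces the Var for output column 1 with a copy of "a + b", so the
                     :  * subquery's WHERE clause effectively becomes "a + b > 10".
                     :  */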
    4497             : 
    4498             : /*
    4499             :  * Helper routine to recurse through setOperations tree
    4500             :  */
    4501             : static void
    4502         396 : recurse_push_qual(Node *setOp, Query *topquery,
    4503             :                   RangeTblEntry *rte, Index rti, Node *qual)
    4504             : {
    4505         396 :     if (IsA(setOp, RangeTblRef))
    4506             :     {
    4507         264 :         RangeTblRef *rtr = (RangeTblRef *) setOp;
    4508         264 :         RangeTblEntry *subrte = rt_fetch(rtr->rtindex, topquery->rtable);
    4509         264 :         Query      *subquery = subrte->subquery;
    4510             : 
    4511             :         Assert(subquery != NULL);
    4512         264 :         subquery_push_qual(subquery, rte, rti, qual);
    4513             :     }
    4514         132 :     else if (IsA(setOp, SetOperationStmt))
    4515             :     {
    4516         132 :         SetOperationStmt *op = (SetOperationStmt *) setOp;
    4517             : 
    4518         132 :         recurse_push_qual(op->larg, topquery, rte, rti, qual);
    4519         132 :         recurse_push_qual(op->rarg, topquery, rte, rti, qual);
    4520             :     }
    4521             :     else
    4522             :     {
    4523           0 :         elog(ERROR, "unrecognized node type: %d",
    4524             :              (int) nodeTag(setOp));
    4525             :     }
    4526         396 : }
    4527             : 
    4528             : /*****************************************************************************
    4529             :  *          SIMPLIFYING SUBQUERY TARGETLISTS
    4530             :  *****************************************************************************/
    4531             : 
    4532             : /*
    4533             :  * remove_unused_subquery_outputs
    4534             :  *      Remove subquery targetlist items we don't need
    4535             :  *
    4536             :  * It's possible, even likely, that the upper query does not read all the
    4537             :  * output columns of the subquery.  We can remove any such outputs that are
    4538             :  * not needed by the subquery itself (e.g., as sort/group columns) and do not
    4539             :  * affect semantics otherwise (e.g., volatile functions can't be removed).
    4540             :  * This is useful not only because we might be able to remove expensive-to-
    4541             :  * compute expressions, but because deletion of output columns might allow
    4542             :  * optimizations such as join removal to occur within the subquery.
    4543             :  *
    4544             :  * extra_used_attrs can be passed as non-NULL to mark any columns (offset by
    4545             :  * FirstLowInvalidHeapAttributeNumber) that we should not remove.  This
    4546             :  * parameter is modified by the function, so callers must make a copy if they
    4547             :  * need to use the passed in Bitmapset after calling this function.
    4548             :  *
    4549             :  * To avoid affecting column numbering in the targetlist, we don't physically
    4550             :  * remove unused tlist entries, but rather replace their expressions with NULL
    4551             :  * constants.  This is implemented by modifying subquery->targetList.
    4552             :  */
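                     : /*
                     :  * Illustration (hypothetical names): for
                     :  *
                     :  *     SELECT ss.a FROM (SELECT a, slow_func(b) AS e FROM t) ss
                     :  *
                     :  * where slow_func() is stable (neither volatile nor set-returning) and
                     :  * "e" is referenced nowhere above, the tlist entry for "e" is replaced
                     :  * by a NULL constant of the same type, avoiding the per-row function
                     :  * calls.
                     :  */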
    4553             : static void
    4554       17532 : remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
    4555             :                                Bitmapset *extra_used_attrs)
    4556             : {
    4557             :     Bitmapset  *attrs_used;
    4558             :     ListCell   *lc;
    4559             : 
    4560             :     /*
    4561             :      * Just point directly to extra_used_attrs. No need to bms_copy as none of
    4562             :      * the current callers use the Bitmapset after calling this function.
    4563             :      */
    4564       17532 :     attrs_used = extra_used_attrs;
    4565             : 
    4566             :     /*
    4567             :      * Do nothing if subquery has UNION/INTERSECT/EXCEPT: in principle we
    4568             :      * could update all the child SELECTs' tlists, but it seems not worth the
    4569             :      * trouble presently.
    4570             :      */
    4571       17532 :     if (subquery->setOperations)
    4572        2008 :         return;
    4573             : 
    4574             :     /*
    4575             :      * If subquery has regular DISTINCT (not DISTINCT ON), we're wasting our
    4576             :      * time: all its output columns must be used in the distinctClause.
    4577             :      */
    4578       16698 :     if (subquery->distinctClause && !subquery->hasDistinctOn)
    4579         870 :         return;
    4580             : 
    4581             :     /*
    4582             :      * Collect a bitmap of all the output column numbers used by the upper
    4583             :      * query.
    4584             :      *
    4585             :      * Add all the attributes needed for joins or final output.  Note: we must
    4586             :      * look at rel's targetlist, not the attr_needed data, because attr_needed
    4587             :      * isn't computed for inheritance child rels, cf set_append_rel_size().
    4588             :      * (XXX might be worth changing that sometime.)
    4589             :      */
    4590       15828 :     pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
    4591             : 
    4592             :     /* Add all the attributes used by un-pushed-down restriction clauses. */
    4593       16564 :     foreach(lc, rel->baserestrictinfo)
    4594             :     {
    4595         736 :         RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
    4596             : 
    4597         736 :         pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
    4598             :     }
    4599             : 
    4600             :     /*
    4601             :      * If there's a whole-row reference to the subquery, we can't remove
    4602             :      * anything.
    4603             :      */
    4604       15828 :     if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used))
    4605         304 :         return;
    4606             : 
    4607             :     /*
    4608             :      * Run through the tlist and zap entries we don't need.  It's okay to
    4609             :      * modify the tlist items in-place because set_subquery_pathlist made a
    4610             :      * copy of the subquery.
    4611             :      */
    4612       91062 :     foreach(lc, subquery->targetList)
    4613             :     {
    4614       75538 :         TargetEntry *tle = (TargetEntry *) lfirst(lc);
    4615       75538 :         Node       *texpr = (Node *) tle->expr;
    4616             : 
    4617             :         /*
    4618             :          * If it has a sortgroupref number, it's used in some sort/group
    4619             :          * clause so we'd better not remove it.  Also, don't remove any
    4620             :          * resjunk columns, since their reason for being has nothing to do
    4621             :          * with anybody reading the subquery's output.  (It's likely that
    4622             :          * resjunk columns in a sub-SELECT would always have ressortgroupref
    4623             :          * set, but even if they don't, it seems imprudent to remove them.)
    4624             :          */
    4625       75538 :         if (tle->ressortgroupref || tle->resjunk)
    4626        2928 :             continue;
    4627             : 
    4628             :         /*
    4629             :          * If it's used by the upper query, we can't remove it.
    4630             :          */
    4631       72610 :         if (bms_is_member(tle->resno - FirstLowInvalidHeapAttributeNumber,
    4632             :                           attrs_used))
    4633       47408 :             continue;
    4634             : 
    4635             :         /*
    4636             :          * If it contains a set-returning function, we can't remove it since
    4637             :          * that could change the number of rows returned by the subquery.
    4638             :          */
    4639       26258 :         if (subquery->hasTargetSRFs &&
    4640        1056 :             expression_returns_set(texpr))
    4641         788 :             continue;
    4642             : 
    4643             :         /*
    4644             :          * If it contains volatile functions, we dare not remove it, since
    4645             :          * the user may be depending on its side-effects happening.
    4646             :          */
    4647       24414 :         if (contain_volatile_functions(texpr))
    4648          32 :             continue;
    4649             : 
    4650             :         /*
    4651             :          * OK, we don't need it.  Replace the expression with a NULL constant.
    4652             :          * Preserve the exposed type of the expression, in case something
    4653             :          * looks at the rowtype of the subquery's result.
    4654             :          */
    4655       24382 :         tle->expr = (Expr *) makeNullConst(exprType(texpr),
    4656             :                                            exprTypmod(texpr),
    4657             :                                            exprCollation(texpr));
    4658             :     }
    4659             : }
    4660             : 
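The pruning loop above can be illustrated with a minimal standalone sketch
(toy types only, not PostgreSQL code: a plain uint32 bitmask stands in for
the Bitmapset, the offset constant plays the role of
FirstLowInvalidHeapAttributeNumber with an assumed value, and the SRF and
volatile-function checks are elided for brevity):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* assumed stand-in for FirstLowInvalidHeapAttributeNumber */
    #define FIRST_LOW_INVALID_ATTNO (-7)

    typedef struct ToyTle
    {
        int         resno;          /* 1-based output column number */
        bool        resjunk;
        unsigned    ressortgroupref;
        const char *expr;           /* stand-in for the expression tree */
    } ToyTle;

    /* attnos can be negative (system columns), so offset them into a bitmap */
    static bool
    attr_is_used(uint32_t attrs_used, int attno)
    {
        return (attrs_used >> (attno - FIRST_LOW_INVALID_ATTNO)) & 1;
    }

    int
    main(void)
    {
        ToyTle      tlist[] = {
            {1, false, 0, "a"},
            {2, false, 1, "b"},         /* has a sortgroupref: keep */
            {3, false, 0, "f(c)"},      /* unreferenced: prune */
        };
        /* pretend the upper query references only output column 1 */
        uint32_t    attrs_used = UINT32_C(1) << (1 - FIRST_LOW_INVALID_ATTNO);

        /* bit (0 - offset) would mean a whole-row reference: prune nothing */
        if (attr_is_used(attrs_used, 0))
            return 0;

        for (int i = 0; i < 3; i++)
        {
            ToyTle     *tle = &tlist[i];

            if (tle->ressortgroupref || tle->resjunk)
                continue;       /* sort/group or resjunk column: keep */
            if (attr_is_used(attrs_used, tle->resno))
                continue;       /* still referenced upstream: keep */
            tle->expr = "NULL"; /* typed NULL preserves the rowtype shape */
        }

        for (int i = 0; i < 3; i++)
            printf("col %d -> %s\n", tlist[i].resno, tlist[i].expr);
        return 0;
    }

The real function additionally keeps any expression that returns a set or
contains volatile functions, since removing those could change the row count
or suppress side-effects the user is counting on.
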
    4661             : /*
    4662             :  * create_partial_bitmap_paths
    4663             :  *    Build partial bitmap heap path for the relation
    4664             :  */
    4665             : void
    4666      144224 : create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
    4667             :                             Path *bitmapqual)
    4668             : {
    4669             :     int         parallel_workers;
    4670             :     double      pages_fetched;
    4671             : 
    4672             :     /* Compute heap pages for bitmap heap scan */
    4673      144224 :     pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0,
    4674             :                                          NULL, NULL);
    4675             : 
    4676      144224 :     parallel_workers = compute_parallel_worker(rel, pages_fetched, -1,
    4677             :                                                max_parallel_workers_per_gather);
    4678             : 
    4679      144224 :     if (parallel_workers <= 0)
    4680      140052 :         return;
    4681             : 
    4682        4172 :     add_partial_path(rel, (Path *) create_bitmap_heap_path(root, rel,
    4683             :                                                            bitmapqual, rel->lateral_relids, 1.0, parallel_workers));
    4684             : }
    4685             : 
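As a worked example of the gate above (assuming the stock 8 kB block size,
the default min_parallel_table_scan_size of 8 MB = 1024 pages, the default
max_parallel_workers_per_gather of 2, and a plain base relation): a bitmap
scan expected to fetch 300 heap pages gets 0 workers from
compute_parallel_worker, so no partial path is added; one expected to fetch
4000 pages gets 2 workers, since 4000 >= 3 * 1024 but 4000 < 3 * 3072.
Note that index_pages is passed as -1 here, so only the heap-page threshold
applies.
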
    4686             : /*
    4687             :  * Compute the number of parallel workers that should be used to scan a
    4688             :  * relation.  We derive one worker count from the size of the heap to
    4689             :  * be scanned and another from the size of the index to be scanned,
    4690             :  * then take the minimum of the two.
    4691             :  *
    4692             :  * "heap_pages" is the number of pages from the table that we expect to scan, or
    4693             :  * -1 if we don't expect to scan any.
    4694             :  *
    4695             :  * "index_pages" is the number of pages from the index that we expect to scan, or
    4696             :  * -1 if we don't expect to scan any.
    4697             :  *
    4698             :  * "max_workers" is caller's limit on the number of workers.  This typically
    4699             :  * comes from a GUC.
    4700             :  */
    4701             : int
    4702      758386 : compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages,
    4703             :                         int max_workers)
    4704             : {
    4705      758386 :     int         parallel_workers = 0;
    4706             : 
    4707             :     /*
    4708             :      * If the user has set the parallel_workers reloption, use that; otherwise
    4709             :      * select a default number of workers.
    4710             :      */
    4711      758386 :     if (rel->rel_parallel_workers != -1)
    4712        1914 :         parallel_workers = rel->rel_parallel_workers;
    4713             :     else
    4714             :     {
    4715             :         /*
    4716             :          * If the number of pages being scanned is insufficient to justify a
    4717             :          * parallel scan, just return zero ... unless it's an inheritance
    4718             :          * child. In that case, we want to generate a parallel path here
    4719             :          * anyway.  It might not be worthwhile just for this relation, but
    4720             :          * when combined with all of its inheritance siblings it may well pay
    4721             :          * off.
    4722             :          */
    4723      756472 :         if (rel->reloptkind == RELOPT_BASEREL &&
    4724      717004 :             ((heap_pages >= 0 && heap_pages < min_parallel_table_scan_size) ||
    4725       24002 :              (index_pages >= 0 && index_pages < min_parallel_index_scan_size)))
    4726      716088 :             return 0;
    4727             : 
    4728       40384 :         if (heap_pages >= 0)
    4729             :         {
    4730             :             int         heap_parallel_threshold;
    4731       38258 :             int         heap_parallel_workers = 1;
    4732             : 
    4733             :             /*
    4734             :              * Select the number of workers based on the log of the size of
    4735             :              * the relation.  This probably needs to be a good deal more
    4736             :              * sophisticated, but we need something here for now.  Note that
    4737             :              * the upper limit of the min_parallel_table_scan_size GUC is
    4738             :              * chosen to prevent overflow here.
    4739             :              */
    4740       38258 :             heap_parallel_threshold = Max(min_parallel_table_scan_size, 1);
    4741       43498 :             while (heap_pages >= (BlockNumber) (heap_parallel_threshold * 3))
    4742             :             {
    4743        5240 :                 heap_parallel_workers++;
    4744        5240 :                 heap_parallel_threshold *= 3;
    4745        5240 :                 if (heap_parallel_threshold > INT_MAX / 3)
    4746           0 :                     break;      /* avoid overflow */
    4747             :             }
    4748             : 
    4749       38258 :             parallel_workers = heap_parallel_workers;
    4750             :         }
    4751             : 
    4752       40384 :         if (index_pages >= 0)
    4753             :         {
    4754        9808 :             int         index_parallel_workers = 1;
    4755             :             int         index_parallel_threshold;
    4756             : 
    4757             :             /* same calculation as for heap_pages above */
    4758        9808 :             index_parallel_threshold = Max(min_parallel_index_scan_size, 1);
    4759       10084 :             while (index_pages >= (BlockNumber) (index_parallel_threshold * 3))
    4760             :             {
    4761         276 :                 index_parallel_workers++;
    4762         276 :                 index_parallel_threshold *= 3;
    4763         276 :                 if (index_parallel_threshold > INT_MAX / 3)
    4764           0 :                     break;      /* avoid overflow */
    4765             :             }
    4766             : 
    4767        9808 :             if (parallel_workers > 0)
    4768        7682 :                 parallel_workers = Min(parallel_workers, index_parallel_workers);
    4769             :             else
    4770        2126 :                 parallel_workers = index_parallel_workers;
    4771             :         }
    4772             :     }
    4773             : 
    4774             :     /* In no case use more than caller supplied maximum number of workers */
    4775       42298 :     parallel_workers = Min(parallel_workers, max_workers);
    4776             : 
    4777       42298 :     return parallel_workers;
    4778             : }
    4779             : 
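The tripling loop above amounts to 1 + floor(log3(pages / threshold))
workers.  A standalone sketch of just that loop (assumed default threshold
hard-coded; not PostgreSQL code):

    #include <limits.h>
    #include <stdio.h>

    /* one worker, plus one more each time the page count triples */
    static int
    workers_for(double pages, int threshold)
    {
        int         workers = 1;

        while (pages >= (double) threshold * 3)
        {
            workers++;
            threshold *= 3;
            if (threshold > INT_MAX / 3)
                break;          /* avoid overflow, as in the original */
        }
        return workers;
    }

    int
    main(void)
    {
        /* assumed default: min_parallel_table_scan_size = 1024 pages */
        const int   threshold = 1024;
        const double pages[] = {512, 1024, 3072, 9216, 27648, 82944};

        for (int i = 0; i < 6; i++)
            printf("%8.0f pages -> %d worker(s)\n", pages[i],
                   pages[i] < threshold ? 0
                                        : workers_for(pages[i], threshold));
        return 0;
    }

This prints 0, 1, 2, 3, 4, and 5 workers respectively (the 0 coming from
the base-relation size gate, not the loop); the caller then clamps the
result to max_workers.
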
    4780             : /*
    4781             :  * generate_partitionwise_join_paths
    4782             :  *      Create paths representing partitionwise join for given partitioned
    4783             :  *      join relation.
    4784             :  *
    4785             :  * This must not be called until after we are done adding paths for all
    4786             :  * child-joins. Otherwise, add_path might delete a path to which some path
    4787             :  * generated here has a reference.
    4788             :  */
    4789             : void
    4790      237002 : generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
    4791             : {
    4792      237002 :     List       *live_children = NIL;
    4793             :     int         cnt_parts;
    4794             :     int         num_parts;
    4795             :     RelOptInfo **part_rels;
    4796             : 
    4797             :     /* Handle only join relations here. */
    4798      237002 :     if (!IS_JOIN_REL(rel))
    4799           0 :         return;
    4800             : 
    4801             :     /* We've nothing to do if the relation is not partitioned. */
    4802      237002 :     if (!IS_PARTITIONED_REL(rel))
    4803      229856 :         return;
    4804             : 
    4805             :     /* The relation should have consider_partitionwise_join set. */
    4806             :     Assert(rel->consider_partitionwise_join);
    4807             : 
    4808             :     /* Guard against stack overflow due to overly deep partition hierarchy. */
    4809        7146 :     check_stack_depth();
    4810             : 
    4811        7146 :     num_parts = rel->nparts;
    4812        7146 :     part_rels = rel->part_rels;
    4813             : 
    4814             :     /* Collect non-dummy child-joins. */
    4815       25448 :     for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++)
    4816             :     {
    4817       18302 :         RelOptInfo *child_rel = part_rels[cnt_parts];
    4818             : 
    4819             :         /* If it's been pruned entirely, it's certainly dummy. */
    4820       18302 :         if (child_rel == NULL)
    4821          64 :             continue;
    4822             : 
    4823             :         /* Make partitionwise join paths for this partitioned child-join. */
    4824       18238 :         generate_partitionwise_join_paths(root, child_rel);
    4825             : 
    4826             :         /* If we failed to make any path for this child, we must give up. */
    4827       18238 :         if (child_rel->pathlist == NIL)
    4828             :         {
    4829             :             /*
    4830             :              * Mark the parent joinrel as unpartitioned so that later
    4831             :              * functions treat it correctly.
    4832             :              */
    4833           0 :             rel->nparts = 0;
    4834           0 :             return;
    4835             :         }
    4836             : 
    4837             :         /* Else, identify the cheapest path for it. */
    4838       18238 :         set_cheapest(child_rel);
    4839             : 
    4840             :         /* Dummy children need not be scanned, so ignore those. */
    4841       18238 :         if (IS_DUMMY_REL(child_rel))
    4842           0 :             continue;
    4843             : 
    4844             :         /*
    4845             :          * Except for the topmost scan/join rel, consider generating partial
    4846             :          * aggregation paths for the grouped relation on top of the paths of
    4847             :          * this partitioned child-join.  After that, we're done creating paths
    4848             :          * for the grouped relation, so run set_cheapest().
    4849             :          */
    4850       18238 :         if (child_rel->grouped_rel != NULL &&
    4851       12876 :             !bms_equal(IS_OTHER_REL(rel) ?
    4852             :                        rel->top_parent_relids : rel->relids,
    4853       12876 :                        root->all_query_rels))
    4854             :         {
    4855         240 :             RelOptInfo *grouped_rel = child_rel->grouped_rel;
    4856             : 
    4857             :             Assert(IS_GROUPED_REL(grouped_rel));
    4858             : 
    4859         240 :             generate_grouped_paths(root, grouped_rel, child_rel);
    4860         240 :             set_cheapest(grouped_rel);
    4861             :         }
    4862             : 
    4863             : #ifdef OPTIMIZER_DEBUG
    4864             :         pprint(child_rel);
    4865             : #endif
    4866             : 
    4867       18238 :         live_children = lappend(live_children, child_rel);
    4868             :     }
    4869             : 
    4870             :     /* If all child-joins are dummy, parent join is also dummy. */
    4871        7146 :     if (!live_children)
    4872             :     {
    4873           0 :         mark_dummy_rel(rel);
    4874           0 :         return;
    4875             :     }
    4876             : 
    4877             :     /* Build additional paths for this rel from child-join paths. */
    4878        7146 :     add_paths_to_append_rel(root, rel, live_children);
    4879        7146 :     list_free(live_children);
    4880             : }
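
The recursive walk above can be summarized with a toy sketch (invented
types, not PostgreSQL code): recurse into each partitioned child first,
skip pruned (NULL) and dummy children, collect the survivors, and mark the
parent dummy when nothing survives.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct ToyRel
    {
        const char     *name;
        bool            dummy;  /* proven empty, e.g. fully pruned */
        int             nparts; /* 0 means not partitioned */
        struct ToyRel **parts;  /* NULL entries are pruned children */
    } ToyRel;

    static void
    walk(ToyRel *rel)
    {
        int         live = 0;

        if (rel->nparts == 0)
            return;             /* not partitioned: nothing to do */

        for (int i = 0; i < rel->nparts; i++)
        {
            ToyRel     *child = rel->parts[i];

            if (child == NULL)
                continue;       /* pruned entirely: certainly dummy */
            walk(child);        /* handle grandchildren bottom-up */
            if (child->dummy)
                continue;       /* dummy children need not be scanned */
            printf("using child-join %s\n", child->name);
            live++;
        }
        if (live == 0)
            rel->dummy = true;  /* all children dummy => parent is dummy */
    }

    int
    main(void)
    {
        ToyRel      p1 = {"join_p1", false, 0, NULL};
        ToyRel      p3 = {"join_p3", true, 0, NULL};   /* dummy child */
        ToyRel     *kids[] = {&p1, NULL, &p3};         /* p2 was pruned */
        ToyRel      parent = {"join_parent", false, 3, kids};

        walk(&parent);
        printf("parent dummy? %s\n", parent.dummy ? "yes" : "no");
        return 0;
    }

In the real function the surviving children instead feed
add_paths_to_append_rel(), and a child left with an empty pathlist forces
the parent to be treated as unpartitioned (nparts = 0) rather than dummy.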

Generated by: LCOV version 1.16