LCOV - code coverage report
Current view: top level - src/backend/optimizer/path - allpaths.c (source / functions)
Test: PostgreSQL 17devel
Date: 2023-12-07 06:11:03

                    Hit    Total   Coverage
    Lines:         1048     1110     94.4 %
    Functions:       49       49    100.0 %

Legend: Lines: hit / not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * allpaths.c
       4             :  *    Routines to find possible search paths for processing a query
       5             :  *
       6             :  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/optimizer/path/allpaths.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : 
      16             : #include "postgres.h"
      17             : 
      18             : #include <limits.h>
      19             : #include <math.h>
      20             : 
      21             : #include "access/sysattr.h"
      22             : #include "access/tsmapi.h"
      23             : #include "catalog/pg_class.h"
      24             : #include "catalog/pg_operator.h"
      25             : #include "catalog/pg_proc.h"
      26             : #include "foreign/fdwapi.h"
      27             : #include "miscadmin.h"
      28             : #include "nodes/makefuncs.h"
      29             : #include "nodes/nodeFuncs.h"
      30             : #include "nodes/supportnodes.h"
      31             : #ifdef OPTIMIZER_DEBUG
      32             : #include "nodes/print.h"
      33             : #endif
      34             : #include "optimizer/appendinfo.h"
      35             : #include "optimizer/clauses.h"
      36             : #include "optimizer/cost.h"
      37             : #include "optimizer/geqo.h"
      38             : #include "optimizer/inherit.h"
      39             : #include "optimizer/optimizer.h"
      40             : #include "optimizer/pathnode.h"
      41             : #include "optimizer/paths.h"
      42             : #include "optimizer/plancat.h"
      43             : #include "optimizer/planner.h"
      44             : #include "optimizer/restrictinfo.h"
      45             : #include "optimizer/tlist.h"
      46             : #include "parser/parse_clause.h"
      47             : #include "parser/parsetree.h"
      48             : #include "partitioning/partbounds.h"
      49             : #include "partitioning/partprune.h"
      50             : #include "port/pg_bitutils.h"
      51             : #include "rewrite/rewriteManip.h"
      52             : #include "utils/lsyscache.h"
      53             : 
      54             : 
      55             : /* Bitmask flags for pushdown_safety_info.unsafeFlags */
      56             : #define UNSAFE_HAS_VOLATILE_FUNC        (1 << 0)
      57             : #define UNSAFE_HAS_SET_FUNC             (1 << 1)
      58             : #define UNSAFE_NOTIN_DISTINCTON_CLAUSE  (1 << 2)
      59             : #define UNSAFE_NOTIN_PARTITIONBY_CLAUSE (1 << 3)
      60             : #define UNSAFE_TYPE_MISMATCH            (1 << 4)
      61             : 
      62             : /* results of subquery_is_pushdown_safe */
      63             : typedef struct pushdown_safety_info
      64             : {
      65             :     unsigned char *unsafeFlags; /* bitmask of reasons why this target list
      66             :                                  * column is unsafe for qual pushdown, or 0 if
      67             :                                  * no reason. */
      68             :     bool        unsafeVolatile; /* don't push down volatile quals */
      69             :     bool        unsafeLeaky;    /* don't push down leaky quals */
      70             : } pushdown_safety_info;
      71             : 
      72             : /* Return type for qual_is_pushdown_safe */
      73             : typedef enum pushdown_safe_type
      74             : {
      75             :     PUSHDOWN_UNSAFE,            /* unsafe to push qual into subquery */
      76             :     PUSHDOWN_SAFE,              /* safe to push qual into subquery */
      77             :     PUSHDOWN_WINDOWCLAUSE_RUNCOND,  /* unsafe, but may work as WindowClause
      78             :                                      * run condition */
      79             : } pushdown_safe_type;
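(Aside, not part of the covered source above: the routines that populate and consume these flags, such as check_output_expressions() and qual_is_pushdown_safe(), appear further down in allpaths.c, beyond this excerpt. The following is a minimal illustrative sketch of how a subquery output column might get flagged, assuming a TargetEntry *tle from the subquery's target list and a pushdown_safety_info *safetyInfo; it is not the exact source.)

    /* Illustrative sketch only: record why a subquery output column is
     * unsafe to have quals pushed down onto it.  A nonzero entry in
     * unsafeFlags[resno] later prevents, or restricts, pushdown of any
     * qual referencing that column. */
    if (contain_volatile_functions((Node *) tle->expr))
        safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;
    if (expression_returns_set((Node *) tle->expr))
        safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_SET_FUNC;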
      80             : 
      81             : /* These parameters are set by GUC */
      82             : bool        enable_geqo = false;    /* just in case GUC doesn't set it */
      83             : int         geqo_threshold;
      84             : int         min_parallel_table_scan_size;
      85             : int         min_parallel_index_scan_size;
      86             : 
      87             : /* Hook for plugins to get control in set_rel_pathlist() */
      88             : set_rel_pathlist_hook_type set_rel_pathlist_hook = NULL;
      89             : 
      90             : /* Hook for plugins to replace standard_join_search() */
      91             : join_search_hook_type join_search_hook = NULL;
      92             : 
      93             : 
      94             : static void set_base_rel_consider_startup(PlannerInfo *root);
      95             : static void set_base_rel_sizes(PlannerInfo *root);
      96             : static void set_base_rel_pathlists(PlannerInfo *root);
      97             : static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
      98             :                          Index rti, RangeTblEntry *rte);
      99             : static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     100             :                              Index rti, RangeTblEntry *rte);
     101             : static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
     102             :                                RangeTblEntry *rte);
     103             : static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel);
     104             : static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
     105             :                                       RangeTblEntry *rte);
     106             : static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     107             :                                    RangeTblEntry *rte);
     108             : static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel,
     109             :                                      RangeTblEntry *rte);
     110             : static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     111             :                                          RangeTblEntry *rte);
     112             : static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
     113             :                              RangeTblEntry *rte);
     114             : static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
     115             :                                  RangeTblEntry *rte);
     116             : static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
     117             :                                 Index rti, RangeTblEntry *rte);
     118             : static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     119             :                                     Index rti, RangeTblEntry *rte);
     120             : static void generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
     121             :                                          List *live_childrels,
     122             :                                          List *all_child_pathkeys);
     123             : static Path *get_cheapest_parameterized_child_path(PlannerInfo *root,
     124             :                                                    RelOptInfo *rel,
     125             :                                                    Relids required_outer);
     126             : static void accumulate_append_subpath(Path *path,
     127             :                                       List **subpaths,
     128             :                                       List **special_subpaths);
     129             : static Path *get_singleton_append_subpath(Path *path);
     130             : static void set_dummy_rel_pathlist(RelOptInfo *rel);
     131             : static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
     132             :                                   Index rti, RangeTblEntry *rte);
     133             : static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
     134             :                                   RangeTblEntry *rte);
     135             : static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
     136             :                                 RangeTblEntry *rte);
     137             : static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
     138             :                                    RangeTblEntry *rte);
     139             : static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
     140             :                              RangeTblEntry *rte);
     141             : static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
     142             :                                          RangeTblEntry *rte);
     143             : static void set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
     144             :                                 RangeTblEntry *rte);
     145             : static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
     146             :                                    RangeTblEntry *rte);
     147             : static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
     148             : static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
     149             :                                       pushdown_safety_info *safetyInfo);
     150             : static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
     151             :                                   pushdown_safety_info *safetyInfo);
     152             : static void check_output_expressions(Query *subquery,
     153             :                                      pushdown_safety_info *safetyInfo);
     154             : static void compare_tlist_datatypes(List *tlist, List *colTypes,
     155             :                                     pushdown_safety_info *safetyInfo);
     156             : static bool targetIsInAllPartitionLists(TargetEntry *tle, Query *query);
     157             : static pushdown_safe_type qual_is_pushdown_safe(Query *subquery, Index rti,
     158             :                                                 RestrictInfo *rinfo,
     159             :                                                 pushdown_safety_info *safetyInfo);
     160             : static void subquery_push_qual(Query *subquery,
     161             :                                RangeTblEntry *rte, Index rti, Node *qual);
     162             : static void recurse_push_qual(Node *setOp, Query *topquery,
     163             :                               RangeTblEntry *rte, Index rti, Node *qual);
     164             : static void remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
     165             :                                            Bitmapset *extra_used_attrs);
     166             : 
     167             : 
     168             : /*
     169             :  * make_one_rel
     170             :  *    Finds all possible access paths for executing a query, returning a
     171             :  *    single rel that represents the join of all base rels in the query.
     172             :  */
     173             : RelOptInfo *
     174      260962 : make_one_rel(PlannerInfo *root, List *joinlist)
     175             : {
     176             :     RelOptInfo *rel;
     177             :     Index       rti;
     178             :     double      total_pages;
     179             : 
     180             :     /* Mark base rels as to whether we care about fast-start plans */
     181      260962 :     set_base_rel_consider_startup(root);
     182             : 
     183             :     /*
     184             :      * Compute size estimates and consider_parallel flags for each base rel.
     185             :      */
     186      260962 :     set_base_rel_sizes(root);
     187             : 
     188             :     /*
     189             :      * We should now have size estimates for every actual table involved in
     190             :      * the query, and we also know which if any have been deleted from the
     191             :      * query by join removal, pruned by partition pruning, or eliminated by
     192             :      * constraint exclusion.  So we can now compute total_table_pages.
     193             :      *
     194             :      * Note that appendrels are not double-counted here, even though we don't
     195             :      * bother to distinguish RelOptInfos for appendrel parents, because the
     196             :      * parents will have pages = 0.
     197             :      *
     198             :      * XXX if a table is self-joined, we will count it once per appearance,
     199             :      * which perhaps is the wrong thing ... but that's not completely clear,
     200             :      * and detecting self-joins here is difficult, so ignore it for now.
     201             :      */
     202      260950 :     total_pages = 0;
     203      787082 :     for (rti = 1; rti < root->simple_rel_array_size; rti++)
     204             :     {
     205      526132 :         RelOptInfo *brel = root->simple_rel_array[rti];
     206             : 
     207             :         /* there may be empty slots corresponding to non-baserel RTEs */
     208      526132 :         if (brel == NULL)
     209      118262 :             continue;
     210             : 
     211             :         Assert(brel->relid == rti); /* sanity check on array */
     212             : 
     213      407870 :         if (IS_DUMMY_REL(brel))
     214        1042 :             continue;
     215             : 
     216      406828 :         if (IS_SIMPLE_REL(brel))
     217      406828 :             total_pages += (double) brel->pages;
     218             :     }
     219      260950 :     root->total_table_pages = total_pages;
     220             : 
     221             :     /*
     222             :      * Generate access paths for each base rel.
     223             :      */
     224      260950 :     set_base_rel_pathlists(root);
     225             : 
     226             :     /*
     227             :      * Generate access paths for the entire join tree.
     228             :      */
     229      260950 :     rel = make_rel_from_joinlist(root, joinlist);
     230             : 
     231             :     /*
     232             :      * The result should join all and only the query's base + outer-join rels.
     233             :      */
     234             :     Assert(bms_equal(rel->relids, root->all_query_rels));
     235             : 
     236      260950 :     return rel;
     237             : }
     238             : 
     239             : /*
     240             :  * set_base_rel_consider_startup
     241             :  *    Set the consider_[param_]startup flags for each base-relation entry.
     242             :  *
     243             :  * For the moment, we only deal with consider_param_startup here; because the
     244             :  * logic for consider_startup is pretty trivial and is the same for every base
     245             :  * relation, we just let build_simple_rel() initialize that flag correctly to
     246             :  * start with.  If that logic ever gets more complicated it would probably
     247             :  * be better to move it here.
     248             :  */
     249             : static void
     250      260962 : set_base_rel_consider_startup(PlannerInfo *root)
     251             : {
     252             :     /*
     253             :      * Since parameterized paths can only be used on the inside of a nestloop
     254             :      * join plan, there is usually little value in considering fast-start
     255             :      * plans for them.  However, for relations that are on the RHS of a SEMI
     256             :      * or ANTI join, a fast-start plan can be useful because we're only going
     257             :      * to care about fetching one tuple anyway.
     258             :      *
     259             :      * To minimize growth of planning time, we currently restrict this to
     260             :      * cases where the RHS is a single base relation, not a join; there is no
     261             :      * provision for consider_param_startup to get set at all on joinrels.
     262             :      * Also we don't worry about appendrels.  costsize.c's costing rules for
     263             :      * nestloop semi/antijoins don't consider such cases either.
     264             :      */
     265             :     ListCell   *lc;
     266             : 
     267      296216 :     foreach(lc, root->join_info_list)
     268             :     {
     269       35254 :         SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
     270             :         int         varno;
     271             : 
     272       40088 :         if ((sjinfo->jointype == JOIN_SEMI || sjinfo->jointype == JOIN_ANTI) &&
     273        4834 :             bms_get_singleton_member(sjinfo->syn_righthand, &varno))
     274             :         {
     275        4658 :             RelOptInfo *rel = find_base_rel(root, varno);
     276             : 
     277        4658 :             rel->consider_param_startup = true;
     278             :         }
     279             :     }
     280      260962 : }
     281             : 
     282             : /*
     283             :  * set_base_rel_sizes
     284             :  *    Set the size estimates (rows and widths) for each base-relation entry.
     285             :  *    Also determine whether to consider parallel paths for base relations.
     286             :  *
     287             :  * We do this in a separate pass over the base rels so that rowcount
     288             :  * estimates are available for parameterized path generation, and also so
     289             :  * that each rel's consider_parallel flag is set correctly before we begin to
     290             :  * generate paths.
     291             :  */
     292             : static void
     293      260962 : set_base_rel_sizes(PlannerInfo *root)
     294             : {
     295             :     Index       rti;
     296             : 
     297      787096 :     for (rti = 1; rti < root->simple_rel_array_size; rti++)
     298             :     {
     299      526146 :         RelOptInfo *rel = root->simple_rel_array[rti];
     300             :         RangeTblEntry *rte;
     301             : 
     302             :         /* there may be empty slots corresponding to non-baserel RTEs */
     303      526146 :         if (rel == NULL)
     304      118264 :             continue;
     305             : 
     306             :         Assert(rel->relid == rti);   /* sanity check on array */
     307             : 
     308             :         /* ignore RTEs that are "other rels" */
     309      407882 :         if (rel->reloptkind != RELOPT_BASEREL)
     310       40232 :             continue;
     311             : 
     312      367650 :         rte = root->simple_rte_array[rti];
     313             : 
     314             :         /*
     315             :          * If parallelism is allowable for this query in general, see whether
     316             :          * it's allowable for this rel in particular.  We have to do this
     317             :          * before set_rel_size(), because (a) if this rel is an inheritance
     318             :          * parent, set_append_rel_size() will use and perhaps change the rel's
     319             :          * consider_parallel flag, and (b) for some RTE types, set_rel_size()
     320             :          * goes ahead and makes paths immediately.
     321             :          */
     322      367650 :         if (root->glob->parallelModeOK)
     323      289464 :             set_rel_consider_parallel(root, rel, rte);
     324             : 
     325      367650 :         set_rel_size(root, rel, rti, rte);
     326             :     }
     327      260950 : }
     328             : 
     329             : /*
     330             :  * set_base_rel_pathlists
     331             :  *    Finds all paths available for scanning each base-relation entry.
     332             :  *    Sequential scan and any available indices are considered.
     333             :  *    Each useful path is attached to its relation's 'pathlist' field.
     334             :  */
     335             : static void
     336      260950 : set_base_rel_pathlists(PlannerInfo *root)
     337             : {
     338             :     Index       rti;
     339             : 
     340      787082 :     for (rti = 1; rti < root->simple_rel_array_size; rti++)
     341             :     {
     342      526132 :         RelOptInfo *rel = root->simple_rel_array[rti];
     343             : 
     344             :         /* there may be empty slots corresponding to non-baserel RTEs */
     345      526132 :         if (rel == NULL)
     346      118262 :             continue;
     347             : 
     348             :         Assert(rel->relid == rti);   /* sanity check on array */
     349             : 
     350             :         /* ignore RTEs that are "other rels" */
     351      407870 :         if (rel->reloptkind != RELOPT_BASEREL)
     352       40232 :             continue;
     353             : 
     354      367638 :         set_rel_pathlist(root, rel, rti, root->simple_rte_array[rti]);
     355             :     }
     356      260950 : }
     357             : 
     358             : /*
     359             :  * set_rel_size
     360             :  *    Set size estimates for a base relation
     361             :  */
     362             : static void
     363      407674 : set_rel_size(PlannerInfo *root, RelOptInfo *rel,
     364             :              Index rti, RangeTblEntry *rte)
     365             : {
     366      775324 :     if (rel->reloptkind == RELOPT_BASEREL &&
     367      367650 :         relation_excluded_by_constraints(root, rel, rte))
     368             :     {
     369             :         /*
     370             :          * We proved we don't need to scan the rel via constraint exclusion,
     371             :          * so set up a single dummy path for it.  Here we only check this for
     372             :          * regular baserels; if it's an otherrel, CE was already checked in
     373             :          * set_append_rel_size().
     374             :          *
     375             :          * In this case, we go ahead and set up the relation's path right away
     376             :          * instead of leaving it for set_rel_pathlist to do.  This is because
     377             :          * we don't have a convention for marking a rel as dummy except by
     378             :          * assigning a dummy path to it.
     379             :          */
     380         450 :         set_dummy_rel_pathlist(rel);
     381             :     }
     382      407224 :     else if (rte->inh)
     383             :     {
     384             :         /* It's an "append relation", process accordingly */
     385       19104 :         set_append_rel_size(root, rel, rti, rte);
     386             :     }
     387             :     else
     388             :     {
     389      388120 :         switch (rel->rtekind)
     390             :         {
     391      330268 :             case RTE_RELATION:
     392      330268 :                 if (rte->relkind == RELKIND_FOREIGN_TABLE)
     393             :                 {
     394             :                     /* Foreign table */
     395        2324 :                     set_foreign_size(root, rel, rte);
     396             :                 }
     397      327944 :                 else if (rte->relkind == RELKIND_PARTITIONED_TABLE)
     398             :                 {
     399             :                     /*
     400             :                      * We could get here if asked to scan a partitioned table
     401             :                      * with ONLY.  In that case we shouldn't scan any of the
     402             :                      * partitions, so mark it as a dummy rel.
     403             :                      */
     404          40 :                     set_dummy_rel_pathlist(rel);
     405             :                 }
     406      327904 :                 else if (rte->tablesample != NULL)
     407             :                 {
     408             :                     /* Sampled relation */
     409         252 :                     set_tablesample_rel_size(root, rel, rte);
     410             :                 }
     411             :                 else
     412             :                 {
     413             :                     /* Plain relation */
     414      327652 :                     set_plain_rel_size(root, rel, rte);
     415             :                 }
     416      330256 :                 break;
     417        6596 :             case RTE_SUBQUERY:
     418             : 
     419             :                 /*
     420             :                  * Subqueries don't support making a choice between
     421             :                  * parameterized and unparameterized paths, so just go ahead
     422             :                  * and build their paths immediately.
     423             :                  */
     424        6596 :                 set_subquery_pathlist(root, rel, rti, rte);
     425        6596 :                 break;
     426       38142 :             case RTE_FUNCTION:
     427       38142 :                 set_function_size_estimates(root, rel);
     428       38142 :                 break;
     429         216 :             case RTE_TABLEFUNC:
     430         216 :                 set_tablefunc_size_estimates(root, rel);
     431         216 :                 break;
     432        7322 :             case RTE_VALUES:
     433        7322 :                 set_values_size_estimates(root, rel);
     434        7322 :                 break;
     435        3776 :             case RTE_CTE:
     436             : 
     437             :                 /*
     438             :                  * CTEs don't support making a choice between parameterized
     439             :                  * and unparameterized paths, so just go ahead and build their
     440             :                  * paths immediately.
     441             :                  */
     442        3776 :                 if (rte->self_reference)
     443         784 :                     set_worktable_pathlist(root, rel, rte);
     444             :                 else
     445        2992 :                     set_cte_pathlist(root, rel, rte);
     446        3776 :                 break;
     447         446 :             case RTE_NAMEDTUPLESTORE:
     448             :                 /* Might as well just build the path immediately */
     449         446 :                 set_namedtuplestore_pathlist(root, rel, rte);
     450         446 :                 break;
     451        1354 :             case RTE_RESULT:
     452             :                 /* Might as well just build the path immediately */
     453        1354 :                 set_result_pathlist(root, rel, rte);
     454        1354 :                 break;
     455           0 :             default:
     456           0 :                 elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
     457             :                 break;
     458             :         }
     459             :     }
     460             : 
     461             :     /*
     462             :      * We insist that all non-dummy rels have a nonzero rowcount estimate.
     463             :      */
     464             :     Assert(rel->rows > 0 || IS_DUMMY_REL(rel));
     465      407660 : }
     466             : 
     467             : /*
     468             :  * set_rel_pathlist
     469             :  *    Build access paths for a base relation
     470             :  */
     471             : static void
     472      407696 : set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
     473             :                  Index rti, RangeTblEntry *rte)
     474             : {
     475      407696 :     if (IS_DUMMY_REL(rel))
     476             :     {
     477             :         /* We already proved the relation empty, so nothing more to do */
     478             :     }
     479      406792 :     else if (rte->inh)
     480             :     {
     481             :         /* It's an "append relation", process accordingly */
     482       18832 :         set_append_rel_pathlist(root, rel, rti, rte);
     483             :     }
     484             :     else
     485             :     {
     486      387960 :         switch (rel->rtekind)
     487             :         {
     488      330216 :             case RTE_RELATION:
     489      330216 :                 if (rte->relkind == RELKIND_FOREIGN_TABLE)
     490             :                 {
     491             :                     /* Foreign table */
     492        2320 :                     set_foreign_pathlist(root, rel, rte);
     493             :                 }
     494      327896 :                 else if (rte->tablesample != NULL)
     495             :                 {
     496             :                     /* Sampled relation */
     497         252 :                     set_tablesample_rel_pathlist(root, rel, rte);
     498             :                 }
     499             :                 else
     500             :                 {
     501             :                     /* Plain relation */
     502      327644 :                     set_plain_rel_pathlist(root, rel, rte);
     503             :                 }
     504      330216 :                 break;
     505        6488 :             case RTE_SUBQUERY:
     506             :                 /* Subquery --- fully handled during set_rel_size */
     507        6488 :                 break;
     508       38142 :             case RTE_FUNCTION:
     509             :                 /* RangeFunction */
     510       38142 :                 set_function_pathlist(root, rel, rte);
     511       38142 :                 break;
     512         216 :             case RTE_TABLEFUNC:
     513             :                 /* Table Function */
     514         216 :                 set_tablefunc_pathlist(root, rel, rte);
     515         216 :                 break;
     516        7322 :             case RTE_VALUES:
     517             :                 /* Values list */
     518        7322 :                 set_values_pathlist(root, rel, rte);
     519        7322 :                 break;
     520        3776 :             case RTE_CTE:
     521             :                 /* CTE reference --- fully handled during set_rel_size */
     522        3776 :                 break;
     523         446 :             case RTE_NAMEDTUPLESTORE:
     524             :                 /* tuplestore reference --- fully handled during set_rel_size */
     525         446 :                 break;
     526        1354 :             case RTE_RESULT:
     527             :                 /* simple Result --- fully handled during set_rel_size */
     528        1354 :                 break;
     529           0 :             default:
     530           0 :                 elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
     531             :                 break;
     532             :         }
     533             :     }
     534             : 
     535             :     /*
     536             :      * Allow a plugin to editorialize on the set of Paths for this base
     537             :      * relation.  It could add new paths (such as CustomPaths) by calling
     538             :      * add_path(), or add_partial_path() if parallel aware.  It could also
     539             :      * delete or modify paths added by the core code.
     540             :      */
     541      407696 :     if (set_rel_pathlist_hook)
     542           0 :         (*set_rel_pathlist_hook) (root, rel, rti, rte);
     543             : 
     544             :     /*
     545             :      * If this is a baserel, we should normally consider gathering any partial
     546             :      * paths we may have created for it.  We have to do this after calling the
     547             :      * set_rel_pathlist_hook, else it cannot add partial paths to be included
     548             :      * here.
     549             :      *
     550             :      * However, if this is an inheritance child, skip it.  Otherwise, we could
     551             :      * end up with a very large number of gather nodes, each trying to grab
     552             :      * its own pool of workers.  Instead, we'll consider gathering partial
     553             :      * paths for the parent appendrel.
     554             :      *
     555             :      * Also, if this is the topmost scan/join rel, we postpone gathering until
     556             :      * the final scan/join targetlist is available (see grouping_planner).
     557             :      */
     558      407696 :     if (rel->reloptkind == RELOPT_BASEREL &&
     559      367638 :         !bms_equal(rel->relids, root->all_query_rels))
     560      183650 :         generate_useful_gather_paths(root, rel, false);
     561             : 
     562             :     /* Now find the cheapest of the paths for this rel */
     563      407696 :     set_cheapest(rel);
     564             : 
     565             : #ifdef OPTIMIZER_DEBUG
     566             :     pprint(rel);
     567             : #endif
     568      407696 : }
     569             : 
     570             : /*
     571             :  * set_plain_rel_size
     572             :  *    Set size estimates for a plain relation (no subquery, no inheritance)
     573             :  */
     574             : static void
     575      327652 : set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     576             : {
     577             :     /*
     578             :      * Test any partial indexes of rel for applicability.  We must do this
     579             :      * first since partial unique indexes can affect size estimates.
     580             :      */
     581      327652 :     check_index_predicates(root, rel);
     582             : 
     583             :     /* Mark rel with estimated output rows, width, etc */
     584      327652 :     set_baserel_size_estimates(root, rel);
     585      327644 : }
     586             : 
     587             : /*
     588             :  * If this relation could possibly be scanned from within a worker, then set
     589             :  * its consider_parallel flag.
     590             :  */
     591             : static void
     592      318022 : set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
     593             :                           RangeTblEntry *rte)
     594             : {
     595             :     /*
     596             :      * The flag has previously been initialized to false, so we can just
     597             :      * return if it becomes clear that we can't safely set it.
     598             :      */
     599             :     Assert(!rel->consider_parallel);
     600             : 
     601             :     /* Don't call this if parallelism is disallowed for the entire query. */
     602             :     Assert(root->glob->parallelModeOK);
     603             : 
     604             :     /* This should only be called for baserels and appendrel children. */
     605             :     Assert(IS_SIMPLE_REL(rel));
     606             : 
     607             :     /* Assorted checks based on rtekind. */
     608      318022 :     switch (rte->rtekind)
     609             :     {
     610      285404 :         case RTE_RELATION:
     611             : 
     612             :             /*
     613             :              * Currently, parallel workers can't access the leader's temporary
     614             :              * tables.  We could possibly relax this if we wrote all of its
     615             :              * local buffers at the start of the query and made no changes
     616             :              * thereafter (maybe we could allow hint bit changes), and if we
     617             :              * taught the workers to read them.  Writing a large number of
     618             :              * temporary buffers could be expensive, though, and we don't have
     619             :              * the rest of the necessary infrastructure right now anyway.  So
     620             :              * for now, bail out if we see a temporary table.
     621             :              */
     622      285404 :             if (get_rel_persistence(rte->relid) == RELPERSISTENCE_TEMP)
     623        7584 :                 return;
     624             : 
     625             :             /*
     626             :              * Table sampling can be pushed down to workers if the sample
     627             :              * function and its arguments are safe.
     628             :              */
     629      277820 :             if (rte->tablesample != NULL)
     630             :             {
     631         252 :                 char        proparallel = func_parallel(rte->tablesample->tsmhandler);
     632             : 
     633         252 :                 if (proparallel != PROPARALLEL_SAFE)
     634          36 :                     return;
     635         216 :                 if (!is_parallel_safe(root, (Node *) rte->tablesample->args))
     636          12 :                     return;
     637             :             }
     638             : 
     639             :             /*
     640             :              * Ask FDWs whether they can support performing a ForeignScan
     641             :              * within a worker.  Most often, the answer will be no.  For
     642             :              * example, if the nature of the FDW is such that it opens a TCP
     643             :              * connection with a remote server, each parallel worker would end
     644             :              * up with a separate connection, and these connections might not
     645             :              * be appropriately coordinated between workers and the leader.
     646             :              */
     647      277772 :             if (rte->relkind == RELKIND_FOREIGN_TABLE)
     648             :             {
     649             :                 Assert(rel->fdwroutine);
     650        1494 :                 if (!rel->fdwroutine->IsForeignScanParallelSafe)
     651        1430 :                     return;
     652          64 :                 if (!rel->fdwroutine->IsForeignScanParallelSafe(root, rel, rte))
     653           0 :                     return;
     654             :             }
     655             : 
     656             :             /*
     657             :              * There are additional considerations for appendrels, which we'll
     658             :              * deal with in set_append_rel_size and set_append_rel_pathlist.
     659             :              * For now, just set consider_parallel based on the rel's own
     660             :              * quals and targetlist.
     661             :              */
     662      276342 :             break;
     663             : 
     664        5732 :         case RTE_SUBQUERY:
     665             : 
     666             :             /*
     667             :              * There's no intrinsic problem with scanning a subquery-in-FROM
     668             :              * (as distinct from a SubPlan or InitPlan) in a parallel worker.
     669             :              * If the subquery doesn't happen to have any parallel-safe paths,
     670             :              * then flagging it as consider_parallel won't change anything,
     671             :              * but that's true for plain tables, too.  We must set
     672             :              * consider_parallel based on the rel's own quals and targetlist,
     673             :              * so that if a subquery path is parallel-safe but the quals and
     674             :              * projection we're sticking onto it are not, we correctly mark
     675             :              * the SubqueryScanPath as not parallel-safe.  (Note that
     676             :              * set_subquery_pathlist() might push some of these quals down
     677             :              * into the subquery itself, but that doesn't change anything.)
     678             :              *
     679             :              * We can't push sub-select containing LIMIT/OFFSET to workers as
     680             :              * there is no guarantee that the row order will be fully
     681             :              * deterministic, and applying LIMIT/OFFSET will lead to
     682             :              * inconsistent results at the top-level.  (In some cases, where
     683             :              * the result is ordered, we could relax this restriction.  But it
     684             :              * doesn't currently seem worth expending extra effort to do so.)
     685             :              */
     686             :             {
     687        5732 :                 Query      *subquery = castNode(Query, rte->subquery);
     688             : 
     689        5732 :                 if (limit_needed(subquery))
     690         410 :                     return;
     691             :             }
     692        5322 :             break;
     693             : 
     694           0 :         case RTE_JOIN:
     695             :             /* Shouldn't happen; we're only considering baserels here. */
     696             :             Assert(false);
     697           0 :             return;
     698             : 
     699       19332 :         case RTE_FUNCTION:
     700             :             /* Check for parallel-restricted functions. */
     701       19332 :             if (!is_parallel_safe(root, (Node *) rte->functions))
     702        9796 :                 return;
     703        9536 :             break;
     704             : 
     705         216 :         case RTE_TABLEFUNC:
     706             :             /* not parallel safe */
     707         216 :             return;
     708             : 
     709        2712 :         case RTE_VALUES:
     710             :             /* Check for parallel-restricted functions. */
     711        2712 :             if (!is_parallel_safe(root, (Node *) rte->values_lists))
     712           6 :                 return;
     713        2706 :             break;
     714             : 
     715        3108 :         case RTE_CTE:
     716             : 
     717             :             /*
     718             :              * CTE tuplestores aren't shared among parallel workers, so we
     719             :              * force all CTE scans to happen in the leader.  Also, populating
     720             :              * the CTE would require executing a subplan that's not available
     721             :              * in the worker, might be parallel-restricted, and must get
     722             :              * executed only once.
     723             :              */
     724        3108 :             return;
     725             : 
     726         418 :         case RTE_NAMEDTUPLESTORE:
     727             : 
     728             :             /*
     729             :              * tuplestore cannot be shared, at least without more
     730             :              * infrastructure to support that.
     731             :              */
     732         418 :             return;
     733             : 
     734        1100 :         case RTE_RESULT:
     735             :             /* RESULT RTEs, in themselves, are no problem. */
     736        1100 :             break;
     737             :     }
     738             : 
     739             :     /*
     740             :      * If there's anything in baserestrictinfo that's parallel-restricted, we
     741             :      * give up on parallelizing access to this relation.  We could consider
     742             :      * instead postponing application of the restricted quals until we're
     743             :      * above all the parallelism in the plan tree, but it's not clear that
     744             :      * that would be a win in very many cases, and it might be tricky to make
     745             :      * outer join clauses work correctly.  It would likely break equivalence
     746             :      * classes, too.
     747             :      */
     748      295006 :     if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo))
     749       20426 :         return;
     750             : 
     751             :     /*
     752             :      * Likewise, if the relation's outputs are not parallel-safe, give up.
     753             :      * (Usually, they're just Vars, but sometimes they're not.)
     754             :      */
     755      274580 :     if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs))
     756          18 :         return;
     757             : 
     758             :     /* We have a winner. */
     759      274562 :     rel->consider_parallel = true;
     760             : }
     761             : 
     762             : /*
     763             :  * set_plain_rel_pathlist
     764             :  *    Build access paths for a plain relation (no subquery, no inheritance)
     765             :  */
     766             : static void
     767      327644 : set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     768             : {
     769             :     Relids      required_outer;
     770             : 
     771             :     /*
     772             :      * We don't support pushing join clauses into the quals of a seqscan, but
     773             :      * it could still have required parameterization due to LATERAL refs in
     774             :      * its tlist.
     775             :      */
     776      327644 :     required_outer = rel->lateral_relids;
     777             : 
     778             :     /* Consider sequential scan */
     779      327644 :     add_path(rel, create_seqscan_path(root, rel, required_outer, 0));
     780             : 
     781             :     /* If appropriate, consider parallel sequential scan */
     782      327644 :     if (rel->consider_parallel && required_outer == NULL)
     783      243776 :         create_plain_partial_paths(root, rel);
     784             : 
     785             :     /* Consider index scans */
     786      327644 :     create_index_paths(root, rel);
     787             : 
     788             :     /* Consider TID scans */
     789      327644 :     create_tidscan_paths(root, rel);
     790      327644 : }
     791             : 
     792             : /*
     793             :  * create_plain_partial_paths
     794             :  *    Build partial access paths for parallel scan of a plain relation
     795             :  */
     796             : static void
     797      243776 : create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
     798             : {
     799             :     int         parallel_workers;
     800             : 
     801      243776 :     parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
     802             :                                                max_parallel_workers_per_gather);
     803             : 
     804             :     /* If any limit was set to zero, the user doesn't want a parallel scan. */
     805      243776 :     if (parallel_workers <= 0)
     806      219078 :         return;
     807             : 
     808             :     /* Add an unordered partial path based on a parallel sequential scan. */
     809       24698 :     add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
     810             : }
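(Aside, not part of the covered source above: compute_parallel_worker() is defined later in this file, beyond this excerpt. As a rough sketch of the sizing rule it applies to heap pages, assuming no per-table parallel_workers reloption is set: one worker is added for each tripling of the table size beyond min_parallel_table_scan_size, capped by max_parallel_workers_per_gather. A simplified illustration, not the exact source; heap_pages stands in for the page count passed in.)

    /* Simplified sketch of worker-count selection from heap size alone. */
    int         parallel_workers = 0;
    BlockNumber threshold = Max(min_parallel_table_scan_size, 1);

    while (heap_pages >= (BlockNumber) (threshold * 3))
    {
        parallel_workers++;
        threshold *= 3;
        if (threshold > INT_MAX / 3)
            break;              /* avoid overflow */
    }
    parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather);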
     811             : 
     812             : /*
     813             :  * set_tablesample_rel_size
     814             :  *    Set size estimates for a sampled relation
     815             :  */
     816             : static void
     817         252 : set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     818             : {
     819         252 :     TableSampleClause *tsc = rte->tablesample;
     820             :     TsmRoutine *tsm;
     821             :     BlockNumber pages;
     822             :     double      tuples;
     823             : 
     824             :     /*
     825             :      * Test any partial indexes of rel for applicability.  We must do this
     826             :      * first since partial unique indexes can affect size estimates.
     827             :      */
     828         252 :     check_index_predicates(root, rel);
     829             : 
     830             :     /*
     831             :      * Call the sampling method's estimation function to estimate the number
     832             :      * of pages it will read and the number of tuples it will return.  (Note:
     833             :      * we assume the function returns sane values.)
     834             :      */
     835         252 :     tsm = GetTsmRoutine(tsc->tsmhandler);
     836         252 :     tsm->SampleScanGetSampleSize(root, rel, tsc->args,
     837             :                                  &pages, &tuples);
     838             : 
     839             :     /*
     840             :      * For the moment, because we will only consider a SampleScan path for the
     841             :      * rel, it's okay to just overwrite the pages and tuples estimates for the
     842             :      * whole relation.  If we ever consider multiple path types for sampled
     843             :      * rels, we'll need more complication.
     844             :      */
     845         252 :     rel->pages = pages;
     846         252 :     rel->tuples = tuples;
     847             : 
     848             :     /* Mark rel with estimated output rows, width, etc */
     849         252 :     set_baserel_size_estimates(root, rel);
     850         252 : }
     851             : 
     852             : /*
     853             :  * set_tablesample_rel_pathlist
     854             :  *    Build access paths for a sampled relation
     855             :  */
     856             : static void
     857         252 : set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     858             : {
     859             :     Relids      required_outer;
     860             :     Path       *path;
     861             : 
     862             :     /*
     863             :      * We don't support pushing join clauses into the quals of a samplescan,
     864             :      * but it could still have required parameterization due to LATERAL refs
     865             :      * in its tlist or TABLESAMPLE arguments.
     866             :      */
     867         252 :     required_outer = rel->lateral_relids;
     868             : 
     869             :     /* Consider sampled scan */
     870         252 :     path = create_samplescan_path(root, rel, required_outer);
     871             : 
     872             :     /*
     873             :      * If the sampling method does not support repeatable scans, we must avoid
     874             :      * plans that would scan the rel multiple times.  Ideally, we'd simply
     875             :      * avoid putting the rel on the inside of a nestloop join; but adding such
     876             :      * a consideration to the planner seems like a great deal of complication
     877             :      * to support an uncommon usage of second-rate sampling methods.  Instead,
     878             :      * if there is a risk that the query might perform an unsafe join, just
     879             :      * wrap the SampleScan in a Materialize node.  We can check for joins by
     880             :      * counting the membership of all_query_rels (note that this correctly
     881             :      * counts inheritance trees as single rels).  If we're inside a subquery,
     882             :      * we can't easily check whether a join might occur in the outer query, so
     883             :      * just assume one is possible.
     884             :      *
     885             :      * GetTsmRoutine is relatively expensive compared to the other tests here,
     886             :      * so check repeatable_across_scans last, even though that's a bit odd.
     887             :      */
     888         478 :     if ((root->query_level > 1 ||
     889         226 :          bms_membership(root->all_query_rels) != BMS_SINGLETON) &&
     890          44 :         !(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans))
     891             :     {
     892           8 :         path = (Path *) create_material_path(rel, path);
     893             :     }
     894             : 
     895         252 :     add_path(rel, path);
     896             : 
     897             :     /* For the moment, at least, there are no other paths to consider */
     898         252 : }
     899             : 
     900             : /*
     901             :  * set_foreign_size
     902             :  *      Set size estimates for a foreign table RTE
     903             :  */
     904             : static void
     905        2324 : set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     906             : {
     907             :     /* Mark rel with estimated output rows, width, etc */
     908        2324 :     set_foreign_size_estimates(root, rel);
     909             : 
     910             :     /* Let FDW adjust the size estimates, if it can */
     911        2324 :     rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid);
     912             : 
     913             :     /* ... but do not let it set the rows estimate to zero */
     914        2320 :     rel->rows = clamp_row_est(rel->rows);
     915             : 
     916             :     /*
     917             :      * Also, make sure rel->tuples is not insane relative to rel->rows.
     918             :      * Notably, this ensures sanity if pg_class.reltuples contains -1 and the
     919             :      * FDW doesn't do anything to replace that.
     920             :      */
     921        2320 :     rel->tuples = Max(rel->tuples, rel->rows);
     922        2320 : }
     923             : 
     924             : /*
     925             :  * set_foreign_pathlist
     926             :  *      Build access paths for a foreign table RTE
     927             :  */
     928             : static void
     929        2320 : set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
     930             : {
     931             :     /* Call the FDW's GetForeignPaths function to generate path(s) */
     932        2320 :     rel->fdwroutine->GetForeignPaths(root, rel, rte->relid);
     933        2320 : }
     934             : 
     935             : /*
     936             :  * set_append_rel_size
     937             :  *    Set size estimates for a simple "append relation"
     938             :  *
     939             :  * The passed-in rel and RTE represent the entire append relation.  The
     940             :  * relation's contents are computed by appending together the output of the
     941             :  * individual member relations.  Note that in the non-partitioned inheritance
     942             :  * case, the first member relation is actually the same table as is mentioned
     943             :  * in the parent RTE ... but it has a different RTE and RelOptInfo.  This is
     944             :  * a good thing because their outputs are not the same size.
     945             :  */
     946             : static void
     947       19104 : set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
     948             :                     Index rti, RangeTblEntry *rte)
     949             : {
     950       19104 :     int         parentRTindex = rti;
     951             :     bool        has_live_children;
     952             :     double      parent_rows;
     953             :     double      parent_size;
     954             :     double     *parent_attrsizes;
     955             :     int         nattrs;
     956             :     ListCell   *l;
     957             : 
     958             :     /* Guard against stack overflow due to overly deep inheritance tree. */
     959       19104 :     check_stack_depth();
     960             : 
     961             :     Assert(IS_SIMPLE_REL(rel));
     962             : 
     963             :     /*
     964             :      * If this is a partitioned baserel, set the consider_partitionwise_join
     965             :      * flag; currently, we only consider partitionwise joins with the baserel
     966             :      * if its targetlist doesn't contain a whole-row Var.
     967             :      */
     968       19104 :     if (enable_partitionwise_join &&
     969        3902 :         rel->reloptkind == RELOPT_BASEREL &&
     970        3290 :         rte->relkind == RELKIND_PARTITIONED_TABLE &&
     971        3290 :         bms_is_empty(rel->attr_needed[InvalidAttrNumber - rel->min_attr]))
     972        3214 :         rel->consider_partitionwise_join = true;
     973             : 
     974             :     /*
     975             :      * Initialize to compute size estimates for whole append relation.
     976             :      *
     977             :      * We handle width estimates by weighting the widths of different child
     978             :      * rels proportionally to their number of rows.  This is sensible because
     979             :      * the use of width estimates is mainly to compute the total relation
     980             :      * "footprint" if we have to sort or hash it.  To do this, we sum the
     981             :      * total equivalent size (in "double" arithmetic) and then divide by the
     982             :      * total rowcount estimate.  This is done separately for the total rel
     983             :      * width and each attribute.
     984             :      *
     985             :      * Note: if you consider changing this logic, beware that child rels could
     986             :      * have zero rows and/or width, if they were excluded by constraints.
     987             :      */
     988       19104 :     has_live_children = false;
     989       19104 :     parent_rows = 0;
     990       19104 :     parent_size = 0;
     991       19104 :     nattrs = rel->max_attr - rel->min_attr + 1;
     992       19104 :     parent_attrsizes = (double *) palloc0(nattrs * sizeof(double));
     993             : 
     994       99202 :     foreach(l, root->append_rel_list)
     995             :     {
     996       80100 :         AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
     997             :         int         childRTindex;
     998             :         RangeTblEntry *childRTE;
     999             :         RelOptInfo *childrel;
    1000             :         List       *childrinfos;
    1001             :         ListCell   *parentvars;
    1002             :         ListCell   *childvars;
    1003             :         ListCell   *lc;
    1004             : 
    1005             :         /* append_rel_list contains all append rels; ignore others */
    1006       80100 :         if (appinfo->parent_relid != parentRTindex)
    1007       40214 :             continue;
    1008             : 
    1009       40126 :         childRTindex = appinfo->child_relid;
    1010       40126 :         childRTE = root->simple_rte_array[childRTindex];
    1011             : 
    1012             :         /*
    1013             :          * The child rel's RelOptInfo was already created during
    1014             :          * add_other_rels_to_query.
    1015             :          */
    1016       40126 :         childrel = find_base_rel(root, childRTindex);
    1017             :         Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);
    1018             : 
    1019             :         /* We may have already proven the child to be dummy. */
    1020       40126 :         if (IS_DUMMY_REL(childrel))
    1021           6 :             continue;
    1022             : 
    1023             :         /*
    1024             :          * We have to copy the parent's targetlist and quals to the child,
    1025             :          * with appropriate substitution of variables.  However, the
    1026             :          * baserestrictinfo quals were already copied/substituted when the
    1027             :          * child RelOptInfo was built.  So we don't need any additional setup
    1028             :          * before applying constraint exclusion.
    1029             :          */
    1030       40120 :         if (relation_excluded_by_constraints(root, childrel, childRTE))
    1031             :         {
    1032             :             /*
    1033             :              * This child need not be scanned, so we can omit it from the
    1034             :              * appendrel.
    1035             :              */
    1036          96 :             set_dummy_rel_pathlist(childrel);
    1037          96 :             continue;
    1038             :         }
    1039             : 
    1040             :         /*
    1041             :          * Constraint exclusion failed, so copy the parent's join quals and
    1042             :          * targetlist to the child, with appropriate variable substitutions.
    1043             :          *
    1044             :          * We skip join quals that came from above outer joins that can null
    1045             :          * this rel, since they would be of no value while generating paths
    1046             :          * for the child.  This saves some effort while processing the child
    1047             :          * rel, and it also avoids an implementation restriction in
    1048             :          * adjust_appendrel_attrs (it can't apply nullingrels to a non-Var).
    1049             :          */
    1050       40024 :         childrinfos = NIL;
    1051       52456 :         foreach(lc, rel->joininfo)
    1052             :         {
    1053       12432 :             RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
    1054             : 
    1055       12432 :             if (!bms_overlap(rinfo->clause_relids, rel->nulling_relids))
    1056       10110 :                 childrinfos = lappend(childrinfos,
    1057       10110 :                                       adjust_appendrel_attrs(root,
    1058             :                                                              (Node *) rinfo,
    1059             :                                                              1, &appinfo));
    1060             :         }
    1061       40024 :         childrel->joininfo = childrinfos;
    1062             : 
    1063             :         /*
    1064             :          * Now for the child's targetlist.
    1065             :          *
    1066             :          * NB: the resulting childrel->reltarget->exprs may contain arbitrary
    1067             :          * expressions, which otherwise would not occur in a rel's targetlist.
    1068             :          * Code that might be looking at an appendrel child must cope with
    1069             :          * such.  (Normally, a rel's targetlist would only include Vars and
    1070             :          * PlaceHolderVars.)  XXX we do not bother to update the cost or width
    1071             :          * fields of childrel->reltarget; not clear if that would be useful.
    1072             :          */
    1073       80048 :         childrel->reltarget->exprs = (List *)
    1074       40024 :             adjust_appendrel_attrs(root,
    1075       40024 :                                    (Node *) rel->reltarget->exprs,
    1076             :                                    1, &appinfo);
    1077             : 
    1078             :         /*
    1079             :          * We have to make child entries in the EquivalenceClass data
    1080             :          * structures as well.  This is needed either if the parent
    1081             :          * participates in some eclass joins (because we will want to consider
    1082             :          * inner-indexscan joins on the individual children) or if the parent
    1083             :          * has useful pathkeys (because we should try to build MergeAppend
    1084             :          * paths that produce those sort orderings).
    1085             :          */
    1086       40024 :         if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
    1087       20472 :             add_child_rel_equivalences(root, appinfo, rel, childrel);
    1088       40024 :         childrel->has_eclass_joins = rel->has_eclass_joins;
    1089             : 
    1090             :         /*
    1091             :          * Note: we could compute appropriate attr_needed data for the child's
    1092             :          * variables, by transforming the parent's attr_needed through the
    1093             :          * translated_vars mapping.  However, currently there's no need
    1094             :          * because attr_needed is only examined for base relations not
    1095             :          * otherrels.  So we just leave the child's attr_needed empty.
    1096             :          */
    1097             : 
    1098             :         /*
    1099             :          * If we consider partitionwise joins with the parent rel, do the same
    1100             :          * for partitioned child rels.
    1101             :          *
    1102             :          * Note: here we abuse the consider_partitionwise_join flag by setting
    1103             :          * it for child rels that are not themselves partitioned.  We do so to
    1104             :          * tell try_partitionwise_join() that the child rel is sufficiently
    1105             :          * valid to be used as a per-partition input, even if it later gets
    1106             :          * proven to be dummy.  (It's not usable until we've set up the
    1107             :          * reltarget and EC entries, which we just did.)
    1108             :          */
    1109       40024 :         if (rel->consider_partitionwise_join)
    1110       10472 :             childrel->consider_partitionwise_join = true;
    1111             : 
    1112             :         /*
    1113             :          * If parallelism is allowable for this query in general, see whether
    1114             :          * it's allowable for this childrel in particular.  But if we've
    1115             :          * already decided the appendrel is not parallel-safe as a whole,
    1116             :          * there's no point in considering parallelism for this child.  For
    1117             :          * consistency, do this before calling set_rel_size() for the child.
    1118             :          */
    1119       40024 :         if (root->glob->parallelModeOK && rel->consider_parallel)
    1120       28558 :             set_rel_consider_parallel(root, childrel, childRTE);
    1121             : 
    1122             :         /*
    1123             :          * Compute the child's size.
    1124             :          */
    1125       40024 :         set_rel_size(root, childrel, childRTindex, childRTE);
    1126             : 
    1127             :         /*
    1128             :          * It is possible that constraint exclusion detected a contradiction
    1129             :          * within a child subquery, even though we didn't prove one above. If
    1130             :          * so, we can skip this child.
    1131             :          */
    1132       40022 :         if (IS_DUMMY_REL(childrel))
    1133         138 :             continue;
    1134             : 
    1135             :         /* We have at least one live child. */
    1136       39884 :         has_live_children = true;
    1137             : 
    1138             :         /*
    1139             :          * If any live child is not parallel-safe, treat the whole appendrel
    1140             :          * as not parallel-safe.  In future we might be able to generate plans
    1141             :          * in which some children are farmed out to workers while others are
    1142             :          * not; but we don't have that today, so it's a waste to consider
    1143             :          * partial paths anywhere in the appendrel unless it's all safe.
    1144             :          * (Child rels visited before this one will be unmarked in
    1145             :          * set_append_rel_pathlist().)
    1146             :          */
    1147       39884 :         if (!childrel->consider_parallel)
    1148       11900 :             rel->consider_parallel = false;
    1149             : 
    1150             :         /*
    1151             :          * Accumulate size information from each live child.
    1152             :          */
    1153             :         Assert(childrel->rows > 0);
    1154             : 
    1155       39884 :         parent_rows += childrel->rows;
    1156       39884 :         parent_size += childrel->reltarget->width * childrel->rows;
    1157             : 
    1158             :         /*
    1159             :          * Accumulate per-column estimates too.  We need not do anything for
     1160             :          * PlaceHolderVars in the parent list.  If the child expression isn't
     1161             :          * a Var, or we didn't record a width estimate for it, we have to fall
    1162             :          * back on a datatype-based estimate.
    1163             :          *
    1164             :          * By construction, child's targetlist is 1-to-1 with parent's.
    1165             :          */
    1166      125636 :         forboth(parentvars, rel->reltarget->exprs,
    1167             :                 childvars, childrel->reltarget->exprs)
    1168             :         {
    1169       85752 :             Var        *parentvar = (Var *) lfirst(parentvars);
    1170       85752 :             Node       *childvar = (Node *) lfirst(childvars);
    1171             : 
    1172       85752 :             if (IsA(parentvar, Var) && parentvar->varno == parentRTindex)
    1173             :             {
    1174       74704 :                 int         pndx = parentvar->varattno - rel->min_attr;
    1175       74704 :                 int32       child_width = 0;
    1176             : 
    1177       74704 :                 if (IsA(childvar, Var) &&
    1178       72714 :                     ((Var *) childvar)->varno == childrel->relid)
    1179             :                 {
    1180       72648 :                     int         cndx = ((Var *) childvar)->varattno - childrel->min_attr;
    1181             : 
    1182       72648 :                     child_width = childrel->attr_widths[cndx];
    1183             :                 }
    1184       74704 :                 if (child_width <= 0)
    1185        2056 :                     child_width = get_typavgwidth(exprType(childvar),
    1186             :                                                   exprTypmod(childvar));
    1187             :                 Assert(child_width > 0);
    1188       74704 :                 parent_attrsizes[pndx] += child_width * childrel->rows;
    1189             :             }
    1190             :         }
    1191             :     }
    1192             : 
    1193       19102 :     if (has_live_children)
    1194             :     {
    1195             :         /*
    1196             :          * Save the finished size estimates.
    1197             :          */
    1198             :         int         i;
    1199             : 
    1200             :         Assert(parent_rows > 0);
    1201       18832 :         rel->rows = parent_rows;
    1202       18832 :         rel->reltarget->width = rint(parent_size / parent_rows);
    1203      192152 :         for (i = 0; i < nattrs; i++)
    1204      173320 :             rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows);
    1205             : 
    1206             :         /*
    1207             :          * Set "raw tuples" count equal to "rows" for the appendrel; needed
    1208             :          * because some places assume rel->tuples is valid for any baserel.
    1209             :          */
    1210       18832 :         rel->tuples = parent_rows;
    1211             : 
    1212             :         /*
    1213             :          * Note that we leave rel->pages as zero; this is important to avoid
    1214             :          * double-counting the appendrel tree in total_table_pages.
    1215             :          */
    1216             :     }
    1217             :     else
    1218             :     {
    1219             :         /*
    1220             :          * All children were excluded by constraints, so mark the whole
    1221             :          * appendrel dummy.  We must do this in this phase so that the rel's
    1222             :          * dummy-ness is visible when we generate paths for other rels.
    1223             :          */
    1224         270 :         set_dummy_rel_pathlist(rel);
    1225             :     }
    1226             : 
    1227       19102 :     pfree(parent_attrsizes);
    1228       19102 : }
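The row-weighted width averaging performed above is ordinary arithmetic, but a worked example may help. The sketch below is standalone and uses invented child statistics (1000 rows of width 40 and 3000 rows of width 80), giving a parent width of 70 rather than the unweighted mean of 60:

    #include <math.h>
    #include <stdio.h>

    /* Toy version of the width-averaging loop in set_append_rel_size(). */
    int
    main(void)
    {
        double      child_rows[] = {1000.0, 3000.0};
        double      child_width[] = {40.0, 80.0};
        double      parent_rows = 0.0;
        double      parent_size = 0.0;

        for (int i = 0; i < 2; i++)
        {
            parent_rows += child_rows[i];
            parent_size += child_width[i] * child_rows[i];
        }

        /* (1000*40 + 3000*80) / 4000 = 280000 / 4000 = 70 */
        printf("parent width = %.0f\n", rint(parent_size / parent_rows));
        return 0;
    }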
    1229             : 
    1230             : /*
    1231             :  * set_append_rel_pathlist
    1232             :  *    Build access paths for an "append relation"
    1233             :  */
    1234             : static void
    1235       18832 : set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
    1236             :                         Index rti, RangeTblEntry *rte)
    1237             : {
    1238       18832 :     int         parentRTindex = rti;
    1239       18832 :     List       *live_childrels = NIL;
    1240             :     ListCell   *l;
    1241             : 
    1242             :     /*
    1243             :      * Generate access paths for each member relation, and remember the
    1244             :      * non-dummy children.
    1245             :      */
    1246       98516 :     foreach(l, root->append_rel_list)
    1247             :     {
    1248       79684 :         AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
    1249             :         int         childRTindex;
    1250             :         RangeTblEntry *childRTE;
    1251             :         RelOptInfo *childrel;
    1252             : 
    1253             :         /* append_rel_list contains all append rels; ignore others */
    1254       79684 :         if (appinfo->parent_relid != parentRTindex)
    1255       39626 :             continue;
    1256             : 
    1257             :         /* Re-locate the child RTE and RelOptInfo */
    1258       40058 :         childRTindex = appinfo->child_relid;
    1259       40058 :         childRTE = root->simple_rte_array[childRTindex];
    1260       40058 :         childrel = root->simple_rel_array[childRTindex];
    1261             : 
    1262             :         /*
    1263             :          * If set_append_rel_size() decided the parent appendrel was
    1264             :          * parallel-unsafe at some point after visiting this child rel, we
    1265             :          * need to propagate the unsafety marking down to the child, so that
    1266             :          * we don't generate useless partial paths for it.
    1267             :          */
    1268       40058 :         if (!rel->consider_parallel)
    1269       12006 :             childrel->consider_parallel = false;
    1270             : 
    1271             :         /*
    1272             :          * Compute the child's access paths.
    1273             :          */
    1274       40058 :         set_rel_pathlist(root, childrel, childRTindex, childRTE);
    1275             : 
    1276             :         /*
    1277             :          * If child is dummy, ignore it.
    1278             :          */
    1279       40058 :         if (IS_DUMMY_REL(childrel))
    1280         174 :             continue;
    1281             : 
    1282             :         /*
    1283             :          * Child is live, so add it to the live_childrels list for use below.
    1284             :          */
    1285       39884 :         live_childrels = lappend(live_childrels, childrel);
    1286             :     }
    1287             : 
    1288             :     /* Add paths to the append relation. */
    1289       18832 :     add_paths_to_append_rel(root, rel, live_childrels);
    1290       18832 : }
    1291             : 
    1292             : 
    1293             : /*
    1294             :  * add_paths_to_append_rel
     1295             :  *      Generate paths for the given append relation, using the given set of
     1296             :  *      non-dummy child rels.
     1297             :  *
     1298             :  * The function collects all parameterizations and orderings supported by the
     1299             :  * non-dummy children.  For every such parameterization or ordering, it
     1300             :  * creates an append path collecting one path from each non-dummy child with
     1301             :  * that parameterization or ordering.  Similarly, it collects partial paths
     1302             :  * from non-dummy children to create partial append paths.
    1303             :  */
    1304             : void
    1305       32542 : add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
    1306             :                         List *live_childrels)
    1307             : {
    1308       32542 :     List       *subpaths = NIL;
    1309       32542 :     bool        subpaths_valid = true;
    1310       32542 :     List       *startup_subpaths = NIL;
    1311       32542 :     bool        startup_subpaths_valid = true;
    1312       32542 :     List       *partial_subpaths = NIL;
    1313       32542 :     List       *pa_partial_subpaths = NIL;
    1314       32542 :     List       *pa_nonpartial_subpaths = NIL;
    1315       32542 :     bool        partial_subpaths_valid = true;
    1316             :     bool        pa_subpaths_valid;
    1317       32542 :     List       *all_child_pathkeys = NIL;
    1318       32542 :     List       *all_child_outers = NIL;
    1319             :     ListCell   *l;
    1320       32542 :     double      partial_rows = -1;
    1321             : 
    1322             :     /* If appropriate, consider parallel append */
    1323       32542 :     pa_subpaths_valid = enable_parallel_append && rel->consider_parallel;
    1324             : 
    1325             :     /*
    1326             :      * For every non-dummy child, remember the cheapest path.  Also, identify
    1327             :      * all pathkeys (orderings) and parameterizations (required_outer sets)
    1328             :      * available for the non-dummy member relations.
    1329             :      */
    1330       99254 :     foreach(l, live_childrels)
    1331             :     {
    1332       66712 :         RelOptInfo *childrel = lfirst(l);
    1333             :         ListCell   *lcp;
    1334       66712 :         Path       *cheapest_partial_path = NULL;
    1335             : 
    1336             :         /*
    1337             :          * If child has an unparameterized cheapest-total path, add that to
    1338             :          * the unparameterized Append path we are constructing for the parent.
    1339             :          * If not, there's no workable unparameterized path.
    1340             :          *
    1341             :          * With partitionwise aggregates, the child rel's pathlist may be
    1342             :          * empty, so don't assume that a path exists here.
    1343             :          */
    1344       66712 :         if (childrel->pathlist != NIL &&
    1345       66712 :             childrel->cheapest_total_path->param_info == NULL)
    1346       66196 :             accumulate_append_subpath(childrel->cheapest_total_path,
    1347             :                                       &subpaths, NULL);
    1348             :         else
    1349         516 :             subpaths_valid = false;
    1350             : 
    1351             :         /*
    1352             :          * When the planner is considering cheap startup plans, we'll also
    1353             :          * collect all the cheapest_startup_paths (if set) and build an
    1354             :          * AppendPath containing those as subpaths.
    1355             :          */
    1356       66712 :         if (rel->consider_startup && childrel->cheapest_startup_path != NULL)
    1357             :         {
    1358             :             /* cheapest_startup_path must not be a parameterized path. */
    1359             :             Assert(childrel->cheapest_startup_path->param_info == NULL);
    1360        1286 :             accumulate_append_subpath(childrel->cheapest_startup_path,
    1361             :                                       &startup_subpaths,
    1362             :                                       NULL);
    1363             :         }
    1364             :         else
    1365       65426 :             startup_subpaths_valid = false;
    1366             : 
    1367             : 
    1368             :         /* Same idea, but for a partial plan. */
    1369       66712 :         if (childrel->partial_pathlist != NIL)
    1370             :         {
    1371       44492 :             cheapest_partial_path = linitial(childrel->partial_pathlist);
    1372       44492 :             accumulate_append_subpath(cheapest_partial_path,
    1373             :                                       &partial_subpaths, NULL);
    1374             :         }
    1375             :         else
    1376       22220 :             partial_subpaths_valid = false;
    1377             : 
    1378             :         /*
    1379             :          * Same idea, but for a parallel append mixing partial and non-partial
    1380             :          * paths.
    1381             :          */
    1382       66712 :         if (pa_subpaths_valid)
    1383             :         {
    1384       46924 :             Path       *nppath = NULL;
    1385             : 
    1386             :             nppath =
    1387       46924 :                 get_cheapest_parallel_safe_total_inner(childrel->pathlist);
    1388             : 
    1389       46924 :             if (cheapest_partial_path == NULL && nppath == NULL)
    1390             :             {
    1391             :                 /* Neither a partial nor a parallel-safe path?  Forget it. */
    1392         358 :                 pa_subpaths_valid = false;
    1393             :             }
    1394       46566 :             else if (nppath == NULL ||
    1395       44042 :                      (cheapest_partial_path != NULL &&
    1396       44042 :                       cheapest_partial_path->total_cost < nppath->total_cost))
    1397             :             {
    1398             :                 /* Partial path is cheaper or the only option. */
    1399             :                 Assert(cheapest_partial_path != NULL);
    1400       43824 :                 accumulate_append_subpath(cheapest_partial_path,
    1401             :                                           &pa_partial_subpaths,
    1402             :                                           &pa_nonpartial_subpaths);
    1403             :             }
    1404             :             else
    1405             :             {
    1406             :                 /*
    1407             :                  * Either we've got only a non-partial path, or we think that
    1408             :                  * a single backend can execute the best non-partial path
    1409             :                  * faster than all the parallel backends working together can
    1410             :                  * execute the best partial path.
    1411             :                  *
    1412             :                  * It might make sense to be more aggressive here.  Even if
    1413             :                  * the best non-partial path is more expensive than the best
    1414             :                  * partial path, it could still be better to choose the
    1415             :                  * non-partial path if there are several such paths that can
    1416             :                  * be given to different workers.  For now, we don't try to
    1417             :                  * figure that out.
    1418             :                  */
    1419        2742 :                 accumulate_append_subpath(nppath,
    1420             :                                           &pa_nonpartial_subpaths,
    1421             :                                           NULL);
    1422             :             }
    1423             :         }
    1424             : 
    1425             :         /*
    1426             :          * Collect lists of all the available path orderings and
    1427             :          * parameterizations for all the children.  We use these as a
    1428             :          * heuristic to indicate which sort orderings and parameterizations we
    1429             :          * should build Append and MergeAppend paths for.
    1430             :          */
    1431      153198 :         foreach(lcp, childrel->pathlist)
    1432             :         {
    1433       86486 :             Path       *childpath = (Path *) lfirst(lcp);
    1434       86486 :             List       *childkeys = childpath->pathkeys;
    1435       86486 :             Relids      childouter = PATH_REQ_OUTER(childpath);
    1436             : 
    1437             :             /* Unsorted paths don't contribute to pathkey list */
    1438       86486 :             if (childkeys != NIL)
    1439             :             {
    1440             :                 ListCell   *lpk;
    1441       19934 :                 bool        found = false;
    1442             : 
    1443             :                 /* Have we already seen this ordering? */
    1444       20122 :                 foreach(lpk, all_child_pathkeys)
    1445             :                 {
    1446       14418 :                     List       *existing_pathkeys = (List *) lfirst(lpk);
    1447             : 
    1448       14418 :                     if (compare_pathkeys(existing_pathkeys,
    1449             :                                          childkeys) == PATHKEYS_EQUAL)
    1450             :                     {
    1451       14230 :                         found = true;
    1452       14230 :                         break;
    1453             :                     }
    1454             :                 }
    1455       19934 :                 if (!found)
    1456             :                 {
    1457             :                     /* No, so add it to all_child_pathkeys */
    1458        5704 :                     all_child_pathkeys = lappend(all_child_pathkeys,
    1459             :                                                  childkeys);
    1460             :                 }
    1461             :             }
    1462             : 
    1463             :             /* Unparameterized paths don't contribute to param-set list */
    1464       86486 :             if (childouter)
    1465             :             {
    1466             :                 ListCell   *lco;
    1467        5664 :                 bool        found = false;
    1468             : 
    1469             :                 /* Have we already seen this param set? */
    1470        6276 :                 foreach(lco, all_child_outers)
    1471             :                 {
    1472        4134 :                     Relids      existing_outers = (Relids) lfirst(lco);
    1473             : 
    1474        4134 :                     if (bms_equal(existing_outers, childouter))
    1475             :                     {
    1476        3522 :                         found = true;
    1477        3522 :                         break;
    1478             :                     }
    1479             :                 }
    1480        5664 :                 if (!found)
    1481             :                 {
    1482             :                     /* No, so add it to all_child_outers */
    1483        2142 :                     all_child_outers = lappend(all_child_outers,
    1484             :                                                childouter);
    1485             :                 }
    1486             :             }
    1487             :         }
    1488             :     }
    1489             : 
    1490             :     /*
    1491             :      * If we found unparameterized paths for all children, build an unordered,
    1492             :      * unparameterized Append path for the rel.  (Note: this is correct even
    1493             :      * if we have zero or one live subpath due to constraint exclusion.)
    1494             :      */
    1495       32542 :     if (subpaths_valid)
    1496       32314 :         add_path(rel, (Path *) create_append_path(root, rel, subpaths, NIL,
    1497             :                                                   NIL, NULL, 0, false,
    1498             :                                                   -1));
    1499             : 
    1500             :     /* build an AppendPath for the cheap startup paths, if valid */
    1501       32542 :     if (startup_subpaths_valid)
    1502         544 :         add_path(rel, (Path *) create_append_path(root, rel, startup_subpaths,
    1503             :                                                   NIL, NIL, NULL, 0, false, -1));
    1504             : 
    1505             :     /*
    1506             :      * Consider an append of unordered, unparameterized partial paths.  Make
    1507             :      * it parallel-aware if possible.
    1508             :      */
    1509       32542 :     if (partial_subpaths_valid && partial_subpaths != NIL)
    1510             :     {
    1511             :         AppendPath *appendpath;
    1512             :         ListCell   *lc;
    1513       20214 :         int         parallel_workers = 0;
    1514             : 
    1515             :         /* Find the highest number of workers requested for any subpath. */
    1516       68622 :         foreach(lc, partial_subpaths)
    1517             :         {
    1518       48408 :             Path       *path = lfirst(lc);
    1519             : 
    1520       48408 :             parallel_workers = Max(parallel_workers, path->parallel_workers);
    1521             :         }
    1522             :         Assert(parallel_workers > 0);
    1523             : 
    1524             :         /*
    1525             :          * If the use of parallel append is permitted, always request at least
    1526             :          * log2(# of children) workers.  We assume it can be useful to have
    1527             :          * extra workers in this case because they will be spread out across
    1528             :          * the children.  The precise formula is just a guess, but we don't
    1529             :          * want to end up with a radically different answer for a table with N
    1530             :          * partitions vs. an unpartitioned table with the same data, so the
    1531             :          * use of some kind of log-scaling here seems to make some sense.
    1532             :          */
    1533       20214 :         if (enable_parallel_append)
    1534             :         {
    1535       20166 :             parallel_workers = Max(parallel_workers,
    1536             :                                    pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
    1537       20166 :             parallel_workers = Min(parallel_workers,
    1538             :                                    max_parallel_workers_per_gather);
    1539             :         }
    1540             :         Assert(parallel_workers > 0);
    1541             : 
    1542             :         /* Generate a partial append path. */
    1543       20214 :         appendpath = create_append_path(root, rel, NIL, partial_subpaths,
    1544             :                                         NIL, NULL, parallel_workers,
    1545             :                                         enable_parallel_append,
    1546             :                                         -1);
    1547             : 
    1548             :         /*
    1549             :          * Make sure any subsequent partial paths use the same row count
    1550             :          * estimate.
    1551             :          */
    1552       20214 :         partial_rows = appendpath->path.rows;
    1553             : 
    1554             :         /* Add the path. */
    1555       20214 :         add_partial_path(rel, (Path *) appendpath);
    1556             :     }
    1557             : 
    1558             :     /*
    1559             :      * Consider a parallel-aware append using a mix of partial and non-partial
    1560             :      * paths.  (This only makes sense if there's at least one child which has
    1561             :      * a non-partial path that is substantially cheaper than any partial path;
    1562             :      * otherwise, we should use the append path added in the previous step.)
    1563             :      */
    1564       32542 :     if (pa_subpaths_valid && pa_nonpartial_subpaths != NIL)
    1565             :     {
    1566             :         AppendPath *appendpath;
    1567             :         ListCell   *lc;
    1568        1424 :         int         parallel_workers = 0;
    1569             : 
    1570             :         /*
    1571             :          * Find the highest number of workers requested for any partial
    1572             :          * subpath.
    1573             :          */
    1574        2296 :         foreach(lc, pa_partial_subpaths)
    1575             :         {
    1576         872 :             Path       *path = lfirst(lc);
    1577             : 
    1578         872 :             parallel_workers = Max(parallel_workers, path->parallel_workers);
    1579             :         }
    1580             : 
    1581             :         /*
    1582             :          * Same formula here as above.  It's even more important in this
    1583             :          * instance because the non-partial paths won't contribute anything to
    1584             :          * the planned number of parallel workers.
    1585             :          */
    1586        1424 :         parallel_workers = Max(parallel_workers,
    1587             :                                pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
    1588        1424 :         parallel_workers = Min(parallel_workers,
    1589             :                                max_parallel_workers_per_gather);
    1590             :         Assert(parallel_workers > 0);
    1591             : 
    1592        1424 :         appendpath = create_append_path(root, rel, pa_nonpartial_subpaths,
    1593             :                                         pa_partial_subpaths,
    1594             :                                         NIL, NULL, parallel_workers, true,
    1595             :                                         partial_rows);
    1596        1424 :         add_partial_path(rel, (Path *) appendpath);
    1597             :     }
    1598             : 
    1599             :     /*
    1600             :      * Also build unparameterized ordered append paths based on the collected
    1601             :      * list of child pathkeys.
    1602             :      */
    1603       32542 :     if (subpaths_valid)
    1604       32314 :         generate_orderedappend_paths(root, rel, live_childrels,
    1605             :                                      all_child_pathkeys);
    1606             : 
    1607             :     /*
    1608             :      * Build Append paths for each parameterization seen among the child rels.
    1609             :      * (This may look pretty expensive, but in most cases of practical
    1610             :      * interest, the child rels will expose mostly the same parameterizations,
    1611             :      * so that not that many cases actually get considered here.)
    1612             :      *
    1613             :      * The Append node itself cannot enforce quals, so all qual checking must
    1614             :      * be done in the child paths.  This means that to have a parameterized
    1615             :      * Append path, we must have the exact same parameterization for each
    1616             :      * child path; otherwise some children might be failing to check the
    1617             :      * moved-down quals.  To make them match up, we can try to increase the
    1618             :      * parameterization of lesser-parameterized paths.
    1619             :      */
    1620       34684 :     foreach(l, all_child_outers)
    1621             :     {
    1622        2142 :         Relids      required_outer = (Relids) lfirst(l);
    1623             :         ListCell   *lcr;
    1624             : 
    1625             :         /* Select the child paths for an Append with this parameterization */
    1626        2142 :         subpaths = NIL;
    1627        2142 :         subpaths_valid = true;
    1628        7890 :         foreach(lcr, live_childrels)
    1629             :         {
    1630        5760 :             RelOptInfo *childrel = (RelOptInfo *) lfirst(lcr);
    1631             :             Path       *subpath;
    1632             : 
    1633        5760 :             if (childrel->pathlist == NIL)
    1634             :             {
    1635             :                 /* failed to make a suitable path for this child */
    1636           0 :                 subpaths_valid = false;
    1637           0 :                 break;
    1638             :             }
    1639             : 
    1640        5760 :             subpath = get_cheapest_parameterized_child_path(root,
    1641             :                                                             childrel,
    1642             :                                                             required_outer);
    1643        5760 :             if (subpath == NULL)
    1644             :             {
    1645             :                 /* failed to make a suitable path for this child */
    1646          12 :                 subpaths_valid = false;
    1647          12 :                 break;
    1648             :             }
    1649        5748 :             accumulate_append_subpath(subpath, &subpaths, NULL);
    1650             :         }
    1651             : 
    1652        2142 :         if (subpaths_valid)
    1653        2130 :             add_path(rel, (Path *)
    1654        2130 :                      create_append_path(root, rel, subpaths, NIL,
    1655             :                                         NIL, required_outer, 0, false,
    1656             :                                         -1));
    1657             :     }
    1658             : 
    1659             :     /*
    1660             :      * When there is only a single child relation, the Append path can inherit
    1661             :      * any ordering available for the child rel's path, so that it's useful to
    1662             :      * consider ordered partial paths.  Above we only considered the cheapest
    1663             :      * partial path for each child, but let's also make paths using any
    1664             :      * partial paths that have pathkeys.
    1665             :      */
    1666       32542 :     if (list_length(live_childrels) == 1)
    1667             :     {
    1668       13218 :         RelOptInfo *childrel = (RelOptInfo *) linitial(live_childrels);
    1669             : 
    1670             :         /* skip the cheapest partial path, since we already used that above */
    1671       13422 :         for_each_from(l, childrel->partial_pathlist, 1)
    1672             :         {
    1673         204 :             Path       *path = (Path *) lfirst(l);
    1674             :             AppendPath *appendpath;
    1675             : 
    1676             :             /* skip paths with no pathkeys. */
    1677         204 :             if (path->pathkeys == NIL)
    1678           0 :                 continue;
    1679             : 
    1680         204 :             appendpath = create_append_path(root, rel, NIL, list_make1(path),
    1681             :                                             NIL, NULL,
    1682             :                                             path->parallel_workers, true,
    1683             :                                             partial_rows);
    1684         204 :             add_partial_path(rel, (Path *) appendpath);
    1685             :         }
    1686             :     }
    1687       32542 : }
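The worker-count heuristic used for the partial Append paths above asks for at least floor(log2(number of live children)) + 1 workers before applying the max_parallel_workers_per_gather cap; pg_leftmost_one_pos32() supplies the floor(log2()) part. A standalone sketch with made-up child counts shows the effect of the formula:

    #include <stdio.h>

    /* Mimics pg_leftmost_one_pos32(n) + 1, i.e. floor(log2(n)) + 1, for n >= 1. */
    static int
    min_append_workers(int nchildren)
    {
        int         pos = 0;

        while (nchildren >>= 1)
            pos++;
        return pos + 1;
    }

    int
    main(void)
    {
        int         examples[] = {2, 13, 100};

        /* Prints: 2 -> 2, 13 -> 4, 100 -> 7 (before the per-gather GUC cap). */
        for (int i = 0; i < 3; i++)
            printf("%d children -> at least %d workers\n",
                   examples[i], min_append_workers(examples[i]));
        return 0;
    }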
    1688             : 
    1689             : /*
    1690             :  * generate_orderedappend_paths
    1691             :  *      Generate ordered append paths for an append relation
    1692             :  *
    1693             :  * Usually we generate MergeAppend paths here, but there are some special
    1694             :  * cases where we can generate simple Append paths, because the subpaths
    1695             :  * can provide tuples in the required order already.
    1696             :  *
    1697             :  * We generate a path for each ordering (pathkey list) appearing in
    1698             :  * all_child_pathkeys.
    1699             :  *
    1700             :  * We consider both cheapest-startup and cheapest-total cases, ie, for each
    1701             :  * interesting ordering, collect all the cheapest startup subpaths and all the
    1702             :  * cheapest total paths, and build a suitable path for each case.
    1703             :  *
    1704             :  * We don't currently generate any parameterized ordered paths here.  While
    1705             :  * it would not take much more code here to do so, it's very unclear that it
    1706             :  * is worth the planning cycles to investigate such paths: there's little
    1707             :  * use for an ordered path on the inside of a nestloop.  In fact, it's likely
    1708             :  * that the current coding of add_path would reject such paths out of hand,
    1709             :  * because add_path gives no credit for sort ordering of parameterized paths,
    1710             :  * and a parameterized MergeAppend is going to be more expensive than the
    1711             :  * corresponding parameterized Append path.  If we ever try harder to support
    1712             :  * parameterized mergejoin plans, it might be worth adding support for
    1713             :  * parameterized paths here to feed such joins.  (See notes in
    1714             :  * optimizer/README for why that might not ever happen, though.)
    1715             :  */
    1716             : static void
    1717       32314 : generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
    1718             :                              List *live_childrels,
    1719             :                              List *all_child_pathkeys)
    1720             : {
    1721             :     ListCell   *lcp;
    1722       32314 :     List       *partition_pathkeys = NIL;
    1723       32314 :     List       *partition_pathkeys_desc = NIL;
    1724       32314 :     bool        partition_pathkeys_partial = true;
    1725       32314 :     bool        partition_pathkeys_desc_partial = true;
    1726             : 
    1727             :     /*
    1728             :      * Some partitioned table setups may allow us to use an Append node
    1729             :      * instead of a MergeAppend.  This is possible in cases such as RANGE
    1730             :      * partitioned tables where it's guaranteed that an earlier partition must
    1731             :      * contain rows which come earlier in the sort order.  To detect whether
    1732             :      * this is relevant, build pathkey descriptions of the partition ordering,
    1733             :      * for both forward and reverse scans.
    1734             :      */
    1735       57508 :     if (rel->part_scheme != NULL && IS_SIMPLE_REL(rel) &&
    1736       25194 :         partitions_are_ordered(rel->boundinfo, rel->live_parts))
    1737             :     {
    1738       21226 :         partition_pathkeys = build_partition_pathkeys(root, rel,
    1739             :                                                       ForwardScanDirection,
    1740             :                                                       &partition_pathkeys_partial);
    1741             : 
    1742       21226 :         partition_pathkeys_desc = build_partition_pathkeys(root, rel,
    1743             :                                                            BackwardScanDirection,
    1744             :                                                            &partition_pathkeys_desc_partial);
    1745             : 
    1746             :         /*
    1747             :          * You might think we should truncate_useless_pathkeys here, but
    1748             :          * allowing partition keys which are a subset of the query's pathkeys
    1749             :          * can often be useful.  For example, consider a table partitioned by
    1750             :          * RANGE (a, b), and a query with ORDER BY a, b, c.  If we have child
    1751             :          * paths that can produce the a, b, c ordering (perhaps via indexes on
    1752             :          * (a, b, c)) then it works to consider the appendrel output as
    1753             :          * ordered by a, b, c.
    1754             :          */
    1755             :     }
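    /*
     * Illustration with a hypothetical schema: for a table partitioned by
     * RANGE (a) into p1 (a < 10) and p2 (a >= 10), every row of p1 sorts
     * before every row of p2 on "a".  If each child path already returns its
     * rows ordered by "a", concatenating the children in bound order with a
     * plain Append yields output ordered by "a", so no MergeAppend is needed.
     */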
    1756             : 
    1757             :     /* Now consider each interesting sort ordering */
    1758       37982 :     foreach(lcp, all_child_pathkeys)
    1759             :     {
    1760        5668 :         List       *pathkeys = (List *) lfirst(lcp);
    1761        5668 :         List       *startup_subpaths = NIL;
    1762        5668 :         List       *total_subpaths = NIL;
    1763        5668 :         List       *fractional_subpaths = NIL;
    1764        5668 :         bool        startup_neq_total = false;
    1765             :         bool        match_partition_order;
    1766             :         bool        match_partition_order_desc;
    1767             :         int         end_index;
    1768             :         int         first_index;
    1769             :         int         direction;
    1770             : 
    1771             :         /*
    1772             :          * Determine if this sort ordering matches any partition pathkeys we
    1773             :          * have, for both ascending and descending partition order.  If the
    1774             :          * partition pathkeys happen to be contained in pathkeys then it still
    1775             :          * works, as described above, providing that the partition pathkeys
    1776             :          * are complete and not just a prefix of the partition keys.  (In such
    1777             :          * cases we'll be relying on the child paths to have sorted the
    1778             :          * lower-order columns of the required pathkeys.)
    1779             :          */
    1780        5668 :         match_partition_order =
    1781        9248 :             pathkeys_contained_in(pathkeys, partition_pathkeys) ||
    1782        3716 :             (!partition_pathkeys_partial &&
    1783         136 :              pathkeys_contained_in(partition_pathkeys, pathkeys));
    1784             : 
    1785       12666 :         match_partition_order_desc = !match_partition_order &&
    1786        3514 :             (pathkeys_contained_in(pathkeys, partition_pathkeys_desc) ||
    1787        3524 :              (!partition_pathkeys_desc_partial &&
    1788          40 :               pathkeys_contained_in(partition_pathkeys_desc, pathkeys)));
    1789             : 
    1790             :         /*
    1791             :          * When the required pathkeys match the reverse of the partition
    1792             :          * order, we must build the list of paths in reverse starting with the
    1793             :          * last matching partition first.  We can get away without making any
    1794             :          * special cases for this in the loop below by just looping backward
    1795             :          * over the child relations in this case.
    1796             :          */
    1797        5668 :         if (match_partition_order_desc)
    1798             :         {
    1799             :             /* loop backward */
    1800          42 :             first_index = list_length(live_childrels) - 1;
    1801          42 :             end_index = -1;
    1802          42 :             direction = -1;
    1803             : 
    1804             :             /*
    1805             :              * Set this to true to save us having to check for
    1806             :              * match_partition_order_desc in the loop below.
    1807             :              */
    1808          42 :             match_partition_order = true;
    1809             :         }
    1810             :         else
    1811             :         {
    1812             :             /* for all other case, loop forward */
    1813        5626 :             first_index = 0;
    1814        5626 :             end_index = list_length(live_childrels);
    1815        5626 :             direction = 1;
    1816             :         }
    1817             : 
    1818             :         /* Select the child paths for this ordering... */
    1819       20898 :         for (int i = first_index; i != end_index; i += direction)
    1820             :         {
    1821       15230 :             RelOptInfo *childrel = list_nth_node(RelOptInfo, live_childrels, i);
    1822             :             Path       *cheapest_startup,
    1823             :                        *cheapest_total,
    1824       15230 :                        *cheapest_fractional = NULL;
    1825             : 
    1826             :             /* Locate the right paths, if they are available. */
    1827             :             cheapest_startup =
    1828       15230 :                 get_cheapest_path_for_pathkeys(childrel->pathlist,
    1829             :                                                pathkeys,
    1830             :                                                NULL,
    1831             :                                                STARTUP_COST,
    1832             :                                                false);
    1833             :             cheapest_total =
    1834       15230 :                 get_cheapest_path_for_pathkeys(childrel->pathlist,
    1835             :                                                pathkeys,
    1836             :                                                NULL,
    1837             :                                                TOTAL_COST,
    1838             :                                                false);
    1839             : 
    1840             :             /*
    1841             :              * If we can't find any paths with the right order just use the
    1842             :              * cheapest-total path; we'll have to sort it later.
    1843             :              */
    1844       15230 :             if (cheapest_startup == NULL || cheapest_total == NULL)
    1845             :             {
    1846         262 :                 cheapest_startup = cheapest_total =
    1847             :                     childrel->cheapest_total_path;
    1848             :                 /* Assert we do have an unparameterized path for this child */
    1849             :                 Assert(cheapest_total->param_info == NULL);
    1850             :             }
    1851             : 
    1852             :             /*
    1853             :              * When building a fractional path, determine a cheapest
    1854             :              * fractional path for each child relation too. Looking at startup
    1855             :              * and total costs is not enough, because the cheapest fractional
    1856             :              * path may be dominated by two separate paths (one for startup,
    1857             :              * one for total).
    1861             :              */
    1862       15230 :             if (root->tuple_fraction > 0)
    1863             :             {
    1864         668 :                 double      path_fraction = (1.0 / root->tuple_fraction);
    1865             : 
    1866             :                 cheapest_fractional =
    1867         668 :                     get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
    1868             :                                                               pathkeys,
    1869             :                                                               NULL,
    1870             :                                                               path_fraction);
    1871             : 
    1872             :                 /*
    1873             :                  * If we found no path with matching pathkeys, use the
    1874             :                  * cheapest total path instead.
    1875             :                  *
    1876             :                  * XXX We might consider partially sorted paths too (with an
    1877             :                  * incremental sort on top). But we'd have to build all the
    1878             :                  * incremental paths, do the costing etc.
    1879             :                  */
    1880         668 :                 if (!cheapest_fractional)
    1881          44 :                     cheapest_fractional = cheapest_total;
    1882             :             }
    1883             : 
    1884             :             /*
    1885             :              * Notice whether the cheapest-startup and cheapest-total paths are
    1886             :              * actually different; frequently there will be no point
    1887             :              * in two create_merge_append_path() calls.
    1888             :              */
    1889       15230 :             if (cheapest_startup != cheapest_total)
    1890          48 :                 startup_neq_total = true;
    1891             : 
    1892             :             /*
    1893             :              * Collect the appropriate child paths.  The required logic varies
    1894             :              * for the Append and MergeAppend cases.
    1895             :              */
    1896       15230 :             if (match_partition_order)
    1897             :             {
    1898             :                 /*
    1899             :                  * We're going to make a plain Append path.  We don't need
    1900             :                  * most of what accumulate_append_subpath would do, but we do
    1901             :                  * want to cut out child Appends or MergeAppends if they have
    1902             :                  * just a single subpath (and hence aren't doing anything
    1903             :                  * useful).
    1904             :                  */
    1905        5888 :                 cheapest_startup = get_singleton_append_subpath(cheapest_startup);
    1906        5888 :                 cheapest_total = get_singleton_append_subpath(cheapest_total);
    1907             : 
    1908        5888 :                 startup_subpaths = lappend(startup_subpaths, cheapest_startup);
    1909        5888 :                 total_subpaths = lappend(total_subpaths, cheapest_total);
    1910             : 
    1911        5888 :                 if (cheapest_fractional)
    1912             :                 {
    1913         120 :                     cheapest_fractional = get_singleton_append_subpath(cheapest_fractional);
    1914         120 :                     fractional_subpaths = lappend(fractional_subpaths, cheapest_fractional);
    1915             :                 }
    1916             :             }
    1917             :             else
    1918             :             {
    1919             :                 /*
    1920             :                  * Otherwise, rely on accumulate_append_subpath to collect the
    1921             :                  * child paths for the MergeAppend.
    1922             :                  */
    1923        9342 :                 accumulate_append_subpath(cheapest_startup,
    1924             :                                           &startup_subpaths, NULL);
    1925        9342 :                 accumulate_append_subpath(cheapest_total,
    1926             :                                           &total_subpaths, NULL);
    1927             : 
    1928        9342 :                 if (cheapest_fractional)
    1929         548 :                     accumulate_append_subpath(cheapest_fractional,
    1930             :                                               &fractional_subpaths, NULL);
    1931             :             }
    1932             :         }
    1933             : 
    1934             :         /* ... and build the Append or MergeAppend paths */
    1935        5668 :         if (match_partition_order)
    1936             :         {
    1937             :             /* We only need Append */
    1938        2196 :             add_path(rel, (Path *) create_append_path(root,
    1939             :                                                       rel,
    1940             :                                                       startup_subpaths,
    1941             :                                                       NIL,
    1942             :                                                       pathkeys,
    1943             :                                                       NULL,
    1944             :                                                       0,
    1945             :                                                       false,
    1946             :                                                       -1));
    1947        2196 :             if (startup_neq_total)
    1948           0 :                 add_path(rel, (Path *) create_append_path(root,
    1949             :                                                           rel,
    1950             :                                                           total_subpaths,
    1951             :                                                           NIL,
    1952             :                                                           pathkeys,
    1953             :                                                           NULL,
    1954             :                                                           0,
    1955             :                                                           false,
    1956             :                                                           -1));
    1957             : 
    1958        2196 :             if (fractional_subpaths)
    1959          60 :                 add_path(rel, (Path *) create_append_path(root,
    1960             :                                                           rel,
    1961             :                                                           fractional_subpaths,
    1962             :                                                           NIL,
    1963             :                                                           pathkeys,
    1964             :                                                           NULL,
    1965             :                                                           0,
    1966             :                                                           false,
    1967             :                                                           -1));
    1968             :         }
    1969             :         else
    1970             :         {
    1971             :             /* We need MergeAppend */
    1972        3472 :             add_path(rel, (Path *) create_merge_append_path(root,
    1973             :                                                             rel,
    1974             :                                                             startup_subpaths,
    1975             :                                                             pathkeys,
    1976             :                                                             NULL));
    1977        3472 :             if (startup_neq_total)
    1978          24 :                 add_path(rel, (Path *) create_merge_append_path(root,
    1979             :                                                                 rel,
    1980             :                                                                 total_subpaths,
    1981             :                                                                 pathkeys,
    1982             :                                                                 NULL));
    1983             : 
    1984        3472 :             if (fractional_subpaths)
    1985         196 :                 add_path(rel, (Path *) create_merge_append_path(root,
    1986             :                                                                 rel,
    1987             :                                                                 fractional_subpaths,
    1988             :                                                                 pathkeys,
    1989             :                                                                 NULL));
    1990             :         }
    1991             :     }
    1992       32314 : }
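                     : 
                     : /*
                     :  * Illustrative example (hypothetical table): with "pt" range-partitioned on
                     :  * column "a" and a btree index on "a" in each partition, a query such as
                     :  *
                     :  *     SELECT * FROM pt ORDER BY a;
                     :  *
                     :  * asks for pathkeys matching the partition order, so the logic above can
                     :  * typically build an ordered Append of the children's index paths instead of
                     :  * a MergeAppend.  With ORDER BY a DESC it loops over the children backward,
                     :  * and with a LIMIT (nonzero tuple_fraction) it also considers the cheapest
                     :  * fractional child paths.
                     :  */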
    1993             : 
    1994             : /*
    1995             :  * get_cheapest_parameterized_child_path
    1996             :  *      Get cheapest path for this relation that has exactly the requested
    1997             :  *      parameterization.
    1998             :  *
    1999             :  * Returns NULL if unable to create such a path.
    2000             :  */
    2001             : static Path *
    2002        5760 : get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel,
    2003             :                                       Relids required_outer)
    2004             : {
    2005             :     Path       *cheapest;
    2006             :     ListCell   *lc;
    2007             : 
    2008             :     /*
    2009             :      * Look up the cheapest existing path with no more than the needed
    2010             :      * parameterization.  If it has exactly the needed parameterization, we're
    2011             :      * done.
    2012             :      */
    2013        5760 :     cheapest = get_cheapest_path_for_pathkeys(rel->pathlist,
    2014             :                                               NIL,
    2015             :                                               required_outer,
    2016             :                                               TOTAL_COST,
    2017             :                                               false);
    2018             :     Assert(cheapest != NULL);
    2019        5760 :     if (bms_equal(PATH_REQ_OUTER(cheapest), required_outer))
    2020        5468 :         return cheapest;
    2021             : 
    2022             :     /*
    2023             :      * Otherwise, we can "reparameterize" an existing path to match the given
    2024             :      * parameterization, which effectively means pushing down additional
    2025             :      * joinquals to be checked within the path's scan.  However, some existing
    2026             :      * paths might check the available joinquals already while others don't;
    2027             :      * therefore, it's not clear which existing path will be cheapest after
    2028             :      * reparameterization.  We have to go through them all and find out.
    2029             :      */
    2030         292 :     cheapest = NULL;
    2031        1012 :     foreach(lc, rel->pathlist)
    2032             :     {
    2033         720 :         Path       *path = (Path *) lfirst(lc);
    2034             : 
    2035             :         /* Can't use it if it needs more than requested parameterization */
    2036         720 :         if (!bms_is_subset(PATH_REQ_OUTER(path), required_outer))
    2037          24 :             continue;
    2038             : 
    2039             :         /*
    2040             :          * Reparameterization can only increase the path's cost, so if it's
    2041             :          * already more expensive than the current cheapest, forget it.
    2042             :          */
    2043        1080 :         if (cheapest != NULL &&
    2044         384 :             compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
    2045         312 :             continue;
    2046             : 
    2047             :         /* Reparameterize if needed, then recheck cost */
    2048         384 :         if (!bms_equal(PATH_REQ_OUTER(path), required_outer))
    2049             :         {
    2050         308 :             path = reparameterize_path(root, path, required_outer, 1.0);
    2051         308 :             if (path == NULL)
    2052          32 :                 continue;       /* failed to reparameterize this one */
    2053             :             Assert(bms_equal(PATH_REQ_OUTER(path), required_outer));
    2054             : 
    2055         276 :             if (cheapest != NULL &&
    2056           0 :                 compare_path_costs(cheapest, path, TOTAL_COST) <= 0)
    2057           0 :                 continue;
    2058             :         }
    2059             : 
    2060             :         /* We have a new best path */
    2061         352 :         cheapest = path;
    2062             :     }
    2063             : 
    2064             :     /* Return the best path, or NULL if we found no suitable candidate */
    2065         292 :     return cheapest;
    2066             : }
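                     : 
                     : /*
                     :  * Illustrative example (hypothetical tables): in a join such as
                     :  *
                     :  *     SELECT * FROM a JOIN pt ON a.id = pt.a_id;
                     :  *
                     :  * a nestloop over the partitioned table "pt" may want each child path to be
                     :  * parameterized by "a", e.g. an index scan on the child's a_id column using
                     :  * the current outer row's a.id.  If a child has no path with exactly that
                     :  * parameterization, the function above falls back to reparameterizing one of
                     :  * its other paths, keeping whichever candidate turns out cheapest.
                     :  */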
    2067             : 
    2068             : /*
    2069             :  * accumulate_append_subpath
    2070             :  *      Add a subpath to the list being built for an Append or MergeAppend.
    2071             :  *
    2072             :  * It's possible that the child is itself an Append or MergeAppend path, in
    2073             :  * which case we can "cut out the middleman" and just add its child paths to
    2074             :  * our own list.  (We don't try to do this earlier because we need to apply
    2075             :  * both levels of transformation to the quals.)
    2076             :  *
    2077             :  * Note that if we omit a child MergeAppend in this way, we are effectively
    2078             :  * omitting a sort step, which seems fine: if the parent is to be an Append,
    2079             :  * its result would be unsorted anyway, while if the parent is to be a
    2080             :  * MergeAppend, there's no point in a separate sort on a child.
    2081             :  *
    2082             :  * Normally, either path is a partial path and subpaths is a list of partial
    2083             :  * paths, or else path is a non-partial path and subpaths is a list of those.
    2084             :  * However, if path is a parallel-aware Append, then we add its partial path
    2085             :  * children to subpaths and the rest to special_subpaths.  If the latter is
    2086             :  * NULL, we don't flatten the path at all (unless it contains only partial
    2087             :  * paths).
    2088             :  */
    2089             : static void
    2090      183520 : accumulate_append_subpath(Path *path, List **subpaths, List **special_subpaths)
    2091             : {
    2092      183520 :     if (IsA(path, AppendPath))
    2093             :     {
    2094       13322 :         AppendPath *apath = (AppendPath *) path;
    2095             : 
    2096       13322 :         if (!apath->path.parallel_aware || apath->first_partial_path == 0)
    2097             :         {
    2098       13130 :             *subpaths = list_concat(*subpaths, apath->subpaths);
    2099       13130 :             return;
    2100             :         }
    2101         192 :         else if (special_subpaths != NULL)
    2102             :         {
    2103             :             List       *new_special_subpaths;
    2104             : 
    2105             :             /* Split Parallel Append into partial and non-partial subpaths */
    2106          96 :             *subpaths = list_concat(*subpaths,
    2107          96 :                                     list_copy_tail(apath->subpaths,
    2108             :                                                    apath->first_partial_path));
    2109          96 :             new_special_subpaths = list_copy_head(apath->subpaths,
    2110             :                                                   apath->first_partial_path);
    2111          96 :             *special_subpaths = list_concat(*special_subpaths,
    2112             :                                             new_special_subpaths);
    2113          96 :             return;
    2114             :         }
    2115             :     }
    2116      170198 :     else if (IsA(path, MergeAppendPath))
    2117             :     {
    2118         620 :         MergeAppendPath *mpath = (MergeAppendPath *) path;
    2119             : 
    2120         620 :         *subpaths = list_concat(*subpaths, mpath->subpaths);
    2121         620 :         return;
    2122             :     }
    2123             : 
    2124      169674 :     *subpaths = lappend(*subpaths, path);
    2125             : }
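                     : 
                     : /*
                     :  * For example, accumulating an AppendPath with subpaths [A, B] into an empty
                     :  * list and then a plain path C yields subpaths = [A, B, C] rather than
                     :  * [Append(A, B), C]: the nested Append is flattened away.  A parallel-aware
                     :  * child Append that has some non-partial subpaths is flattened like this only
                     :  * when the caller supplies a special_subpaths list to receive them.
                     :  */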
    2126             : 
    2127             : /*
    2128             :  * get_singleton_append_subpath
    2129             :  *      Returns the single subpath of an Append/MergeAppend, or just
    2130             :  *      returns 'path' if it's not a single sub-path Append/MergeAppend.
    2131             :  *
    2132             :  * Note: 'path' must not be a parallel-aware path.
    2133             :  */
    2134             : static Path *
    2135       11896 : get_singleton_append_subpath(Path *path)
    2136             : {
    2137             :     Assert(!path->parallel_aware);
    2138             : 
    2139       11896 :     if (IsA(path, AppendPath))
    2140             :     {
    2141         340 :         AppendPath *apath = (AppendPath *) path;
    2142             : 
    2143         340 :         if (list_length(apath->subpaths) == 1)
    2144         156 :             return (Path *) linitial(apath->subpaths);
    2145             :     }
    2146       11556 :     else if (IsA(path, MergeAppendPath))
    2147             :     {
    2148         252 :         MergeAppendPath *mpath = (MergeAppendPath *) path;
    2149             : 
    2150         252 :         if (list_length(mpath->subpaths) == 1)
    2151           0 :             return (Path *) linitial(mpath->subpaths);
    2152             :     }
    2153             : 
    2154       11740 :     return path;
    2155             : }
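                     : 
                     : /*
                     :  * For example, a child that is itself a partitioned table may contribute an
                     :  * Append with a single surviving subpath (say, after partition pruning); the
                     :  * function above lets the caller use that subpath directly rather than
                     :  * wrapping a one-child Append inside the parent Append.
                     :  */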
    2156             : 
    2157             : /*
    2158             :  * set_dummy_rel_pathlist
    2159             :  *    Build a dummy path for a relation that's been excluded by constraints
    2160             :  *
    2161             :  * Rather than inventing a special "dummy" path type, we represent this as an
    2162             :  * AppendPath with no members (see also IS_DUMMY_APPEND/IS_DUMMY_REL macros).
    2163             :  *
    2164             :  * (See also mark_dummy_rel, which does basically the same thing, but is
    2165             :  * typically used to change a rel into dummy state after we already made
    2166             :  * paths for it.)
    2167             :  */
    2168             : static void
    2169         964 : set_dummy_rel_pathlist(RelOptInfo *rel)
    2170             : {
    2171             :     /* Set dummy size estimates --- we leave attr_widths[] as zeroes */
    2172         964 :     rel->rows = 0;
    2173         964 :     rel->reltarget->width = 0;
    2174             : 
    2175             :     /* Discard any pre-existing paths; no further need for them */
    2176         964 :     rel->pathlist = NIL;
    2177         964 :     rel->partial_pathlist = NIL;
    2178             : 
    2179             :     /* Set up the dummy path */
    2180         964 :     add_path(rel, (Path *) create_append_path(NULL, rel, NIL, NIL,
    2181             :                                               NIL, rel->lateral_relids,
    2182             :                                               0, false, -1));
    2183             : 
    2184             :     /*
    2185             :      * We set the cheapest-path fields immediately, just in case they were
    2186             :      * pointing at some discarded path.  This is redundant when we're called
    2187             :      * from set_rel_size(), but not when called from elsewhere, and doing it
    2188             :      * twice is harmless anyway.
    2189             :      */
    2190         964 :     set_cheapest(rel);
    2191         964 : }
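                     : 
                     : /*
                     :  * Illustrative example (hypothetical inheritance setup): given a child table
                     :  * with CHECK (region = 'EU') and a query whose WHERE clause requires
                     :  * region = 'US', constraint exclusion can prove the child returns no rows, so
                     :  * the child rel is turned into a dummy here and later planning steps skip it.
                     :  */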
    2192             : 
    2193             : /*
    2194             :  * find_window_run_conditions
    2195             :  *      Determine if 'wfunc' is really a WindowFunc and call its prosupport
    2196             :  *      function to determine the function's monotonic properties.  We then
    2197             :  *      see if 'opexpr' can be used to short-circuit execution.
    2198             :  *
    2199             :  * For example, row_number() over (order by ...) always produces a value one
    2200             :  * higher than in the previous row.  If someone has such a window function in
    2201             :  * a subquery and the outer query's WHERE clause filters its result to <= 10,
    2202             :  * we may as well stop the WindowAgg once the row number reaches 11.  Here
    2203             :  * we check if 'opexpr' might help us to stop doing needless extra processing
    2204             :  * in WindowAgg nodes.
    2205             :  *
    2206             :  * '*keep_original' is set to true if the caller should also use 'opexpr' for
    2207             :  * its original purpose.  This is set to false if the caller can assume that
    2208             :  * the run condition will handle all of the required filtering.
    2209             :  *
    2210             :  * Returns true if 'opexpr' was found to be useful and was added to the
    2211             :  * WindowClauses runCondition.  We also set *keep_original accordingly and add
    2212             :  * 'attno' to *run_cond_attrs offset by FirstLowInvalidHeapAttributeNumber.
    2213             :  * If the 'opexpr' cannot be used then we set *keep_original to true and
    2214             :  * return false.
    2215             :  */
    2216             : static bool
    2217         222 : find_window_run_conditions(Query *subquery, RangeTblEntry *rte, Index rti,
    2218             :                            AttrNumber attno, WindowFunc *wfunc, OpExpr *opexpr,
    2219             :                            bool wfunc_left, bool *keep_original,
    2220             :                            Bitmapset **run_cond_attrs)
    2221             : {
    2222             :     Oid         prosupport;
    2223             :     Expr       *otherexpr;
    2224             :     SupportRequestWFuncMonotonic req;
    2225             :     SupportRequestWFuncMonotonic *res;
    2226             :     WindowClause *wclause;
    2227             :     List       *opinfos;
    2228             :     OpExpr     *runopexpr;
    2229             :     Oid         runoperator;
    2230             :     ListCell   *lc;
    2231             : 
    2232         222 :     *keep_original = true;
    2233             : 
    2234         222 :     while (IsA(wfunc, RelabelType))
    2235           0 :         wfunc = (WindowFunc *) ((RelabelType *) wfunc)->arg;
    2236             : 
    2237             :     /* we can only work with window functions */
    2238         222 :     if (!IsA(wfunc, WindowFunc))
    2239          24 :         return false;
    2240             : 
    2241             :     /* can't use it if there are subplans in the WindowFunc */
    2242         198 :     if (contain_subplans((Node *) wfunc))
    2243           6 :         return false;
    2244             : 
    2245         192 :     prosupport = get_func_support(wfunc->winfnoid);
    2246             : 
    2247             :     /* Check if there's a support function for 'wfunc' */
    2248         192 :     if (!OidIsValid(prosupport))
    2249          18 :         return false;
    2250             : 
    2251             :     /* get the Expr from the other side of the OpExpr */
    2252         174 :     if (wfunc_left)
    2253         150 :         otherexpr = lsecond(opexpr->args);
    2254             :     else
    2255          24 :         otherexpr = linitial(opexpr->args);
    2256             : 
    2257             :     /*
    2258             :      * The value being compared must not change during the evaluation of the
    2259             :      * window partition.
    2260             :      */
    2261         174 :     if (!is_pseudo_constant_clause((Node *) otherexpr))
    2262           0 :         return false;
    2263             : 
    2264             :     /* find the window clause belonging to the window function */
    2265         174 :     wclause = (WindowClause *) list_nth(subquery->windowClause,
    2266         174 :                                         wfunc->winref - 1);
    2267             : 
    2268         174 :     req.type = T_SupportRequestWFuncMonotonic;
    2269         174 :     req.window_func = wfunc;
    2270         174 :     req.window_clause = wclause;
    2271             : 
    2272             :     /* call the support function */
    2273             :     res = (SupportRequestWFuncMonotonic *)
    2274         174 :         DatumGetPointer(OidFunctionCall1(prosupport,
    2275             :                                          PointerGetDatum(&req)));
    2276             : 
    2277             :     /*
    2278             :      * Nothing to do if the function is neither monotonically increasing nor
    2279             :      * monotonically decreasing.
    2280             :      */
    2281         174 :     if (res == NULL || res->monotonic == MONOTONICFUNC_NONE)
    2282           0 :         return false;
    2283             : 
    2284         174 :     runopexpr = NULL;
    2285         174 :     runoperator = InvalidOid;
    2286         174 :     opinfos = get_op_btree_interpretation(opexpr->opno);
    2287             : 
    2288         174 :     foreach(lc, opinfos)
    2289             :     {
    2290         174 :         OpBtreeInterpretation *opinfo = (OpBtreeInterpretation *) lfirst(lc);
    2291         174 :         int         strategy = opinfo->strategy;
    2292             : 
    2293             :         /* handle < / <= */
    2294         174 :         if (strategy == BTLessStrategyNumber ||
    2295             :             strategy == BTLessEqualStrategyNumber)
    2296             :         {
    2297             :             /*
    2298             :              * < / <= is supported for monotonically increasing functions in
    2299             :              * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
    2300             :              * for monotonically decreasing functions.
    2301             :              */
    2302         132 :             if ((wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)) ||
    2303          18 :                 (!wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)))
    2304             :             {
    2305         120 :                 *keep_original = false;
    2306         120 :                 runopexpr = opexpr;
    2307         120 :                 runoperator = opexpr->opno;
    2308             :             }
    2309         132 :             break;
    2310             :         }
    2311             :         /* handle > / >= */
    2312          42 :         else if (strategy == BTGreaterStrategyNumber ||
    2313             :                  strategy == BTGreaterEqualStrategyNumber)
    2314             :         {
    2315             :             /*
    2316             :              * > / >= is supported for monotonically decreasing functions in
    2317             :              * the form <wfunc> op <pseudoconst> and <pseudoconst> op <wfunc>
    2318             :              * for monotonically increasing functions.
    2319             :              */
    2320          18 :             if ((wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)) ||
    2321          12 :                 (!wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)))
    2322             :             {
    2323          18 :                 *keep_original = false;
    2324          18 :                 runopexpr = opexpr;
    2325          18 :                 runoperator = opexpr->opno;
    2326             :             }
    2327          18 :             break;
    2328             :         }
    2329             :         /* handle = */
    2330          24 :         else if (strategy == BTEqualStrategyNumber)
    2331             :         {
    2332             :             int16       newstrategy;
    2333             : 
    2334             :             /*
    2335             :              * When both monotonically increasing and decreasing then the
    2336             :              * return value of the window function will be the same each time.
    2337             :              * We can simply use 'opexpr' as the run condition without
    2338             :              * modifying it.
    2339             :              */
    2340          24 :             if ((res->monotonic & MONOTONICFUNC_BOTH) == MONOTONICFUNC_BOTH)
    2341             :             {
    2342           6 :                 *keep_original = false;
    2343           6 :                 runopexpr = opexpr;
    2344           6 :                 runoperator = opexpr->opno;
    2345           6 :                 break;
    2346             :             }
    2347             : 
    2348             :             /*
    2349             :              * When monotonically increasing we make a qual with <wfunc> <=
    2350             :              * <value> or <value> >= <wfunc> in order to filter out values
    2351             :              * which are above the value in the equality condition.  For
    2352             :              * monotonically decreasing functions we want to filter values
    2353             :              * below the value in the equality condition.
    2354             :              */
    2355          18 :             if (res->monotonic & MONOTONICFUNC_INCREASING)
    2356          18 :                 newstrategy = wfunc_left ? BTLessEqualStrategyNumber : BTGreaterEqualStrategyNumber;
    2357             :             else
    2358           0 :                 newstrategy = wfunc_left ? BTGreaterEqualStrategyNumber : BTLessEqualStrategyNumber;
    2359             : 
    2360             :             /* We must keep the original equality qual */
    2361          18 :             *keep_original = true;
    2362          18 :             runopexpr = opexpr;
    2363             : 
    2364             :             /* determine the operator to use for the runCondition qual */
    2365          18 :             runoperator = get_opfamily_member(opinfo->opfamily_id,
    2366             :                                               opinfo->oplefttype,
    2367             :                                               opinfo->oprighttype,
    2368             :                                               newstrategy);
    2369          18 :             break;
    2370             :         }
    2371             :     }
    2372             : 
    2373         174 :     if (runopexpr != NULL)
    2374             :     {
    2375             :         Expr       *newexpr;
    2376             : 
    2377             :         /*
    2378             :          * Build the qual required for the run condition keeping the
    2379             :          * WindowFunc on the same side as it was originally.
    2380             :          */
    2381         162 :         if (wfunc_left)
    2382         144 :             newexpr = make_opclause(runoperator,
    2383             :                                     runopexpr->opresulttype,
    2384         144 :                                     runopexpr->opretset, (Expr *) wfunc,
    2385             :                                     otherexpr, runopexpr->opcollid,
    2386             :                                     runopexpr->inputcollid);
    2387             :         else
    2388          18 :             newexpr = make_opclause(runoperator,
    2389             :                                     runopexpr->opresulttype,
    2390          18 :                                     runopexpr->opretset,
    2391             :                                     otherexpr, (Expr *) wfunc,
    2392             :                                     runopexpr->opcollid,
    2393             :                                     runopexpr->inputcollid);
    2394             : 
    2395         162 :         wclause->runCondition = lappend(wclause->runCondition, newexpr);
    2396             : 
    2397             :         /* record that this attno was used in a run condition */
    2398         162 :         *run_cond_attrs = bms_add_member(*run_cond_attrs,
    2399             :                                          attno - FirstLowInvalidHeapAttributeNumber);
    2400         162 :         return true;
    2401             :     }
    2402             : 
    2403             :     /* unsupported OpExpr */
    2404          12 :     return false;
    2405             : }
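                     : 
                     : /*
                     :  * Illustrative example (hypothetical table "t"):
                     :  *
                     :  *     SELECT * FROM (SELECT x, row_number() OVER (ORDER BY x) AS rn
                     :  *                    FROM t) s
                     :  *     WHERE rn <= 10;
                     :  *
                     :  * row_number() is monotonically increasing, so "rn <= 10" can become a
                     :  * runCondition of the WindowClause, letting the WindowAgg stop producing rows
                     :  * once the condition fails; *keep_original is set to false because the run
                     :  * condition subsumes the original qual.
                     :  */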
    2406             : 
    2407             : /*
    2408             :  * check_and_push_window_quals
    2409             :  *      Check if 'clause' is a qual that can be pushed into a WindowFunc's
    2410             :  *      WindowClause as a 'runCondition' qual.  These, when present, allow
    2411             :  *      some unnecessary work to be skipped during execution.
    2412             :  *
    2413             :  * 'run_cond_attrs' will be populated with all targetlist resnos of subquery
    2414             :  * targets (offset by FirstLowInvalidHeapAttributeNumber) that we pushed
    2415             :  * window quals for.
    2416             :  *
    2417             :  * Returns true if the caller still must keep the original qual or false if
    2418             :  * the caller can safely ignore the original qual because the WindowAgg node
    2419             :  * will use the runCondition to stop returning tuples.
    2420             :  */
    2421             : static bool
    2422         234 : check_and_push_window_quals(Query *subquery, RangeTblEntry *rte, Index rti,
    2423             :                             Node *clause, Bitmapset **run_cond_attrs)
    2424             : {
    2425         234 :     OpExpr     *opexpr = (OpExpr *) clause;
    2426         234 :     bool        keep_original = true;
    2427             :     Var        *var1;
    2428             :     Var        *var2;
    2429             : 
    2430             :     /* We're only able to use OpExprs with 2 operands */
    2431         234 :     if (!IsA(opexpr, OpExpr))
    2432          18 :         return true;
    2433             : 
    2434         216 :     if (list_length(opexpr->args) != 2)
    2435           0 :         return true;
    2436             : 
    2437             :     /*
    2438             :      * Currently, we restrict this optimization to strict OpExprs.  The reason
    2439             :      * for this is that during execution, once the runcondition becomes false,
    2440             :      * we stop evaluating WindowFuncs.  To avoid leaving around stale window
    2441             :      * function result values, we set them to NULL.  Having only strict
    2442             :      * OpExprs here ensures that we properly filter out the tuples with NULLs
    2443             :      * in the top-level WindowAgg.
    2444             :      */
    2445         216 :     set_opfuncid(opexpr);
    2446         216 :     if (!func_strict(opexpr->opfuncid))
    2447           0 :         return true;
    2448             : 
    2449             :     /*
    2450             :      * Check for plain Vars that reference window functions in the subquery.
    2451             :      * If we find any, we'll ask find_window_run_conditions() if 'opexpr' can
    2452             :      * be used as part of the run condition.
    2453             :      */
    2454             : 
    2455             :     /* Check the left side of the OpExpr */
    2456         216 :     var1 = linitial(opexpr->args);
    2457         216 :     if (IsA(var1, Var) && var1->varattno > 0)
    2458             :     {
    2459         180 :         TargetEntry *tle = list_nth(subquery->targetList, var1->varattno - 1);
    2460         180 :         WindowFunc *wfunc = (WindowFunc *) tle->expr;
    2461             : 
    2462         180 :         if (find_window_run_conditions(subquery, rte, rti, tle->resno, wfunc,
    2463             :                                        opexpr, true, &keep_original,
    2464             :                                        run_cond_attrs))
    2465         144 :             return keep_original;
    2466             :     }
    2467             : 
    2468             :     /* and check the right side */
    2469          72 :     var2 = lsecond(opexpr->args);
    2470          72 :     if (IsA(var2, Var) && var2->varattno > 0)
    2471             :     {
    2472          42 :         TargetEntry *tle = list_nth(subquery->targetList, var2->varattno - 1);
    2473          42 :         WindowFunc *wfunc = (WindowFunc *) tle->expr;
    2474             : 
    2475          42 :         if (find_window_run_conditions(subquery, rte, rti, tle->resno, wfunc,
    2476             :                                        opexpr, false, &keep_original,
    2477             :                                        run_cond_attrs))
    2478          18 :             return keep_original;
    2479             :     }
    2480             : 
    2481          54 :     return true;
    2482             : }
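                     : 
                     : /*
                     :  * For example (continuing the hypothetical query above), a qual "rn = 10" on
                     :  * a monotonically increasing row_number() result cannot replace the original
                     :  * qual, but a derived "rn <= 10" run condition is still added so the WindowAgg
                     :  * can stop early; the caller keeps the equality qual for the final filtering,
                     :  * so true is returned here.
                     :  */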
    2483             : 
    2484             : /*
    2485             :  * set_subquery_pathlist
    2486             :  *      Generate SubqueryScan access paths for a subquery RTE
    2487             :  *
    2488             :  * We don't currently support generating parameterized paths for subqueries
    2489             :  * by pushing join clauses down into them; it seems too expensive to re-plan
    2490             :  * the subquery multiple times to consider different alternatives.
    2491             :  * (XXX that could stand to be reconsidered, now that we use Paths.)
    2492             :  * So the paths made here will be parameterized if the subquery contains
    2493             :  * LATERAL references, otherwise not.  As long as that's true, there's no need
    2494             :  * for a separate set_subquery_size phase: just make the paths right away.
    2495             :  */
    2496             : static void
    2497        6596 : set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
    2498             :                       Index rti, RangeTblEntry *rte)
    2499             : {
    2500        6596 :     Query      *parse = root->parse;
    2501        6596 :     Query      *subquery = rte->subquery;
    2502             :     bool        trivial_pathtarget;
    2503             :     Relids      required_outer;
    2504             :     pushdown_safety_info safetyInfo;
    2505             :     double      tuple_fraction;
    2506             :     RelOptInfo *sub_final_rel;
    2507        6596 :     Bitmapset  *run_cond_attrs = NULL;
    2508             :     ListCell   *lc;
    2509             : 
    2510             :     /*
    2511             :      * Must copy the Query so that planning doesn't mess up the RTE contents
    2512             :      * (really really need to fix the planner to not scribble on its input,
    2513             :      * someday ... but see remove_unused_subquery_outputs to start with).
    2514             :      */
    2515        6596 :     subquery = copyObject(subquery);
    2516             : 
    2517             :     /*
    2518             :      * If it's a LATERAL subquery, it might contain some Vars of the current
    2519             :      * query level, requiring it to be treated as parameterized, even though
    2520             :      * we don't support pushing down join quals into subqueries.
    2521             :      */
    2522        6596 :     required_outer = rel->lateral_relids;
    2523             : 
    2524             :     /*
    2525             :      * Zero out result area for subquery_is_pushdown_safe, so that it can set
    2526             :      * flags as needed while recursing.  In particular, we need a workspace
    2527             :      * for keeping track of the reasons why columns are unsafe to reference.
    2528             :      * These reasons are stored in the bits inside unsafeFlags[i] when we
    2529             :      * discover reasons that column i of the subquery is unsafe to be used in
    2530             :      * a pushed-down qual.
    2531             :      */
    2532        6596 :     memset(&safetyInfo, 0, sizeof(safetyInfo));
    2533        6596 :     safetyInfo.unsafeFlags = (unsigned char *)
    2534        6596 :         palloc0((list_length(subquery->targetList) + 1) * sizeof(unsigned char));
    2535             : 
    2536             :     /*
    2537             :      * If the subquery has the "security_barrier" flag, it means the subquery
    2538             :      * originated from a view that must enforce row-level security.  Then we
    2539             :      * must not push down quals that contain leaky functions.  (Ideally this
    2540             :      * would be checked inside subquery_is_pushdown_safe, but since we don't
    2541             :      * currently pass the RTE to that function, we must do it here.)
    2542             :      */
    2543        6596 :     safetyInfo.unsafeLeaky = rte->security_barrier;
    2544             : 
    2545             :     /*
    2546             :      * If there are any restriction clauses that have been attached to the
    2547             :      * subquery relation, consider pushing them down to become WHERE or HAVING
    2548             :      * quals of the subquery itself.  This transformation is useful because it
    2549             :      * may allow us to generate a better plan for the subquery than evaluating
    2550             :      * all the subquery output rows and then filtering them.
    2551             :      *
    2552             :      * There are several cases where we cannot push down clauses. Restrictions
    2553             :      * involving the subquery are checked by subquery_is_pushdown_safe().
    2554             :      * Restrictions on individual clauses are checked by
    2555             :      * qual_is_pushdown_safe().  Also, we don't want to push down
    2556             :      * pseudoconstant clauses; better to have the gating node above the
    2557             :      * subquery.
    2558             :      *
    2559             :      * Non-pushed-down clauses will get evaluated as qpquals of the
    2560             :      * SubqueryScan node.
    2561             :      *
    2562             :      * XXX Are there any cases where we want to make a policy decision not to
    2563             :      * push down a pushable qual, because it'd result in a worse plan?
    2564             :      */
    2565        7838 :     if (rel->baserestrictinfo != NIL &&
    2566        1242 :         subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
    2567             :     {
    2568             :         /* OK to consider pushing down individual quals */
    2569        1138 :         List       *upperrestrictlist = NIL;
    2570             :         ListCell   *l;
    2571             : 
    2572        2652 :         foreach(l, rel->baserestrictinfo)
    2573             :         {
    2574        1514 :             RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
    2575        1514 :             Node       *clause = (Node *) rinfo->clause;
    2576             : 
    2577        1514 :             if (rinfo->pseudoconstant)
    2578             :             {
    2579           4 :                 upperrestrictlist = lappend(upperrestrictlist, rinfo);
    2580           4 :                 continue;
    2581             :             }
    2582             : 
    2583        1510 :             switch (qual_is_pushdown_safe(subquery, rti, rinfo, &safetyInfo))
    2584             :             {
    2585         864 :                 case PUSHDOWN_SAFE:
    2586             :                     /* Push it down */
    2587         864 :                     subquery_push_qual(subquery, rte, rti, clause);
    2588         864 :                     break;
    2589             : 
    2590         234 :                 case PUSHDOWN_WINDOWCLAUSE_RUNCOND:
    2591             : 
    2592             :                     /*
    2593             :                      * Since we can't push the qual down into the subquery,
    2594             :                      * check if it happens to reference a window function.  If
    2595             :                      * so then it might be useful to use for the WindowAgg's
    2596             :                      * runCondition.
    2597             :                      */
    2598         468 :                     if (!subquery->hasWindowFuncs ||
    2599         234 :                         check_and_push_window_quals(subquery, rte, rti, clause,
    2600             :                                                     &run_cond_attrs))
    2601             :                     {
    2602             :                         /*
    2603             :                          * subquery has no window funcs or the clause is not a
    2604             :                          * suitable window run condition qual or it is, but
    2605             :                          * the original must also be kept in the upper query.
    2606             :                          */
    2607          90 :                         upperrestrictlist = lappend(upperrestrictlist, rinfo);
    2608             :                     }
    2609         234 :                     break;
    2610             : 
    2611         412 :                 case PUSHDOWN_UNSAFE:
    2612         412 :                     upperrestrictlist = lappend(upperrestrictlist, rinfo);
    2613         412 :                     break;
    2614             :             }
    2615        1514 :         }
    2616        1138 :         rel->baserestrictinfo = upperrestrictlist;
    2617             :         /* We don't bother recomputing baserestrict_min_security */
    2618             :     }
    2619             : 
    2620        6596 :     pfree(safetyInfo.unsafeFlags);
    2621             : 
    2622             :     /*
    2623             :      * The upper query might not use all the subquery's output columns; if
    2624             :      * not, we can simplify.  Pass the attributes that were pushed down into
    2625             :      * WindowAgg run conditions to ensure we don't accidentally think those
    2626             :      * are unused.
    2627             :      */
    2628        6596 :     remove_unused_subquery_outputs(subquery, rel, run_cond_attrs);
    2629             : 
    2630             :     /*
    2631             :      * We can safely pass the outer tuple_fraction down to the subquery if the
    2632             :      * outer level has no joining, aggregation, or sorting to do. Otherwise
    2633             :      * we'd better tell the subquery to plan for full retrieval. (XXX This
    2634             :      * could probably be made more intelligent ...)
    2635             :      */
    2636        6596 :     if (parse->hasAggs ||
    2637        6138 :         parse->groupClause ||
    2638        6132 :         parse->groupingSets ||
    2639        6132 :         root->hasHavingQual ||
    2640        6132 :         parse->distinctClause ||
    2641        9888 :         parse->sortClause ||
    2642        4156 :         bms_membership(root->all_baserels) == BMS_MULTIPLE)
    2643        3470 :         tuple_fraction = 0.0;   /* default case */
    2644             :     else
    2645        3126 :         tuple_fraction = root->tuple_fraction;
    2646             : 
    2647             :     /* plan_params should not be in use in current query level */
    2648             :     Assert(root->plan_params == NIL);
    2649             : 
    2650             :     /* Generate a subroot and Paths for the subquery */
    2651        6596 :     rel->subroot = subquery_planner(root->glob, subquery,
    2652             :                                     root,
    2653             :                                     false, tuple_fraction);
    2654             : 
    2655             :     /* Isolate the params needed by this specific subplan */
    2656        6596 :     rel->subplan_params = root->plan_params;
    2657        6596 :     root->plan_params = NIL;
    2658             : 
    2659             :     /*
    2660             :      * It's possible that constraint exclusion proved the subquery empty. If
    2661             :      * so, it's desirable to produce an unadorned dummy path so that we will
    2662             :      * recognize appropriate optimizations at this query level.
    2663             :      */
    2664        6596 :     sub_final_rel = fetch_upper_rel(rel->subroot, UPPERREL_FINAL, NULL);
    2665             : 
    2666        6596 :     if (IS_DUMMY_REL(sub_final_rel))
    2667             :     {
    2668         108 :         set_dummy_rel_pathlist(rel);
    2669         108 :         return;
    2670             :     }
    2671             : 
    2672             :     /*
    2673             :      * Mark rel with estimated output rows, width, etc.  Note that we have to
    2674             :      * do this before generating outer-query paths, else cost_subqueryscan is
    2675             :      * not happy.
    2676             :      */
    2677        6488 :     set_subquery_size_estimates(root, rel);
    2678             : 
    2679             :     /*
    2680             :      * Also detect whether the reltarget is trivial, so that we can pass that
    2681             :      * info to cost_subqueryscan (rather than re-deriving it multiple times).
    2682             :      * It's trivial if it fetches all the subplan output columns in order.
    2683             :      */
    2684        6488 :     if (list_length(rel->reltarget->exprs) != list_length(subquery->targetList))
    2685        1744 :         trivial_pathtarget = false;
    2686             :     else
    2687             :     {
    2688        4744 :         trivial_pathtarget = true;
    2689       13778 :         foreach(lc, rel->reltarget->exprs)
    2690             :         {
    2691        9278 :             Node       *node = (Node *) lfirst(lc);
    2692             :             Var        *var;
    2693             : 
    2694        9278 :             if (!IsA(node, Var))
    2695             :             {
    2696           0 :                 trivial_pathtarget = false;
    2697           0 :                 break;
    2698             :             }
    2699        9278 :             var = (Var *) node;
    2700        9278 :             if (var->varno != rti ||
    2701        9278 :                 var->varattno != foreach_current_index(lc) + 1)
    2702             :             {
    2703         244 :                 trivial_pathtarget = false;
    2704         244 :                 break;
    2705             :             }
    2706             :         }
    2707             :     }
    2708             : 
    2709             :     /*
    2710             :      * For each Path that subquery_planner produced, make a SubqueryScanPath
    2711             :      * in the outer query.
    2712             :      */
    2713       13448 :     foreach(lc, sub_final_rel->pathlist)
    2714             :     {
    2715        6960 :         Path       *subpath = (Path *) lfirst(lc);
    2716             :         List       *pathkeys;
    2717             : 
    2718             :         /* Convert subpath's pathkeys to outer representation */
    2719        6960 :         pathkeys = convert_subquery_pathkeys(root,
    2720             :                                              rel,
    2721             :                                              subpath->pathkeys,
    2722             :                                              make_tlist_from_pathtarget(subpath->pathtarget));
    2723             : 
    2724             :         /* Generate outer path using this subpath */
    2725        6960 :         add_path(rel, (Path *)
    2726        6960 :                  create_subqueryscan_path(root, rel, subpath,
    2727             :                                           trivial_pathtarget,
    2728             :                                           pathkeys, required_outer));
    2729             :     }
    2730             : 
    2731             :     /* If outer rel allows parallelism, do same for partial paths. */
    2732        6488 :     if (rel->consider_parallel && bms_is_empty(required_outer))
    2733             :     {
    2734             :         /* If consider_parallel is false, there should be no partial paths. */
    2735             :         Assert(sub_final_rel->consider_parallel ||
    2736             :                sub_final_rel->partial_pathlist == NIL);
    2737             : 
    2738             :         /* Same for partial paths. */
    2739        3610 :         foreach(lc, sub_final_rel->partial_pathlist)
    2740             :         {
    2741          42 :             Path       *subpath = (Path *) lfirst(lc);
    2742             :             List       *pathkeys;
    2743             : 
    2744             :             /* Convert subpath's pathkeys to outer representation */
    2745          42 :             pathkeys = convert_subquery_pathkeys(root,
    2746             :                                                  rel,
    2747             :                                                  subpath->pathkeys,
    2748             :                                                  make_tlist_from_pathtarget(subpath->pathtarget));
    2749             : 
    2750             :             /* Generate outer path using this subpath */
    2751          42 :             add_partial_path(rel, (Path *)
    2752          42 :                              create_subqueryscan_path(root, rel, subpath,
    2753             :                                                       trivial_pathtarget,
    2754             :                                                       pathkeys,
    2755             :                                                       required_outer));
    2756             :         }
    2757             :     }
    2758             : }
    2759             : 
    2760             : /*
    2761             :  * set_function_pathlist
    2762             :  *      Build the (single) access path for a function RTE
    2763             :  */
    2764             : static void
    2765       38142 : set_function_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    2766             : {
    2767             :     Relids      required_outer;
    2768       38142 :     List       *pathkeys = NIL;
    2769             : 
    2770             :     /*
    2771             :      * We don't support pushing join clauses into the quals of a function
    2772             :      * scan, but it could still have required parameterization due to LATERAL
    2773             :      * refs in the function expression.
    2774             :      */
    2775       38142 :     required_outer = rel->lateral_relids;
    2776             : 
    2777             :     /*
    2778             :      * The result is considered unordered unless ORDINALITY was used, in which
    2779             :      * case it is ordered by the ordinal column (the last one).  See if we
    2780             :      * care, by checking for uses of that Var in equivalence classes.
    2781             :      */
    2782       38142 :     if (rte->funcordinality)
    2783             :     {
    2784         618 :         AttrNumber  ordattno = rel->max_attr;
    2785         618 :         Var        *var = NULL;
    2786             :         ListCell   *lc;
    2787             : 
    2788             :         /*
    2789             :          * Is there a Var for it in rel's targetlist?  If not, the query did
    2790             :          * not reference the ordinality column, or at least not in any way
    2791             :          * that would be interesting for sorting.
    2792             :          */
    2793        1766 :         foreach(lc, rel->reltarget->exprs)
    2794             :         {
    2795        1760 :             Var        *node = (Var *) lfirst(lc);
    2796             : 
    2797             :             /* checking varno/varlevelsup is just paranoia */
    2798        1760 :             if (IsA(node, Var) &&
    2799        1760 :                 node->varattno == ordattno &&
    2800         612 :                 node->varno == rel->relid &&
    2801         612 :                 node->varlevelsup == 0)
    2802             :             {
    2803         612 :                 var = node;
    2804         612 :                 break;
    2805             :             }
    2806             :         }
    2807             : 
    2808             :         /*
    2809             :          * Try to build pathkeys for this Var with int8 sorting.  We tell
    2810             :          * build_expression_pathkey not to build any new equivalence class; if
    2811             :          * the Var isn't already mentioned in some EC, it means that nothing
    2812             :          * cares about the ordering.
    2813             :          */
    2814         618 :         if (var)
    2815         612 :             pathkeys = build_expression_pathkey(root,
    2816             :                                                 (Expr *) var,
    2817             :                                                 Int8LessOperator,
    2818             :                                                 rel->relids,
    2819             :                                                 false);
    2820             :     }
    2821             : 
    2822             :     /* Generate appropriate path */
    2823       38142 :     add_path(rel, create_functionscan_path(root, rel,
    2824             :                                            pathkeys, required_outer));
    2825       38142 : }
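
For illustration (not part of allpaths.c itself; the function and alias names are just an example), a query of roughly this shape exercises the ordinality handling above:

    SELECT val, ord
    FROM unnest(ARRAY['a', 'b', 'c']) WITH ORDINALITY AS t(val, ord)
    ORDER BY t.ord;

Because the ORDER BY references the ordinal column, the pathkey built here can mark the function scan's output as already sorted, so the planner may be able to avoid adding an explicit Sort on top.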
    2826             : 
    2827             : /*
    2828             :  * set_values_pathlist
    2829             :  *      Build the (single) access path for a VALUES RTE
    2830             :  */
    2831             : static void
    2832        7322 : set_values_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    2833             : {
    2834             :     Relids      required_outer;
    2835             : 
    2836             :     /*
    2837             :      * We don't support pushing join clauses into the quals of a values scan,
    2838             :      * but it could still have required parameterization due to LATERAL refs
    2839             :      * in the values expressions.
    2840             :      */
    2841        7322 :     required_outer = rel->lateral_relids;
    2842             : 
    2843             :     /* Generate appropriate path */
    2844        7322 :     add_path(rel, create_valuesscan_path(root, rel, required_outer));
    2845        7322 : }
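
A minimal example of a VALUES RTE that receives the single Values Scan path built here (the column aliases are illustrative):

    SELECT * FROM (VALUES (1, 'one'), (2, 'two')) AS v(id, label);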
    2846             : 
    2847             : /*
    2848             :  * set_tablefunc_pathlist
    2849             :  *      Build the (single) access path for a table func RTE
    2850             :  */
    2851             : static void
    2852         216 : set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    2853             : {
    2854             :     Relids      required_outer;
    2855             : 
    2856             :     /*
    2857             :      * We don't support pushing join clauses into the quals of a tablefunc
    2858             :      * scan, but it could still have required parameterization due to LATERAL
    2859             :      * refs in the function expression.
    2860             :      */
    2861         216 :     required_outer = rel->lateral_relids;
    2862             : 
    2863             :     /* Generate appropriate path */
    2864         216 :     add_path(rel, create_tablefuncscan_path(root, rel,
    2865             :                                             required_outer));
    2866         216 : }
    2867             : 
    2868             : /*
    2869             :  * set_cte_pathlist
    2870             :  *      Build the (single) access path for a non-self-reference CTE RTE
    2871             :  *
    2872             :  * There's no need for a separate set_cte_size phase, since we don't
    2873             :  * support join-qual-parameterized paths for CTEs.
    2874             :  */
    2875             : static void
    2876        2992 : set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    2877             : {
    2878             :     Plan       *cteplan;
    2879             :     PlannerInfo *cteroot;
    2880             :     Index       levelsup;
    2881             :     int         ndx;
    2882             :     ListCell   *lc;
    2883             :     int         plan_id;
    2884             :     Relids      required_outer;
    2885             : 
    2886             :     /*
    2887             :      * Find the referenced CTE, and locate the plan previously made for it.
    2888             :      */
    2889        2992 :     levelsup = rte->ctelevelsup;
    2890        2992 :     cteroot = root;
    2891        5384 :     while (levelsup-- > 0)
    2892             :     {
    2893        2392 :         cteroot = cteroot->parent_root;
    2894        2392 :         if (!cteroot)           /* shouldn't happen */
    2895           0 :             elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
    2896             :     }
    2897             : 
    2898             :     /*
    2899             :      * Note: cte_plan_ids can be shorter than cteList, if we are still working
    2900             :      * on planning the CTEs (ie, this is a side-reference from another CTE).
    2901             :      * So we mustn't use forboth here.
    2902             :      */
    2903        2992 :     ndx = 0;
    2904        4394 :     foreach(lc, cteroot->parse->cteList)
    2905             :     {
    2906        4394 :         CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
    2907             : 
    2908        4394 :         if (strcmp(cte->ctename, rte->ctename) == 0)
    2909        2992 :             break;
    2910        1402 :         ndx++;
    2911             :     }
    2912        2992 :     if (lc == NULL)             /* shouldn't happen */
    2913           0 :         elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
    2914        2992 :     if (ndx >= list_length(cteroot->cte_plan_ids))
    2915           0 :         elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
    2916        2992 :     plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
    2917        2992 :     if (plan_id <= 0)
    2918           0 :         elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
    2919        2992 :     cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
    2920             : 
    2921             :     /* Mark rel with estimated output rows, width, etc */
    2922        2992 :     set_cte_size_estimates(root, rel, cteplan->plan_rows);
    2923             : 
    2924             :     /*
    2925             :      * We don't support pushing join clauses into the quals of a CTE scan, but
    2926             :      * it could still have required parameterization due to LATERAL refs in
    2927             :      * its tlist.
    2928             :      */
    2929        2992 :     required_outer = rel->lateral_relids;
    2930             : 
    2931             :     /* Generate appropriate path */
    2932        2992 :     add_path(rel, create_ctescan_path(root, rel, required_outer));
    2933        2992 : }
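
As a sketch of the kind of query that reaches this function: a MATERIALIZED CTE is planned separately, and the outer query then scans it via the CTE Scan path added here (a single-reference CTE this simple would otherwise be inlined since PostgreSQL 12):

    WITH w AS MATERIALIZED (
        SELECT g AS n FROM generate_series(1, 10) AS g
    )
    SELECT * FROM w WHERE n > 5;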
    2934             : 
    2935             : /*
    2936             :  * set_namedtuplestore_pathlist
    2937             :  *      Build the (single) access path for a named tuplestore RTE
    2938             :  *
    2939             :  * There's no need for a separate set_namedtuplestore_size phase, since we
    2940             :  * don't support join-qual-parameterized paths for tuplestores.
    2941             :  */
    2942             : static void
    2943         446 : set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
    2944             :                              RangeTblEntry *rte)
    2945             : {
    2946             :     Relids      required_outer;
    2947             : 
    2948             :     /* Mark rel with estimated output rows, width, etc */
    2949         446 :     set_namedtuplestore_size_estimates(root, rel);
    2950             : 
    2951             :     /*
    2952             :      * We don't support pushing join clauses into the quals of a tuplestore
    2953             :      * scan, but it could still have required parameterization due to LATERAL
    2954             :      * refs in its tlist.
    2955             :      */
    2956         446 :     required_outer = rel->lateral_relids;
    2957             : 
    2958             :     /* Generate appropriate path */
    2959         446 :     add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
    2960             : 
    2961             :     /* Select cheapest path (pretty easy in this case...) */
    2962         446 :     set_cheapest(rel);
    2963         446 : }
    2964             : 
    2965             : /*
    2966             :  * set_result_pathlist
    2967             :  *      Build the (single) access path for an RTE_RESULT RTE
    2968             :  *
    2969             :  * There's no need for a separate set_result_size phase, since we
    2970             :  * don't support join-qual-parameterized paths for these RTEs.
    2971             :  */
    2972             : static void
    2973        1354 : set_result_pathlist(PlannerInfo *root, RelOptInfo *rel,
    2974             :                     RangeTblEntry *rte)
    2975             : {
    2976             :     Relids      required_outer;
    2977             : 
    2978             :     /* Mark rel with estimated output rows, width, etc */
    2979        1354 :     set_result_size_estimates(root, rel);
    2980             : 
    2981             :     /*
    2982             :      * We don't support pushing join clauses into the quals of a Result scan,
    2983             :      * but it could still have required parameterization due to LATERAL refs
    2984             :      * in its tlist.
    2985             :      */
    2986        1354 :     required_outer = rel->lateral_relids;
    2987             : 
    2988             :     /* Generate appropriate path */
    2989        1354 :     add_path(rel, create_resultscan_path(root, rel, required_outer));
    2990             : 
    2991             :     /* Select cheapest path (pretty easy in this case...) */
    2992        1354 :     set_cheapest(rel);
    2993        1354 : }
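
For illustration, one common way to get an RTE_RESULT relation is a query with an empty FROM clause; the planner represents it with a single result RTE and scans it via the Result path added here:

    SELECT 2 + 2;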
    2994             : 
    2995             : /*
    2996             :  * set_worktable_pathlist
    2997             :  *      Build the (single) access path for a self-reference CTE RTE
    2998             :  *
    2999             :  * There's no need for a separate set_worktable_size phase, since we don't
    3000             :  * support join-qual-parameterized paths for CTEs.
    3001             :  */
    3002             : static void
    3003         784 : set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
    3004             : {
    3005             :     Path       *ctepath;
    3006             :     PlannerInfo *cteroot;
    3007             :     Index       levelsup;
    3008             :     Relids      required_outer;
    3009             : 
    3010             :     /*
    3011             :      * We need to find the non-recursive term's path, which is in the plan
    3012             :      * level that's processing the recursive UNION, which is one level *below*
    3013             :      * where the CTE comes from.
    3014             :      */
    3015         784 :     levelsup = rte->ctelevelsup;
    3016         784 :     if (levelsup == 0)          /* shouldn't happen */
    3017           0 :         elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
    3018         784 :     levelsup--;
    3019         784 :     cteroot = root;
    3020        1772 :     while (levelsup-- > 0)
    3021             :     {
    3022         988 :         cteroot = cteroot->parent_root;
    3023         988 :         if (!cteroot)           /* shouldn't happen */
    3024           0 :             elog(ERROR, "bad levelsup for CTE \"%s\"", rte->ctename);
    3025             :     }
    3026         784 :     ctepath = cteroot->non_recursive_path;
    3027         784 :     if (!ctepath)               /* shouldn't happen */
    3028           0 :         elog(ERROR, "could not find path for CTE \"%s\"", rte->ctename);
    3029             : 
    3030             :     /* Mark rel with estimated output rows, width, etc */
    3031         784 :     set_cte_size_estimates(root, rel, ctepath->rows);
    3032             : 
    3033             :     /*
    3034             :      * We don't support pushing join clauses into the quals of a worktable
    3035             :      * scan, but it could still have required parameterization due to LATERAL
    3036             :      * refs in its tlist.  (I'm not sure this is actually possible given the
    3037             :      * restrictions on recursive references, but it's easy enough to support.)
    3038             :      */
    3039         784 :     required_outer = rel->lateral_relids;
    3040             : 
    3041             :     /* Generate appropriate path */
    3042         784 :     add_path(rel, create_worktablescan_path(root, rel, required_outer));
    3043         784 : }
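
For illustration, in a recursive CTE it is the self-reference inside the recursive term that becomes the worktable scan handled here:

    WITH RECURSIVE t(n) AS (
        VALUES (1)
        UNION ALL
        SELECT n + 1 FROM t WHERE n < 100    -- "t" here is the worktable
    )
    SELECT sum(n) FROM t;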
    3044             : 
    3045             : /*
    3046             :  * generate_gather_paths
    3047             :  *      Generate parallel access paths for a relation by pushing a Gather or
    3048             :  *      Gather Merge on top of a partial path.
    3049             :  *
    3050             :  * This must not be called until after we're done creating all partial paths
    3051             :  * for the specified relation.  (Otherwise, add_partial_path might delete a
    3052             :  * path that some GatherPath or GatherMergePath has a reference to.)
    3053             :  *
    3054             :  * If we're generating paths for a scan or join relation, override_rows will
    3055             :  * be false, and we'll just use the relation's size estimate.  When we're
    3056             :  * being called for a partially-grouped path, though, we need to override
    3057             :  * the rowcount estimate.  (It's not clear that the particular value we're
    3058             :  * using here is actually best, but the underlying rel has no estimate so
    3059             :  * we must do something.)
    3060             :  */
    3061             : void
    3062       15348 : generate_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
    3063             : {
    3064             :     Path       *cheapest_partial_path;
    3065             :     Path       *simple_gather_path;
    3066             :     ListCell   *lc;
    3067             :     double      rows;
    3068       15348 :     double     *rowsp = NULL;
    3069             : 
    3070             :     /* If there are no partial paths, there's nothing to do here. */
    3071       15348 :     if (rel->partial_pathlist == NIL)
    3072           0 :         return;
    3073             : 
    3074             :     /* Should we override the rel's rowcount estimate? */
    3075       15348 :     if (override_rows)
    3076        1672 :         rowsp = &rows;
    3077             : 
    3078             :     /*
    3079             :      * The output of Gather is always unsorted, so there's only one partial
    3080             :      * path of interest: the cheapest one.  That will be the one at the front
    3081             :      * of partial_pathlist because of the way add_partial_path works.
    3082             :      */
    3083       15348 :     cheapest_partial_path = linitial(rel->partial_pathlist);
    3084       15348 :     rows =
    3085       15348 :         cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
    3086             :     simple_gather_path = (Path *)
    3087       15348 :         create_gather_path(root, rel, cheapest_partial_path, rel->reltarget,
    3088             :                            NULL, rowsp);
    3089       15348 :     add_path(rel, simple_gather_path);
    3090             : 
    3091             :     /*
    3092             :      * For each useful ordering, we can consider an order-preserving Gather
    3093             :      * Merge.
    3094             :      */
    3095       31966 :     foreach(lc, rel->partial_pathlist)
    3096             :     {
    3097       16618 :         Path       *subpath = (Path *) lfirst(lc);
    3098             :         GatherMergePath *path;
    3099             : 
    3100       16618 :         if (subpath->pathkeys == NIL)
    3101       15040 :             continue;
    3102             : 
    3103        1578 :         rows = subpath->rows * subpath->parallel_workers;
    3104        1578 :         path = create_gather_merge_path(root, rel, subpath, rel->reltarget,
    3105             :                                         subpath->pathkeys, NULL, rowsp);
    3106        1578 :         add_path(rel, &path->path);
    3107             :     }
    3108             : }
    3109             : 
    3110             : /*
    3111             :  * get_useful_pathkeys_for_relation
    3112             :  *      Determine which orderings of a relation might be useful.
    3113             :  *
    3114             :  * Getting data in sorted order can be useful either because the requested
    3115             :  * order matches the final output ordering for the overall query we're
    3116             :  * planning, or because it enables an efficient merge join.  Here, we try
    3117             :  * to figure out which pathkeys to consider.
    3118             :  *
    3119             :  * This allows us to do incremental sort on top of an index scan under a gather
    3120             :  * merge node, i.e. parallelized.
    3121             :  *
    3122             :  * If require_parallel_safe is true, we also require the expressions to
    3123             :  * be parallel safe (which allows pushing the sort below Gather Merge).
    3124             :  *
    3125             :  * XXX At the moment this can only ever return a list with a single element,
    3126             :  * because it looks at query_pathkeys only. So we might return the pathkeys
    3127             :  * directly, but it seems plausible we'll want to consider other orderings
    3128             :  * in the future. For example, we might want to consider pathkeys useful for
    3129             :  * merge joins.
    3130             :  */
    3131             : static List *
    3132       15258 : get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel,
    3133             :                                  bool require_parallel_safe)
    3134             : {
    3135       15258 :     List       *useful_pathkeys_list = NIL;
    3136             : 
    3137             :     /*
    3138             :      * Considering query_pathkeys is always worth it, because it might allow
    3139             :      * us to avoid a total sort when we have a partially presorted path
    3140             :      * available or to push the total sort into the parallel portion of the
    3141             :      * query.
    3142             :      */
    3143       15258 :     if (root->query_pathkeys)
    3144             :     {
    3145             :         ListCell   *lc;
    3146        6570 :         int         npathkeys = 0;  /* useful pathkeys */
    3147             : 
    3148       14028 :         foreach(lc, root->query_pathkeys)
    3149             :         {
    3150        9366 :             PathKey    *pathkey = (PathKey *) lfirst(lc);
    3151        9366 :             EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
    3152             : 
    3153             :             /*
    3154             :              * We can only build a sort for pathkeys that contain a
    3155             :              * safe-to-compute-early EC member computable from the current
    3156             :              * relation's reltarget, so ignore the remainder of the list as
    3157             :              * soon as we find a pathkey without such a member.
    3158             :              *
    3159             :              * It's still worthwhile to return any prefix of the pathkeys list
    3160             :              * that meets this requirement, as we may be able to do an
    3161             :              * incremental sort.
    3162             :              *
    3163             :              * If requested, ensure the sort expression is parallel-safe too.
    3164             :              */
    3165        9366 :             if (!relation_can_be_sorted_early(root, rel, pathkey_ec,
    3166             :                                               require_parallel_safe))
    3167        1908 :                 break;
    3168             : 
    3169        7458 :             npathkeys++;
    3170             :         }
    3171             : 
    3172             :         /*
    3173             :          * The whole query_pathkeys list matches, so append it directly, to
    3174             :          * allow comparing pathkeys easily by comparing list pointers. If we
    3175             :          * have to truncate the pathkeys, we have to make a copy, though.
    3176             :          */
    3177        6570 :         if (npathkeys == list_length(root->query_pathkeys))
    3178        4662 :             useful_pathkeys_list = lappend(useful_pathkeys_list,
    3179        4662 :                                            root->query_pathkeys);
    3180        1908 :         else if (npathkeys > 0)
    3181         408 :             useful_pathkeys_list = lappend(useful_pathkeys_list,
    3182         408 :                                            list_copy_head(root->query_pathkeys,
    3183             :                                                           npathkeys));
    3184             :     }
    3185             : 
    3186       15258 :     return useful_pathkeys_list;
    3187             : }
    3188             : 
    3189             : /*
    3190             :  * generate_useful_gather_paths
    3191             :  *      Generate parallel access paths for a relation by pushing a Gather or
    3192             :  *      Gather Merge on top of a partial path.
    3193             :  *
    3194             :  * Unlike plain generate_gather_paths, this looks not only at the pathkeys of
    3195             :  * the input paths (aiming to preserve their ordering), but also considers
    3196             :  * orderings that might be useful to nodes above the gather merge node, and
    3197             :  * tries to add a sort (regular or incremental) to provide them.
    3198             :  */
    3199             : void
    3200      483186 : generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_rows)
    3201             : {
    3202             :     ListCell   *lc;
    3203             :     double      rows;
    3204      483186 :     double     *rowsp = NULL;
    3205      483186 :     List       *useful_pathkeys_list = NIL;
    3206      483186 :     Path       *cheapest_partial_path = NULL;
    3207             : 
    3208             :     /* If there are no partial paths, there's nothing to do here. */
    3209      483186 :     if (rel->partial_pathlist == NIL)
    3210      467928 :         return;
    3211             : 
    3212             :     /* Should we override the rel's rowcount estimate? */
    3213       15258 :     if (override_rows)
    3214        1582 :         rowsp = &rows;
    3215             : 
    3216             :     /* generate the regular gather (merge) paths */
    3217       15258 :     generate_gather_paths(root, rel, override_rows);
    3218             : 
    3219             :     /* consider incremental sort for interesting orderings */
    3220       15258 :     useful_pathkeys_list = get_useful_pathkeys_for_relation(root, rel, true);
    3221             : 
    3222             :     /* used for explicit (full) sort paths */
    3223       15258 :     cheapest_partial_path = linitial(rel->partial_pathlist);
    3224             : 
    3225             :     /*
    3226             :      * Consider sorted paths for each interesting ordering. We generate both
    3227             :      * incremental and full sort.
    3228             :      */
    3229       20328 :     foreach(lc, useful_pathkeys_list)
    3230             :     {
    3231        5070 :         List       *useful_pathkeys = lfirst(lc);
    3232             :         ListCell   *lc2;
    3233             :         bool        is_sorted;
    3234             :         int         presorted_keys;
    3235             : 
    3236       11272 :         foreach(lc2, rel->partial_pathlist)
    3237             :         {
    3238        6202 :             Path       *subpath = (Path *) lfirst(lc2);
    3239             :             GatherMergePath *path;
    3240             : 
    3241        6202 :             is_sorted = pathkeys_count_contained_in(useful_pathkeys,
    3242             :                                                     subpath->pathkeys,
    3243             :                                                     &presorted_keys);
    3244             : 
    3245             :             /*
    3246             :              * We don't need to consider the case where a subpath is already
    3247             :              * fully sorted because generate_gather_paths already creates a
    3248             :              * gather merge path for every subpath that has pathkeys present.
    3249             :              *
    3250             :              * But since the subpath is already sorted, we know we don't need
    3251             :              * to consider adding a sort (full or incremental) on top of it,
    3252             :              * so we can continue here.
    3253             :              */
    3254        6202 :             if (is_sorted)
    3255        1086 :                 continue;
    3256             : 
    3257             :             /*
    3258             :              * Try at least sorting the cheapest path and also try
    3259             :              * incrementally sorting any path which is partially sorted
    3260             :              * already (no need to deal with paths which have presorted keys
    3261             :              * when incremental sort is disabled unless it's the cheapest
    3262             :              * input path).
    3263             :              */
    3264        5116 :             if (subpath != cheapest_partial_path &&
    3265         186 :                 (presorted_keys == 0 || !enable_incremental_sort))
    3266          54 :                 continue;
    3267             : 
    3268             :             /*
    3269             :              * Consider regular sort for any path that's not presorted or if
    3270             :              * incremental sort is disabled.  We've no need to consider both
    3271             :              * sort and incremental sort on the same path.  We assume that
    3272             :              * incremental sort is always faster when there are presorted
    3273             :              * keys.
    3274             :              *
    3275             :              * This is not redundant with the gather paths created in
    3276             :              * generate_gather_paths, because that doesn't generate ordered
    3277             :              * output. Here we add an explicit sort to match the useful
    3278             :              * ordering.
    3279             :              */
    3280        5062 :             if (presorted_keys == 0 || !enable_incremental_sort)
    3281             :             {
    3282        4918 :                 subpath = (Path *) create_sort_path(root,
    3283             :                                                     rel,
    3284             :                                                     subpath,
    3285             :                                                     useful_pathkeys,
    3286             :                                                     -1.0);
    3287        4918 :                 rows = subpath->rows * subpath->parallel_workers;
    3288             :             }
    3289             :             else
    3290         144 :                 subpath = (Path *) create_incremental_sort_path(root,
    3291             :                                                                 rel,
    3292             :                                                                 subpath,
    3293             :                                                                 useful_pathkeys,
    3294             :                                                                 presorted_keys,
    3295             :                                                                 -1);
    3296        5062 :             path = create_gather_merge_path(root, rel,
    3297             :                                             subpath,
    3298        5062 :                                             rel->reltarget,
    3299             :                                             subpath->pathkeys,
    3300             :                                             NULL,
    3301             :                                             rowsp);
    3302             : 
    3303        5062 :             add_path(rel, &path->path);
    3304             :         }
    3305             :     }
    3306             : }
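
As a rough illustration (the actual plan depends on costs, table size, and settings; "big_table" and "some_column" are hypothetical names), this is the kind of query that benefits: with parallel workers available and no index providing the requested order, the explicit sort added above can be pushed below a Gather Merge, giving a plan shaped like Gather Merge -> Sort -> Parallel Seq Scan instead of sorting the whole result in the leader:

    SELECT * FROM big_table ORDER BY some_column;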
    3307             : 
    3308             : /*
    3309             :  * make_rel_from_joinlist
    3310             :  *    Build access paths using a "joinlist" to guide the join path search.
    3311             :  *
    3312             :  * See comments for deconstruct_jointree() for definition of the joinlist
    3313             :  * data structure.
    3314             :  */
    3315             : static RelOptInfo *
    3316      263908 : make_rel_from_joinlist(PlannerInfo *root, List *joinlist)
    3317             : {
    3318             :     int         levels_needed;
    3319             :     List       *initial_rels;
    3320             :     ListCell   *jl;
    3321             : 
    3322             :     /*
    3323             :      * Count the number of child joinlist nodes.  This is the depth of the
    3324             :      * dynamic-programming algorithm we must employ to consider all ways of
    3325             :      * joining the child nodes.
    3326             :      */
    3327      263908 :     levels_needed = list_length(joinlist);
    3328             : 
    3329      263908 :     if (levels_needed <= 0)
    3330           0 :         return NULL;            /* nothing to do? */
    3331             : 
    3332             :     /*
    3333             :      * Construct a list of rels corresponding to the child joinlist nodes.
    3334             :      * This may contain both base rels and rels constructed according to
    3335             :      * sub-joinlists.
    3336             :      */
    3337      263908 :     initial_rels = NIL;
    3338      634504 :     foreach(jl, joinlist)
    3339             :     {
    3340      370596 :         Node       *jlnode = (Node *) lfirst(jl);
    3341             :         RelOptInfo *thisrel;
    3342             : 
    3343      370596 :         if (IsA(jlnode, RangeTblRef))
    3344             :         {
    3345      367638 :             int         varno = ((RangeTblRef *) jlnode)->rtindex;
    3346             : 
    3347      367638 :             thisrel = find_base_rel(root, varno);
    3348             :         }
    3349        2958 :         else if (IsA(jlnode, List))
    3350             :         {
    3351             :             /* Recurse to handle subproblem */
    3352        2958 :             thisrel = make_rel_from_joinlist(root, (List *) jlnode);
    3353             :         }
    3354             :         else
    3355             :         {
    3356           0 :             elog(ERROR, "unrecognized joinlist node type: %d",
    3357             :                  (int) nodeTag(jlnode));
    3358             :             thisrel = NULL;     /* keep compiler quiet */
    3359             :         }
    3360             : 
    3361      370596 :         initial_rels = lappend(initial_rels, thisrel);
    3362             :     }
    3363             : 
    3364      263908 :     if (levels_needed == 1)
    3365             :     {
    3366             :         /*
    3367             :          * Single joinlist node, so we're done.
    3368             :          */
    3369      186490 :         return (RelOptInfo *) linitial(initial_rels);
    3370             :     }
    3371             :     else
    3372             :     {
    3373             :         /*
    3374             :          * Consider the different orders in which we could join the rels,
    3375             :          * using a plugin, GEQO, or the regular join search code.
    3376             :          *
    3377             :          * We put the initial_rels list into a PlannerInfo field because
    3378             :          * has_legal_joinclause() needs to look at it (ugly :-().
    3379             :          */
    3380       77418 :         root->initial_rels = initial_rels;
    3381             : 
    3382       77418 :         if (join_search_hook)
    3383           0 :             return (*join_search_hook) (root, levels_needed, initial_rels);
    3384       77418 :         else if (enable_geqo && levels_needed >= geqo_threshold)
    3385           6 :             return geqo(root, levels_needed, initial_rels);
    3386             :         else
    3387       77412 :             return standard_join_search(root, levels_needed, initial_rels);
    3388             :     }
    3389             : }
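
The switch to GEQO is driven by configuration: with the defaults (enable_geqo = on, geqo_threshold = 12), a join problem of 12 or more joinlist items goes through geqo() instead of standard_join_search(). For example, the choice can be steered from SQL:

    SHOW geqo_threshold;        -- 12 by default
    SET geqo_threshold = 16;    -- keep the exhaustive search for larger joins
    SET enable_geqo = off;      -- or disable GEQO entirely for this session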
    3390             : 
    3391             : /*
    3392             :  * standard_join_search
    3393             :  *    Find possible joinpaths for a query by successively finding ways
    3394             :  *    to join component relations into join relations.
    3395             :  *
    3396             :  * 'levels_needed' is the number of iterations needed, ie, the number of
    3397             :  *      independent jointree items in the query.  This is > 1.
    3398             :  *
    3399             :  * 'initial_rels' is a list of RelOptInfo nodes for each independent
    3400             :  *      jointree item.  These are the components to be joined together.
    3401             :  *      Note that levels_needed == list_length(initial_rels).
    3402             :  *
    3403             :  * Returns the final level of join relations, i.e., the relation that is
    3404             :  * the result of joining all the original relations together.
    3405             :  * At least one implementation path must be provided for this relation and
    3406             :  * all required sub-relations.
    3407             :  *
    3408             :  * To support loadable plugins that modify planner behavior by changing the
    3409             :  * join searching algorithm, we provide a hook variable that lets a plugin
    3410             :  * replace or supplement this function.  Any such hook must return the same
    3411             :  * final join relation as the standard code would, but it might have a
    3412             :  * different set of implementation paths attached, and only the sub-joinrels
    3413             :  * needed for these paths need have been instantiated.
    3414             :  *
    3415             :  * Note to plugin authors: the functions invoked during standard_join_search()
    3416             :  * modify root->join_rel_list and root->join_rel_hash.  If you want to do more
    3417             :  * than one join-order search, you'll probably need to save and restore the
    3418             :  * original states of those data structures.  See geqo_eval() for an example.
    3419             :  */
    3420             : RelOptInfo *
    3421       77412 : standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
    3422             : {
    3423             :     int         lev;
    3424             :     RelOptInfo *rel;
    3425             : 
    3426             :     /*
    3427             :      * This function cannot be invoked recursively within any one planning
    3428             :      * problem, so join_rel_level[] can't be in use already.
    3429             :      */
    3430             :     Assert(root->join_rel_level == NULL);
    3431             : 
    3432             :     /*
    3433             :      * We employ a simple "dynamic programming" algorithm: we first find all
    3434             :      * ways to build joins of two jointree items, then all ways to build joins
    3435             :      * of three items (from two-item joins and single items), then four-item
    3436             :      * joins, and so on until we have considered all ways to join all the
    3437             :      * items into one rel.
    3438             :      *
    3439             :      * root->join_rel_level[j] is a list of all the j-item rels.  Initially we
    3440             :      * set root->join_rel_level[1] to represent all the single-jointree-item
    3441             :      * relations.
    3442             :      */
    3443       77412 :     root->join_rel_level = (List **) palloc0((levels_needed + 1) * sizeof(List *));
    3444             : 
    3445       77412 :     root->join_rel_level[1] = initial_rels;
    3446             : 
    3447      184076 :     for (lev = 2; lev <= levels_needed; lev++)
    3448             :     {
    3449             :         ListCell   *lc;
    3450             : 
    3451             :         /*
    3452             :          * Determine all possible pairs of relations to be joined at this
    3453             :          * level, and build paths for making each one from every available
    3454             :          * pair of lower-level relations.
    3455             :          */
    3456      106664 :         join_search_one_level(root, lev);
    3457             : 
    3458             :         /*
    3459             :          * Run generate_partitionwise_join_paths() and
    3460             :          * generate_useful_gather_paths() for each just-processed joinrel.  We
    3461             :          * could not do this earlier because both regular and partial paths
    3462             :          * can get added to a particular joinrel at multiple times within
    3463             :          * join_search_one_level.
    3464             :          *
    3465             :          * After that, we're done creating paths for the joinrel, so run
    3466             :          * set_cheapest().
    3467             :          */
    3468      269748 :         foreach(lc, root->join_rel_level[lev])
    3469             :         {
    3470      163084 :             rel = (RelOptInfo *) lfirst(lc);
    3471             : 
    3472             :             /* Create paths for partitionwise joins. */
    3473      163084 :             generate_partitionwise_join_paths(root, rel);
    3474             : 
    3475             :             /*
    3476             :              * Except for the topmost scan/join rel, consider gathering
    3477             :              * partial paths.  We'll do the same for the topmost scan/join rel
    3478             :              * once we know the final targetlist (see grouping_planner and
    3479             :              * its call to apply_scanjoin_target_to_paths).
    3480             :              */
    3481      163084 :             if (!bms_equal(rel->relids, root->all_query_rels))
    3482       86128 :                 generate_useful_gather_paths(root, rel, false);
    3483             : 
    3484             :             /* Find and save the cheapest paths for this rel */
    3485      163084 :             set_cheapest(rel);
    3486             : 
    3487             : #ifdef OPTIMIZER_DEBUG
    3488             :             pprint(rel);
    3489             : #endif
    3490             :         }
    3491             :     }
    3492             : 
    3493             :     /*
    3494             :      * We should have a single rel at the final level.
    3495             :      */
    3496       77412 :     if (root->join_rel_level[levels_needed] == NIL)
    3497           0 :         elog(ERROR, "failed to build any %d-way joins", levels_needed);
    3498             :     Assert(list_length(root->join_rel_level[levels_needed]) == 1);
    3499             : 
    3500       77412 :     rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
    3501             : 
    3502       77412 :     root->join_rel_level = NULL;
    3503             : 
    3504       77412 :     return rel;
    3505             : }
    3506             : 
    3507             : /*****************************************************************************
    3508             :  *          PUSHING QUALS DOWN INTO SUBQUERIES
    3509             :  *****************************************************************************/
    3510             : 
    3511             : /*
    3512             :  * subquery_is_pushdown_safe - is a subquery safe for pushing down quals?
    3513             :  *
    3514             :  * subquery is the particular component query being checked.  topquery
    3515             :  * is the top component of a set-operations tree (the same Query if no
    3516             :  * set-op is involved).
    3517             :  *
    3518             :  * Conditions checked here:
    3519             :  *
    3520             :  * 1. If the subquery has a LIMIT clause, we must not push down any quals,
    3521             :  * since that could change the set of rows returned.
    3522             :  *
    3523             :  * 2. If the subquery contains EXCEPT or EXCEPT ALL set ops we cannot push
    3524             :  * quals into it, because that could change the results.
    3525             :  *
    3526             :  * 3. If the subquery uses DISTINCT, we cannot push volatile quals into it.
    3527             :  * This is because upper-level quals should semantically be evaluated only
    3528             :  * once per distinct row, not once per original row, and if the qual is
    3529             :  * volatile then extra evaluations could change the results.  (This issue
    3530             :  * does not apply to other forms of aggregation such as GROUP BY, because
    3531             :  * when those are present we push into HAVING not WHERE, so that the quals
    3532             :  * are still applied after aggregation.)
    3533             :  *
    3534             :  * 4. If the subquery contains window functions, we cannot push volatile quals
    3535             :  * into it.  The issue here is a bit different from DISTINCT: a volatile qual
    3536             :  * might succeed for some rows of a window partition and fail for others,
    3537             :  * thereby changing the partition contents and thus the window functions'
    3538             :  * results for rows that remain.
    3539             :  *
    3540             :  * 5. If the subquery contains any set-returning functions in its targetlist,
    3541             :  * we cannot push volatile quals into it.  That would push them below the SRFs
    3542             :  * and thereby change the number of times they are evaluated.  Also, a
    3543             :  * volatile qual could succeed for some SRF output rows and fail for others,
    3544             :  * a behavior that cannot occur if it's evaluated before SRF expansion.
    3545             :  *
    3546             :  * 6. If the subquery has nonempty grouping sets, we cannot push down any
    3547             :  * quals.  The concern here is that a qual referencing a "constant" grouping
    3548             :  * column could get constant-folded, which would be improper because the value
    3549             :  * is potentially nullable by grouping-set expansion.  This restriction could
    3550             :  * be removed if we had a parsetree representation that shows that such
    3551             :  * grouping columns are not really constant.  (There are other ideas that
    3552             :  * could be used to relax this restriction, but that's the approach most
    3553             :  * likely to get taken in the future.  Note that there's not much to be gained
    3554             :  * so long as subquery_planner can't move HAVING clauses to WHERE within such
    3555             :  * a subquery.)
    3556             :  *
    3557             :  * In addition, we make several checks on the subquery's output columns to see
    3558             :  * if it is safe to reference them in pushed-down quals.  If output column k
    3559             :  * is found to be unsafe to reference, we set the reason for that inside
    3560             :  * safetyInfo->unsafeFlags[k], but we don't reject the subquery overall since
    3561             :  * column k might not be referenced by some/all quals.  The unsafeFlags[]
    3562             :  * array will be consulted later by qual_is_pushdown_safe().  It's better to
    3563             :  * do it this way than to make the checks directly in qual_is_pushdown_safe(),
    3564             :  * because when the subquery involves set operations we have to check the
    3565             :  * output expressions in each arm of the set op.
    3566             :  *
    3567             :  * Note: pushing quals into a DISTINCT subquery is theoretically dubious:
    3568             :  * we're effectively assuming that the quals cannot distinguish values that
    3569             :  * the DISTINCT's equality operator sees as equal, yet there are many
    3570             :  * counterexamples to that assumption.  However use of such a qual with a
    3571             :  * DISTINCT subquery would be unsafe anyway, since there's no guarantee which
    3572             :  * "equal" value will be chosen as the output value by the DISTINCT operation.
    3573             :  * So we don't worry too much about that.  Another objection is that if the
    3574             :  * qual is expensive to evaluate, running it for each original row might cost
    3575             :  * more than we save by eliminating rows before the DISTINCT step.  But it
    3576             :  * would be very hard to estimate that at this stage, and in practice pushdown
    3577             :  * seldom seems to make things worse, so we ignore that problem too.
    3578             :  *
    3579             :  * Note: likewise, pushing quals into a subquery with window functions is a
    3580             :  * bit dubious: the quals might remove some rows of a window partition while
    3581             :  * leaving others, causing changes in the window functions' results for the
    3582             :  * surviving rows.  We insist that such a qual reference only partitioning
    3583             :  * columns, but again that only protects us if the qual does not distinguish
    3584             :  * values that the partitioning equality operator sees as equal.  The risks
    3585             :  * here are perhaps larger than for DISTINCT, since no de-duplication of rows
    3586             :  * occurs and thus there is no theoretical problem with such a qual.  But
    3587             :  * we'll do this anyway because the potential performance benefits are very
    3588             :  * large, and we've seen no field complaints about the longstanding comparable
    3589             :  * behavior with DISTINCT.
    3590             :  */
    3591             : static bool
    3592        1390 : subquery_is_pushdown_safe(Query *subquery, Query *topquery,
    3593             :                           pushdown_safety_info *safetyInfo)
    3594             : {
    3595             :     SetOperationStmt *topop;
    3596             : 
    3597             :     /* Check point 1 */
    3598        1390 :     if (subquery->limitOffset != NULL || subquery->limitCount != NULL)
    3599          92 :         return false;
    3600             : 
    3601             :     /* Check point 6 */
    3602        1298 :     if (subquery->groupClause && subquery->groupingSets)
    3603          12 :         return false;
    3604             : 
    3605             :     /* Check points 3, 4, and 5 */
    3606        1286 :     if (subquery->distinctClause ||
    3607        1214 :         subquery->hasWindowFuncs ||
    3608         962 :         subquery->hasTargetSRFs)
    3609         502 :         safetyInfo->unsafeVolatile = true;
    3610             : 
    3611             :     /*
    3612             :      * If we're at a leaf query, check for unsafe expressions in its target
    3613             :      * list, and mark any reasons why they're unsafe in unsafeFlags[].
    3614             :      * (Non-leaf nodes in setop trees have only simple Vars in their tlists,
    3615             :      * so no need to check them.)
    3616             :      */
    3617        1286 :     if (subquery->setOperations == NULL)
    3618        1212 :         check_output_expressions(subquery, safetyInfo);
    3619             : 
    3620             :     /* Are we at top level, or looking at a setop component? */
    3621        1286 :     if (subquery == topquery)
    3622             :     {
    3623             :         /* Top level, so check any component queries */
    3624        1138 :         if (subquery->setOperations != NULL)
    3625          74 :             if (!recurse_pushdown_safe(subquery->setOperations, topquery,
    3626             :                                        safetyInfo))
    3627           0 :                 return false;
    3628             :     }
    3629             :     else
    3630             :     {
    3631             :         /* Setop component must not have more components (too weird) */
    3632         148 :         if (subquery->setOperations != NULL)
    3633           0 :             return false;
    3634             :         /* Check whether setop component output types match top level */
    3635         148 :         topop = castNode(SetOperationStmt, topquery->setOperations);
    3636             :         Assert(topop);
    3637         148 :         compare_tlist_datatypes(subquery->targetList,
    3638             :                                 topop->colTypes,
    3639             :                                 safetyInfo);
    3640             :     }
    3641        1286 :     return true;
    3642             : }
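
To make point 1 above concrete (table and column names are illustrative): pushing the outer qual into a LIMIT subquery would filter rows before the limit is applied, changing which rows are returned, so the qual must stay outside:

    SELECT *
    FROM (SELECT x FROM tbl ORDER BY x DESC LIMIT 10) AS ss
    WHERE ss.x < 100;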
    3643             : 
    3644             : /*
    3645             :  * Helper routine to recurse through setOperations tree
    3646             :  */
    3647             : static bool
    3648         222 : recurse_pushdown_safe(Node *setOp, Query *topquery,
    3649             :                       pushdown_safety_info *safetyInfo)
    3650             : {
    3651         222 :     if (IsA(setOp, RangeTblRef))
    3652             :     {
    3653         148 :         RangeTblRef *rtr = (RangeTblRef *) setOp;
    3654         148 :         RangeTblEntry *rte = rt_fetch(rtr->rtindex, topquery->rtable);
    3655         148 :         Query      *subquery = rte->subquery;
    3656             : 
    3657             :         Assert(subquery != NULL);
    3658         148 :         return subquery_is_pushdown_safe(subquery, topquery, safetyInfo);
    3659             :     }
    3660          74 :     else if (IsA(setOp, SetOperationStmt))
    3661             :     {
    3662          74 :         SetOperationStmt *op = (SetOperationStmt *) setOp;
    3663             : 
    3664             :         /* EXCEPT is no good (point 2 for subquery_is_pushdown_safe) */
    3665          74 :         if (op->op == SETOP_EXCEPT)
    3666           0 :             return false;
    3667             :         /* Else recurse */
    3668          74 :         if (!recurse_pushdown_safe(op->larg, topquery, safetyInfo))
    3669           0 :             return false;
    3670          74 :         if (!recurse_pushdown_safe(op->rarg, topquery, safetyInfo))
    3671           0 :             return false;
    3672             :     }
    3673             :     else
    3674             :     {
    3675           0 :         elog(ERROR, "unrecognized node type: %d",
    3676             :              (int) nodeTag(setOp));
    3677             :     }
    3678          74 :     return true;
    3679             : }
    3680             : 
    3681             : /*
    3682             :  * check_output_expressions - check subquery's output expressions for safety
    3683             :  *
    3684             :  * There are several cases in which it's unsafe to push down an upper-level
    3685             :  * qual if it references a particular output column of a subquery.  We check
    3686             :  * each output column of the subquery and set flags in unsafeFlags[k] when we
    3687             :  * see that column is unsafe for a pushed-down qual to reference.  The
    3688             :  * conditions checked here are:
    3689             :  *
    3690             :  * 1. We must not push down any quals that refer to subselect outputs that
    3691             :  * return sets, else we'd introduce functions-returning-sets into the
    3692             :  * subquery's WHERE/HAVING quals.
    3693             :  *
    3694             :  * 2. We must not push down any quals that refer to subselect outputs that
    3695             :  * contain volatile functions, for fear of introducing strange results due
    3696             :  * to multiple evaluation of a volatile function.
    3697             :  *
    3698             :  * 3. If the subquery uses DISTINCT ON, we must not push down any quals that
    3699             :  * refer to non-DISTINCT output columns, because that could change the set
    3700             :  * of rows returned.  (This condition is vacuous for DISTINCT, because then
    3701             :  * there are no non-DISTINCT output columns, so we needn't check.  Note that
    3702             :  * subquery_is_pushdown_safe already reported that we can't use volatile
    3703             :  * quals if there's DISTINCT or DISTINCT ON.)
    3704             :  *
    3705             :  * 4. If the subquery has any window functions, we must not push down quals
    3706             :  * that reference any output columns that are not listed in all the subquery's
    3707             :  * window PARTITION BY clauses.  We can push down quals that use only
    3708             :  * partitioning columns because they should succeed or fail identically for
    3709             :  * every row of any one window partition, and totally excluding some
    3710             :  * partitions will not change a window function's results for remaining
    3711             :  * partitions.  (Again, this also requires nonvolatile quals, but
    3712             :  * subquery_is_pushdown_safe handles that.).  Subquery columns marked as
    3713             :  * subquery_is_pushdown_safe handles that.)  Subquery columns marked as
    3714             :  * down.
    3715             :  */
    3716             : static void
    3717        1212 : check_output_expressions(Query *subquery, pushdown_safety_info *safetyInfo)
    3718             : {
    3719             :     ListCell   *lc;
    3720             : 
    3721        8494 :     foreach(lc, subquery->targetList)
    3722             :     {
    3723        7282 :         TargetEntry *tle = (TargetEntry *) lfirst(lc);
    3724             : 
    3725        7282 :         if (tle->resjunk)
    3726         138 :             continue;           /* ignore resjunk columns */
    3727             : 
    3728             :         /* Functions returning sets are unsafe (point 1) */
    3729        7144 :         if (subquery->hasTargetSRFs &&
    3730         590 :             (safetyInfo->unsafeFlags[tle->resno] &
    3731         590 :              UNSAFE_HAS_SET_FUNC) == 0 &&
    3732         590 :             expression_returns_set((Node *) tle->expr))
    3733             :         {
    3734         328 :             safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_SET_FUNC;
    3735         328 :             continue;
    3736             :         }
    3737             : 
    3738             :         /* Volatile functions are unsafe (point 2) */
    3739        6816 :         if ((safetyInfo->unsafeFlags[tle->resno] &
    3740        6804 :              UNSAFE_HAS_VOLATILE_FUNC) == 0 &&
    3741        6804 :             contain_volatile_functions((Node *) tle->expr))
    3742             :         {
    3743          78 :             safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_HAS_VOLATILE_FUNC;
    3744          78 :             continue;
    3745             :         }
    3746             : 
    3747             :         /* If subquery uses DISTINCT ON, check point 3 */
    3748        6738 :         if (subquery->hasDistinctOn &&
    3749           0 :             (safetyInfo->unsafeFlags[tle->resno] &
    3750           0 :              UNSAFE_NOTIN_DISTINCTON_CLAUSE) == 0 &&
    3751           0 :             !targetIsInSortList(tle, InvalidOid, subquery->distinctClause))
    3752             :         {
    3753             :             /* non-DISTINCT column, so mark it unsafe */
    3754           0 :             safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_DISTINCTON_CLAUSE;
    3755           0 :             continue;
    3756             :         }
    3757             : 
    3758             :         /* If subquery uses window functions, check point 4 */
    3759        6738 :         if (subquery->hasWindowFuncs &&
    3760        1134 :             (safetyInfo->unsafeFlags[tle->resno] &
    3761        1134 :              UNSAFE_NOTIN_DISTINCTON_CLAUSE) == 0 &&
    3762        1134 :             !targetIsInAllPartitionLists(tle, subquery))
    3763             :         {
    3764             :             /* not present in all PARTITION BY clauses, so mark it unsafe */
    3765        1038 :             safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_NOTIN_PARTITIONBY_CLAUSE;
    3766        1038 :             continue;
    3767             :         }
    3768             :     }
    3769        1212 : }
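                     : 
                     : /*
                     :  * Illustrative sketch (editor's note, not part of allpaths.c), showing
                     :  * point 4 above in practice.  Assuming a table emp(depname, salary), in
                     :  *
                     :  *   SELECT * FROM (SELECT depname, salary,
                     :  *                         rank() OVER (PARTITION BY depname
                     :  *                                      ORDER BY salary) AS r
                     :  *                  FROM emp) ss
                     :  *   WHERE depname = 'sales' AND salary > 1000;
                     :  *
                     :  * "depname" appears in the PARTITION BY clause, so "depname = 'sales'"
                     :  * can be pushed down.  "salary" does not, so it gets
                     :  * UNSAFE_NOTIN_PARTITIONBY_CLAUSE and "salary > 1000" must stay above
                     :  * the subquery: pushing it could drop rows from a partition and change
                     :  * rank()'s result for the rows that remain.
                     :  */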
    3770             : 
    3771             : /*
    3772             :  * For subqueries using UNION/UNION ALL/INTERSECT/INTERSECT ALL, we can
    3773             :  * push quals into each component query, but the quals can only reference
    3774             :  * subquery columns that suffer no type coercions in the set operation.
    3775             :  * Otherwise there are possible semantic gotchas.  So, we check the
    3776             :  * component queries to see if any of them have output types different from
    3777             :  * the top-level setop outputs.  We set the UNSAFE_TYPE_MISMATCH bit in
    3778             :  * unsafeFlags[k] if column k has different type in any component.
    3779             :  *
    3780             :  * We don't have to care about typmods here: the only allowed difference
    3781             :  * between set-op input and output typmods is input is a specific typmod
    3782             :  * and output is -1, and that does not require a coercion.
    3783             :  *
    3784             :  * tlist is a subquery tlist.
    3785             :  * colTypes is an OID list of the top-level setop's output column types.
    3786             :  * safetyInfo is the pushdown_safety_info to set unsafeFlags[] for.
    3787             :  */
    3788             : static void
    3789         148 : compare_tlist_datatypes(List *tlist, List *colTypes,
    3790             :                         pushdown_safety_info *safetyInfo)
    3791             : {
    3792             :     ListCell   *l;
    3793         148 :     ListCell   *colType = list_head(colTypes);
    3794             : 
    3795         480 :     foreach(l, tlist)
    3796             :     {
    3797         332 :         TargetEntry *tle = (TargetEntry *) lfirst(l);
    3798             : 
    3799         332 :         if (tle->resjunk)
    3800           0 :             continue;           /* ignore resjunk columns */
    3801         332 :         if (colType == NULL)
    3802           0 :             elog(ERROR, "wrong number of tlist entries");
    3803         332 :         if (exprType((Node *) tle->expr) != lfirst_oid(colType))
    3804          28 :             safetyInfo->unsafeFlags[tle->resno] |= UNSAFE_TYPE_MISMATCH;
    3805         332 :         colType = lnext(colTypes, colType);
    3806             :     }
    3807         148 :     if (colType != NULL)
    3808           0 :         elog(ERROR, "wrong number of tlist entries");
    3809         148 : }
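                     : 
                     : /*
                     :  * Illustrative sketch (editor's note, not part of allpaths.c).  Assuming
                     :  * tables t1(a int) and t2(b bigint), in
                     :  *
                     :  *   SELECT * FROM (SELECT a FROM t1
                     :  *                  UNION ALL
                     :  *                  SELECT b FROM t2) ss
                     :  *   WHERE a < 42;
                     :  *
                     :  * the setop output column is bigint while the first arm produces int, so
                     :  * compare_tlist_datatypes sets UNSAFE_TYPE_MISMATCH for that column and
                     :  * the qual is kept above the set operation rather than pushed into the
                     :  * arms, where the coercion difference could change its meaning.
                     :  */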
    3810             : 
    3811             : /*
    3812             :  * targetIsInAllPartitionLists
    3813             :  *      True if the TargetEntry is listed in the PARTITION BY clause
    3814             :  *      of every window defined in the query.
    3815             :  *
    3816             :  * It would be safe to ignore windows not actually used by any window
    3817             :  * function, but it's not easy to get that info at this stage; and it's
    3818             :  * unlikely to be useful to spend any extra cycles getting it, since
    3819             :  * unreferenced window definitions are probably infrequent in practice.
    3820             :  */
    3821             : static bool
    3822        1134 : targetIsInAllPartitionLists(TargetEntry *tle, Query *query)
    3823             : {
    3824             :     ListCell   *lc;
    3825             : 
    3826        1254 :     foreach(lc, query->windowClause)
    3827             :     {
    3828        1158 :         WindowClause *wc = (WindowClause *) lfirst(lc);
    3829             : 
    3830        1158 :         if (!targetIsInSortList(tle, InvalidOid, wc->partitionClause))
    3831        1038 :             return false;
    3832             :     }
    3833          96 :     return true;
    3834             : }
    3835             : 
    3836             : /*
    3837             :  * qual_is_pushdown_safe - is a particular rinfo safe to push down?
    3838             :  *
    3839             :  * rinfo is a restriction clause applying to the given subquery (whose RTE
    3840             :  * has index rti in the parent query).
    3841             :  *
    3842             :  * Conditions checked here:
    3843             :  *
    3844             :  * 1. rinfo's clause must not contain any SubPlans (mainly because it's
    3845             :  * unclear that it will work correctly: SubLinks will already have been
    3846             :  * transformed into SubPlans in the qual, but not in the subquery).  Note that
    3847             :  * SubLinks that transform to initplans are safe, and will be accepted here
    3848             :  * because what we'll see in the qual is just a Param referencing the initplan
    3849             :  * output.
    3850             :  *
    3851             :  * 2. If unsafeVolatile is set, rinfo's clause must not contain any volatile
    3852             :  * functions.
    3853             :  *
    3854             :  * 3. If unsafeLeaky is set, rinfo's clause must not contain any leaky
    3855             :  * functions that are passed Var nodes, and therefore might reveal values from
    3856             :  * the subquery as side effects.
    3857             :  *
    3858             :  * 4. rinfo's clause must not refer to the whole-row output of the subquery
    3859             :  * (since there is no easy way to name that within the subquery itself).
    3860             :  *
    3861             :  * 5. rinfo's clause must not refer to any subquery output columns that were
    3862             :  * found to be unsafe to reference by subquery_is_pushdown_safe().
    3863             :  */
    3864             : static pushdown_safe_type
    3865        1510 : qual_is_pushdown_safe(Query *subquery, Index rti, RestrictInfo *rinfo,
    3866             :                       pushdown_safety_info *safetyInfo)
    3867             : {
    3868        1510 :     pushdown_safe_type safe = PUSHDOWN_SAFE;
    3869        1510 :     Node       *qual = (Node *) rinfo->clause;
    3870             :     List       *vars;
    3871             :     ListCell   *vl;
    3872             : 
    3873             :     /* Refuse subselects (point 1) */
    3874        1510 :     if (contain_subplans(qual))
    3875          66 :         return PUSHDOWN_UNSAFE;
    3876             : 
    3877             :     /* Refuse volatile quals if we found they'd be unsafe (point 2) */
    3878        2046 :     if (safetyInfo->unsafeVolatile &&
    3879         602 :         contain_volatile_functions((Node *) rinfo))
    3880          18 :         return PUSHDOWN_UNSAFE;
    3881             : 
    3882             :     /* Refuse leaky quals if told to (point 3) */
    3883        1708 :     if (safetyInfo->unsafeLeaky &&
    3884         282 :         contain_leaked_vars(qual))
    3885         138 :         return PUSHDOWN_UNSAFE;
    3886             : 
    3887             :     /*
    3888             :      * Examine all Vars used in clause.  Since it's a restriction clause, all
    3889             :      * such Vars must refer to subselect output columns ... unless this is
    3890             :      * part of a LATERAL subquery, in which case there could be lateral
    3891             :      * references.
    3892             :      *
    3893             :      * By omitting the relevant flags, this also gives us a cheap sanity check
    3894             :      * that no aggregates or window functions appear in the qual.  Those would
    3895             :      * be unsafe to push down, but at least for the moment we could never see
    3896             :      * any in a qual anyhow.
    3897             :      */
    3898        1288 :     vars = pull_var_clause(qual, PVC_INCLUDE_PLACEHOLDERS);
    3899        2494 :     foreach(vl, vars)
    3900             :     {
    3901        1396 :         Var        *var = (Var *) lfirst(vl);
    3902             : 
    3903             :         /*
    3904             :          * XXX Punt if we find any PlaceHolderVars in the restriction clause.
    3905             :          * It's not clear whether a PHV could safely be pushed down, and even
    3906             :          * less clear whether such a situation could arise in any cases of
    3907             :          * practical interest anyway.  So for the moment, just refuse to push
    3908             :          * down.
    3909             :          */
    3910        1396 :         if (!IsA(var, Var))
    3911             :         {
    3912           0 :             safe = PUSHDOWN_UNSAFE;
    3913           0 :             break;
    3914             :         }
    3915             : 
    3916             :         /*
    3917             :          * Punt if we find any lateral references.  It would be safe to push
    3918             :          * these down, but we'd have to convert them into outer references,
    3919             :          * which subquery_push_qual lacks the infrastructure to do.  The case
    3920             :          * arises so seldom that it doesn't seem worth working hard on.
    3921             :          */
    3922        1396 :         if (var->varno != rti)
    3923             :         {
    3924          12 :             safe = PUSHDOWN_UNSAFE;
    3925          12 :             break;
    3926             :         }
    3927             : 
    3928             :         /* Subqueries have no system columns */
    3929             :         Assert(var->varattno >= 0);
    3930             : 
    3931             :         /* Check point 4 */
    3932        1384 :         if (var->varattno == 0)
    3933             :         {
    3934           0 :             safe = PUSHDOWN_UNSAFE;
    3935           0 :             break;
    3936             :         }
    3937             : 
    3938             :         /* Check point 5 */
    3939        1384 :         if (safetyInfo->unsafeFlags[var->varattno] != 0)
    3940             :         {
    3941         490 :             if (safetyInfo->unsafeFlags[var->varattno] &
    3942             :                 (UNSAFE_HAS_VOLATILE_FUNC | UNSAFE_HAS_SET_FUNC |
    3943             :                  UNSAFE_NOTIN_DISTINCTON_CLAUSE | UNSAFE_TYPE_MISMATCH))
    3944             :             {
    3945         178 :                 safe = PUSHDOWN_UNSAFE;
    3946         178 :                 break;
    3947             :             }
    3948             :             else
    3949             :             {
    3950             :                 /* UNSAFE_NOTIN_PARTITIONBY_CLAUSE is ok for run conditions */
    3951         312 :                 safe = PUSHDOWN_WINDOWCLAUSE_RUNCOND;
    3952             :                 /* don't break, we might find another Var that's unsafe */
    3953             :             }
    3954             :         }
    3955             :     }
    3956             : 
    3957        1288 :     list_free(vars);
    3958             : 
    3959        1288 :     return safe;
    3960             : }
    3961             : 
    3962             : /*
    3963             :  * subquery_push_qual - push down a qual that we have determined is safe
    3964             :  */
    3965             : static void
    3966         964 : subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual)
    3967             : {
    3968         964 :     if (subquery->setOperations != NULL)
    3969             :     {
    3970             :         /* Recurse to push it separately to each component query */
    3971          50 :         recurse_push_qual(subquery->setOperations, subquery,
    3972             :                           rte, rti, qual);
    3973             :     }
    3974             :     else
    3975             :     {
    3976             :         /*
    3977             :          * We need to replace Vars in the qual (which must refer to outputs of
    3978             :          * the subquery) with copies of the subquery's targetlist expressions.
    3979             :          * Note that at this point, any uplevel Vars in the qual should have
    3980             :          * been replaced with Params, so they need no work.
    3981             :          *
    3982             :          * This step also ensures that when we are pushing into a setop tree,
    3983             :          * each component query gets its own copy of the qual.
    3984             :          */
    3985         914 :         qual = ReplaceVarsFromTargetList(qual, rti, 0, rte,
    3986             :                                          subquery->targetList,
    3987             :                                          REPLACEVARS_REPORT_ERROR, 0,
    3988             :                                          &subquery->hasSubLinks);
    3989             : 
    3990             :         /*
    3991             :          * Now attach the qual to the proper place: normally WHERE, but if the
    3992             :          * subquery uses grouping or aggregation, put it in HAVING (since the
    3993             :          * qual really refers to the group-result rows).
    3994             :          */
    3995         914 :         if (subquery->hasAggs || subquery->groupClause || subquery->groupingSets || subquery->havingQual)
    3996         162 :             subquery->havingQual = make_and_qual(subquery->havingQual, qual);
    3997             :         else
    3998         752 :             subquery->jointree->quals =
    3999         752 :                 make_and_qual(subquery->jointree->quals, qual);
    4000             : 
    4001             :         /*
    4002             :          * We need not change the subquery's hasAggs or hasSubLinks flags,
    4003             :          * since we can't be pushing down any aggregates that weren't there
    4004             :          * before, and we don't push down subselects at all.
    4005             :          */
    4006             :     }
    4007         964 : }
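                     : 
                     : /*
                     :  * Illustrative sketch (editor's note, not part of allpaths.c).  Assuming
                     :  * a table emp(deptno, sal), in
                     :  *
                     :  *   SELECT * FROM (SELECT deptno, sum(sal) AS total FROM emp
                     :  *                  GROUP BY deptno) ss
                     :  *   WHERE total > 1000;
                     :  *
                     :  * the pushed-down qual becomes "sum(sal) > 1000" after Var replacement
                     :  * and, because the subquery aggregates, is attached to HAVING rather than
                     :  * WHERE, since it constrains group-result rows.
                     :  */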
    4008             : 
    4009             : /*
    4010             :  * Helper routine to recurse through setOperations tree
    4011             :  */
    4012             : static void
    4013         150 : recurse_push_qual(Node *setOp, Query *topquery,
    4014             :                   RangeTblEntry *rte, Index rti, Node *qual)
    4015             : {
    4016         150 :     if (IsA(setOp, RangeTblRef))
    4017             :     {
    4018         100 :         RangeTblRef *rtr = (RangeTblRef *) setOp;
    4019         100 :         RangeTblEntry *subrte = rt_fetch(rtr->rtindex, topquery->rtable);
    4020         100 :         Query      *subquery = subrte->subquery;
    4021             : 
    4022             :         Assert(subquery != NULL);
    4023         100 :         subquery_push_qual(subquery, rte, rti, qual);
    4024             :     }
    4025          50 :     else if (IsA(setOp, SetOperationStmt))
    4026             :     {
    4027          50 :         SetOperationStmt *op = (SetOperationStmt *) setOp;
    4028             : 
    4029          50 :         recurse_push_qual(op->larg, topquery, rte, rti, qual);
    4030          50 :         recurse_push_qual(op->rarg, topquery, rte, rti, qual);
    4031             :     }
    4032             :     else
    4033             :     {
    4034           0 :         elog(ERROR, "unrecognized node type: %d",
    4035             :              (int) nodeTag(setOp));
    4036             :     }
    4037         150 : }
    4038             : 
    4039             : /*****************************************************************************
    4040             :  *          SIMPLIFYING SUBQUERY TARGETLISTS
    4041             :  *****************************************************************************/
    4042             : 
    4043             : /*
    4044             :  * remove_unused_subquery_outputs
    4045             :  *      Remove subquery targetlist items we don't need
    4046             :  *
    4047             :  * It's possible, even likely, that the upper query does not read all the
    4048             :  * output columns of the subquery.  We can remove any such outputs that are
    4049             :  * not needed by the subquery itself (e.g., as sort/group columns) and do not
    4050             :  * affect semantics otherwise (e.g., volatile functions can't be removed).
    4051             :  * This is useful not only because we might be able to remove expensive-to-
    4052             :  * compute expressions, but because deletion of output columns might allow
    4053             :  * optimizations such as join removal to occur within the subquery.
    4054             :  *
    4055             :  * extra_used_attrs can be passed as non-NULL to mark any columns (offset by
    4056             :  * FirstLowInvalidHeapAttributeNumber) that we should not remove.  This
    4057             :  * parameter is modified by the function, so callers must make a copy if they
    4058             :  * need to use the passed in Bitmapset after calling this function.
    4059             :  *
    4060             :  * To avoid affecting column numbering in the targetlist, we don't physically
    4061             :  * remove unused tlist entries, but rather replace their expressions with NULL
    4062             :  * constants.  This is implemented by modifying subquery->targetList.
    4063             :  */
    4064             : static void
    4065        6596 : remove_unused_subquery_outputs(Query *subquery, RelOptInfo *rel,
    4066             :                                Bitmapset *extra_used_attrs)
    4067             : {
    4068             :     Bitmapset  *attrs_used;
    4069             :     ListCell   *lc;
    4070             : 
    4071             :     /*
    4072             :      * Just point directly to extra_used_attrs. No need to bms_copy as none of
    4073             :      * the current callers use the Bitmapset after calling this function.
    4074             :      */
    4075        6596 :     attrs_used = extra_used_attrs;
    4076             : 
    4077             :     /*
    4078             :      * Do nothing if subquery has UNION/INTERSECT/EXCEPT: in principle we
    4079             :      * could update all the child SELECTs' tlists, but it seems not worth the
    4080             :      * trouble presently.
    4081             :      */
    4082        6596 :     if (subquery->setOperations)
    4083         966 :         return;
    4084             : 
    4085             :     /*
    4086             :      * If subquery has regular DISTINCT (not DISTINCT ON), we're wasting our
    4087             :      * time: all its output columns must be used in the distinctClause.
    4088             :      */
    4089        6088 :     if (subquery->distinctClause && !subquery->hasDistinctOn)
    4090         220 :         return;
    4091             : 
    4092             :     /*
    4093             :      * Collect a bitmap of all the output column numbers used by the upper
    4094             :      * query.
    4095             :      *
    4096             :      * Add all the attributes needed for joins or final output.  Note: we must
    4097             :      * look at rel's targetlist, not the attr_needed data, because attr_needed
    4098             :      * isn't computed for inheritance child rels, cf set_append_rel_size().
    4099             :      * (XXX might be worth changing that sometime.)
    4100             :      */
    4101        5868 :     pull_varattnos((Node *) rel->reltarget->exprs, rel->relid, &attrs_used);
    4102             : 
    4103             :     /* Add all the attributes used by un-pushed-down restriction clauses. */
    4104        6502 :     foreach(lc, rel->baserestrictinfo)
    4105             :     {
    4106         634 :         RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
    4107             : 
    4108         634 :         pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
    4109             :     }
    4110             : 
    4111             :     /*
    4112             :      * If there's a whole-row reference to the subquery, we can't remove
    4113             :      * anything.
    4114             :      */
    4115        5868 :     if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, attrs_used))
    4116         238 :         return;
    4117             : 
    4118             :     /*
    4119             :      * Run through the tlist and zap entries we don't need.  It's okay to
    4120             :      * modify the tlist items in-place because set_subquery_pathlist made a
    4121             :      * copy of the subquery.
    4122             :      */
    4123       24988 :     foreach(lc, subquery->targetList)
    4124             :     {
    4125       19358 :         TargetEntry *tle = (TargetEntry *) lfirst(lc);
    4126       19358 :         Node       *texpr = (Node *) tle->expr;
    4127             : 
    4128             :         /*
    4129             :          * If it has a sortgroupref number, it's used in some sort/group
    4130             :          * clause so we'd better not remove it.  Also, don't remove any
    4131             :          * resjunk columns, since their reason for being has nothing to do
    4132             :          * with anybody reading the subquery's output.  (It's likely that
    4133             :          * resjunk columns in a sub-SELECT would always have ressortgroupref
    4134             :          * set, but even if they don't, it seems imprudent to remove them.)
    4135             :          */
    4136       19358 :         if (tle->ressortgroupref || tle->resjunk)
    4137        2188 :             continue;
    4138             : 
    4139             :         /*
    4140             :          * If it's used by the upper query, we can't remove it.
    4141             :          */
    4142       17170 :         if (bms_is_member(tle->resno - FirstLowInvalidHeapAttributeNumber,
    4143             :                           attrs_used))
    4144       11630 :             continue;
    4145             : 
    4146             :         /*
    4147             :          * If it contains a set-returning function, we can't remove it since
    4148             :          * that could change the number of rows returned by the subquery.
    4149             :          */
    4150        6424 :         if (subquery->hasTargetSRFs &&
    4151         884 :             expression_returns_set(texpr))
    4152         628 :             continue;
    4153             : 
    4154             :         /*
    4155             :          * If it contains volatile functions, we daren't remove it for fear
    4156             :          * that the user is expecting their side-effects to happen.
    4157             :          */
    4158        4912 :         if (contain_volatile_functions(texpr))
    4159          26 :             continue;
    4160             : 
    4161             :         /*
    4162             :          * OK, we don't need it.  Replace the expression with a NULL constant.
    4163             :          * Preserve the exposed type of the expression, in case something
    4164             :          * looks at the rowtype of the subquery's result.
    4165             :          */
    4166        4886 :         tle->expr = (Expr *) makeNullConst(exprType(texpr),
    4167             :                                            exprTypmod(texpr),
    4168             :                                            exprCollation(texpr));
    4169             :     }
    4170             : }
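                     : 
                     : /*
                     :  * Illustrative sketch (editor's note, not part of allpaths.c).  Assuming
                     :  * a table t(a, b, c) and a subquery that cannot be pulled up, e.g.
                     :  *
                     :  *   SELECT a FROM (SELECT a, b, c FROM t LIMIT 10) ss;
                     :  *
                     :  * only "a" is read by the upper query, so the tlist entries for "b" and
                     :  * "c" are replaced with NULL constants of their original exposed types,
                     :  * narrowing the rows produced by the subquery without renumbering its
                     :  * output columns.
                     :  */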
    4171             : 
    4172             : /*
    4173             :  * create_partial_bitmap_paths
    4174             :  *    Build partial bitmap heap path for the relation
    4175             :  */
    4176             : void
    4177      121024 : create_partial_bitmap_paths(PlannerInfo *root, RelOptInfo *rel,
    4178             :                             Path *bitmapqual)
    4179             : {
    4180             :     int         parallel_workers;
    4181             :     double      pages_fetched;
    4182             : 
    4183             :     /* Compute heap pages for bitmap heap scan */
    4184      121024 :     pages_fetched = compute_bitmap_pages(root, rel, bitmapqual, 1.0,
    4185             :                                          NULL, NULL);
    4186             : 
    4187      121024 :     parallel_workers = compute_parallel_worker(rel, pages_fetched, -1,
    4188             :                                                max_parallel_workers_per_gather);
    4189             : 
    4190      121024 :     if (parallel_workers <= 0)
    4191      116968 :         return;
    4192             : 
    4193        4056 :     add_partial_path(rel, (Path *) create_bitmap_heap_path(root, rel,
    4194             :                                                            bitmapqual, rel->lateral_relids, 1.0, parallel_workers));
    4195             : }
    4196             : 
    4197             : /*
    4198             :  * Compute the number of parallel workers that should be used to scan a
    4199             :  * relation.  We compute the parallel workers based on the size of the heap to
    4200             :  * be scanned and the size of the index to be scanned, then choose a minimum
    4201             :  * of those.
    4202             :  *
    4203             :  * "heap_pages" is the number of pages from the table that we expect to scan, or
    4204             :  * -1 if we don't expect to scan any.
    4205             :  *
    4206             :  * "index_pages" is the number of pages from the index that we expect to scan, or
    4207             :  * -1 if we don't expect to scan any.
    4208             :  *
    4209             :  * "max_workers" is caller's limit on the number of workers.  This typically
    4210             :  * comes from a GUC.
    4211             :  */
    4212             : int
    4213      593292 : compute_parallel_worker(RelOptInfo *rel, double heap_pages, double index_pages,
    4214             :                         int max_workers)
    4215             : {
    4216      593292 :     int         parallel_workers = 0;
    4217             : 
    4218             :     /*
    4219             :      * If the user has set the parallel_workers reloption, use that; otherwise
    4220             :      * select a default number of workers.
    4221             :      */
    4222      593292 :     if (rel->rel_parallel_workers != -1)
    4223        1824 :         parallel_workers = rel->rel_parallel_workers;
    4224             :     else
    4225             :     {
    4226             :         /*
    4227             :          * If the number of pages being scanned is insufficient to justify a
    4228             :          * parallel scan, just return zero ... unless it's an inheritance
    4229             :          * child. In that case, we want to generate a parallel path here
    4230             :          * anyway.  It might not be worthwhile just for this relation, but
    4231             :          * when combined with all of its inheritance siblings it may well pay
    4232             :          * off.
    4233             :          */
    4234      591468 :         if (rel->reloptkind == RELOPT_BASEREL &&
    4235      555100 :             ((heap_pages >= 0 && heap_pages < min_parallel_table_scan_size) ||
    4236       17564 :              (index_pages >= 0 && index_pages < min_parallel_index_scan_size)))
    4237      554454 :             return 0;
    4238             : 
    4239       37014 :         if (heap_pages >= 0)
    4240             :         {
    4241             :             int         heap_parallel_threshold;
    4242       35052 :             int         heap_parallel_workers = 1;
    4243             : 
    4244             :             /*
    4245             :              * Select the number of workers based on the log of the size of
    4246             :              * the relation.  This probably needs to be a good deal more
    4247             :              * sophisticated, but we need something here for now.  Note that
    4248             :              * the upper limit of the min_parallel_table_scan_size GUC is
    4249             :              * chosen to prevent overflow here.
    4250             :              */
    4251       35052 :             heap_parallel_threshold = Max(min_parallel_table_scan_size, 1);
    4252       39652 :             while (heap_pages >= (BlockNumber) (heap_parallel_threshold * 3))
    4253             :             {
    4254        4600 :                 heap_parallel_workers++;
    4255        4600 :                 heap_parallel_threshold *= 3;
    4256        4600 :                 if (heap_parallel_threshold > INT_MAX / 3)
    4257           0 :                     break;      /* avoid overflow */
    4258             :             }
    4259             : 
    4260       35052 :             parallel_workers = heap_parallel_workers;
    4261             :         }
    4262             : 
    4263       37014 :         if (index_pages >= 0)
    4264             :         {
    4265        9324 :             int         index_parallel_workers = 1;
    4266             :             int         index_parallel_threshold;
    4267             : 
    4268             :             /* same calculation as for heap_pages above */
    4269        9324 :             index_parallel_threshold = Max(min_parallel_index_scan_size, 1);
    4270        9576 :             while (index_pages >= (BlockNumber) (index_parallel_threshold * 3))
    4271             :             {
    4272         252 :                 index_parallel_workers++;
    4273         252 :                 index_parallel_threshold *= 3;
    4274         252 :                 if (index_parallel_threshold > INT_MAX / 3)
    4275           0 :                     break;      /* avoid overflow */
    4276             :             }
    4277             : 
    4278        9324 :             if (parallel_workers > 0)
    4279        7362 :                 parallel_workers = Min(parallel_workers, index_parallel_workers);
    4280             :             else
    4281        1962 :                 parallel_workers = index_parallel_workers;
    4282             :         }
    4283             :     }
    4284             : 
    4285             :     /* In no case use more than caller supplied maximum number of workers */
    4286       38838 :     parallel_workers = Min(parallel_workers, max_workers);
    4287             : 
    4288       38838 :     return parallel_workers;
    4289             : }
    4290             : 
    4291             : /*
    4292             :  * generate_partitionwise_join_paths
    4293             :  *      Create paths representing partitionwise join for given partitioned
    4294             :  *      join relation.
    4295             :  *
    4296             :  * This must not be called until after we are done adding paths for all
    4297             :  * child-joins. Otherwise, add_path might delete a path to which some path
    4298             :  * generated here has a reference.
    4299             :  */
    4300             : void
    4301      170462 : generate_partitionwise_join_paths(PlannerInfo *root, RelOptInfo *rel)
    4302             : {
    4303      170462 :     List       *live_children = NIL;
    4304             :     int         cnt_parts;
    4305             :     int         num_parts;
    4306             :     RelOptInfo **part_rels;
    4307             : 
    4308             :     /* Handle only join relations here. */
    4309      170462 :     if (!IS_JOIN_REL(rel))
    4310           0 :         return;
    4311             : 
    4312             :     /* We've nothing to do if the relation is not partitioned. */
    4313      170462 :     if (!IS_PARTITIONED_REL(rel))
    4314      168890 :         return;
    4315             : 
    4316             :     /* The relation should have consider_partitionwise_join set. */
    4317             :     Assert(rel->consider_partitionwise_join);
    4318             : 
    4319             :     /* Guard against stack overflow due to overly deep partition hierarchy. */
    4320        1572 :     check_stack_depth();
    4321             : 
    4322        1572 :     num_parts = rel->nparts;
    4323        1572 :     part_rels = rel->part_rels;
    4324             : 
    4325             :     /* Collect non-dummy child-joins. */
    4326        5906 :     for (cnt_parts = 0; cnt_parts < num_parts; cnt_parts++)
    4327             :     {
    4328        4334 :         RelOptInfo *child_rel = part_rels[cnt_parts];
    4329             : 
    4330             :         /* If it's been pruned entirely, it's certainly dummy. */
    4331        4334 :         if (child_rel == NULL)
    4332          52 :             continue;
    4333             : 
    4334             :         /* Make partitionwise join paths for this partitioned child-join. */
    4335        4282 :         generate_partitionwise_join_paths(root, child_rel);
    4336             : 
    4337             :         /* If we failed to make any path for this child, we must give up. */
    4338        4282 :         if (child_rel->pathlist == NIL)
    4339             :         {
    4340             :             /*
    4341             :              * Mark the parent joinrel as unpartitioned so that later
    4342             :              * functions treat it correctly.
    4343             :              */
    4344           0 :             rel->nparts = 0;
    4345           0 :             return;
    4346             :         }
    4347             : 
    4348             :         /* Else, identify the cheapest path for it. */
    4349        4282 :         set_cheapest(child_rel);
    4350             : 
    4351             :         /* Dummy children need not be scanned, so ignore those. */
    4352        4282 :         if (IS_DUMMY_REL(child_rel))
    4353           0 :             continue;
    4354             : 
    4355             : #ifdef OPTIMIZER_DEBUG
    4356             :         pprint(child_rel);
    4357             : #endif
    4358             : 
    4359        4282 :         live_children = lappend(live_children, child_rel);
    4360             :     }
    4361             : 
    4362             :     /* If all child-joins are dummy, parent join is also dummy. */
    4363        1572 :     if (!live_children)
    4364             :     {
    4365           0 :         mark_dummy_rel(rel);
    4366           0 :         return;
    4367             :     }
    4368             : 
    4369             :     /* Build additional paths for this rel from child-join paths. */
    4370        1572 :     add_paths_to_append_rel(root, rel, live_children);
    4371        1572 :     list_free(live_children);
    4372             : }

Generated by: LCOV version 1.14