Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * planner.c
4 : * The query optimizer external interface.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/optimizer/plan/planner.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include <limits.h>
19 : #include <math.h>
20 :
21 : #include "access/genam.h"
22 : #include "access/parallel.h"
23 : #include "access/sysattr.h"
24 : #include "access/table.h"
25 : #include "catalog/pg_aggregate.h"
26 : #include "catalog/pg_inherits.h"
27 : #include "catalog/pg_proc.h"
28 : #include "catalog/pg_type.h"
29 : #include "executor/executor.h"
30 : #include "foreign/fdwapi.h"
31 : #include "jit/jit.h"
32 : #include "lib/bipartite_match.h"
33 : #include "lib/knapsack.h"
34 : #include "miscadmin.h"
35 : #include "nodes/makefuncs.h"
36 : #include "nodes/nodeFuncs.h"
37 : #ifdef OPTIMIZER_DEBUG
38 : #include "nodes/print.h"
39 : #endif
40 : #include "nodes/supportnodes.h"
41 : #include "optimizer/appendinfo.h"
42 : #include "optimizer/clauses.h"
43 : #include "optimizer/cost.h"
44 : #include "optimizer/optimizer.h"
45 : #include "optimizer/paramassign.h"
46 : #include "optimizer/pathnode.h"
47 : #include "optimizer/paths.h"
48 : #include "optimizer/plancat.h"
49 : #include "optimizer/planmain.h"
50 : #include "optimizer/planner.h"
51 : #include "optimizer/prep.h"
52 : #include "optimizer/subselect.h"
53 : #include "optimizer/tlist.h"
54 : #include "parser/analyze.h"
55 : #include "parser/parse_agg.h"
56 : #include "parser/parse_clause.h"
57 : #include "parser/parse_relation.h"
58 : #include "parser/parsetree.h"
59 : #include "partitioning/partdesc.h"
60 : #include "rewrite/rewriteManip.h"
61 : #include "utils/acl.h"
62 : #include "utils/backend_status.h"
63 : #include "utils/lsyscache.h"
64 : #include "utils/rel.h"
65 : #include "utils/selfuncs.h"
66 :
67 : /* GUC parameters */
68 : double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
69 : int debug_parallel_query = DEBUG_PARALLEL_OFF;
70 : bool parallel_leader_participation = true;
71 : bool enable_distinct_reordering = true;
72 :
73 : /* Hook for plugins to get control in planner() */
74 : planner_hook_type planner_hook = NULL;
75 :
76 : /* Hook for plugins to get control after PlannerGlobal is initialized */
77 : planner_setup_hook_type planner_setup_hook = NULL;
78 :
79 : /* Hook for plugins to get control before PlannerGlobal is discarded */
80 : planner_shutdown_hook_type planner_shutdown_hook = NULL;
81 :
82 : /* Hook for plugins to get control when grouping_planner() plans upper rels */
83 : create_upper_paths_hook_type create_upper_paths_hook = NULL;
84 :
85 :
86 : /* Expression kind codes for preprocess_expression */
87 : #define EXPRKIND_QUAL 0
88 : #define EXPRKIND_TARGET 1
89 : #define EXPRKIND_RTFUNC 2
90 : #define EXPRKIND_RTFUNC_LATERAL 3
91 : #define EXPRKIND_VALUES 4
92 : #define EXPRKIND_VALUES_LATERAL 5
93 : #define EXPRKIND_LIMIT 6
94 : #define EXPRKIND_APPINFO 7
95 : #define EXPRKIND_PHV 8
96 : #define EXPRKIND_TABLESAMPLE 9
97 : #define EXPRKIND_ARBITER_ELEM 10
98 : #define EXPRKIND_TABLEFUNC 11
99 : #define EXPRKIND_TABLEFUNC_LATERAL 12
100 : #define EXPRKIND_GROUPEXPR 13
101 :
102 : /*
103 : * Data specific to grouping sets
104 : */
105 : typedef struct
106 : {
107 : List *rollups;
108 : List *hash_sets_idx;
109 : double dNumHashGroups;
110 : bool any_hashable;
111 : Bitmapset *unsortable_refs;
112 : Bitmapset *unhashable_refs;
113 : List *unsortable_sets;
114 : int *tleref_to_colnum_map;
115 : } grouping_sets_data;
116 :
117 : /*
118 : * Temporary structure for use during WindowClause reordering in order to be
119 : * able to sort WindowClauses on partitioning/ordering prefix.
120 : */
121 : typedef struct
122 : {
123 : WindowClause *wc;
124 : List *uniqueOrder; /* A List of unique ordering/partitioning
125 : * clauses per Window */
126 : } WindowClauseSortData;
127 :
128 : /* Passthrough data for standard_qp_callback */
129 : typedef struct
130 : {
131 : List *activeWindows; /* active windows, if any */
132 : grouping_sets_data *gset_data; /* grouping sets data, if any */
133 : SetOperationStmt *setop; /* parent set operation or NULL if not a
134 : * subquery belonging to a set operation */
135 : } standard_qp_extra;
136 :
137 : /* Local functions */
138 : static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
139 : static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
140 : static void grouping_planner(PlannerInfo *root, double tuple_fraction,
141 : SetOperationStmt *setops);
142 : static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
143 : static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
144 : int *tleref_to_colnum_map);
145 : static void preprocess_rowmarks(PlannerInfo *root);
146 : static double preprocess_limit(PlannerInfo *root,
147 : double tuple_fraction,
148 : int64 *offset_est, int64 *count_est);
149 : static List *preprocess_groupclause(PlannerInfo *root, List *force);
150 : static List *extract_rollup_sets(List *groupingSets);
151 : static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
152 : static void standard_qp_callback(PlannerInfo *root, void *extra);
153 : static double get_number_of_groups(PlannerInfo *root,
154 : double path_rows,
155 : grouping_sets_data *gd,
156 : List *target_list);
157 : static RelOptInfo *create_grouping_paths(PlannerInfo *root,
158 : RelOptInfo *input_rel,
159 : PathTarget *target,
160 : bool target_parallel_safe,
161 : grouping_sets_data *gd);
162 : static bool is_degenerate_grouping(PlannerInfo *root);
163 : static void create_degenerate_grouping_paths(PlannerInfo *root,
164 : RelOptInfo *input_rel,
165 : RelOptInfo *grouped_rel);
166 : static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
167 : PathTarget *target, bool target_parallel_safe,
168 : Node *havingQual);
169 : static void create_ordinary_grouping_paths(PlannerInfo *root,
170 : RelOptInfo *input_rel,
171 : RelOptInfo *grouped_rel,
172 : const AggClauseCosts *agg_costs,
173 : grouping_sets_data *gd,
174 : GroupPathExtraData *extra,
175 : RelOptInfo **partially_grouped_rel_p);
176 : static void consider_groupingsets_paths(PlannerInfo *root,
177 : RelOptInfo *grouped_rel,
178 : Path *path,
179 : bool is_sorted,
180 : bool can_hash,
181 : grouping_sets_data *gd,
182 : const AggClauseCosts *agg_costs,
183 : double dNumGroups);
184 : static RelOptInfo *create_window_paths(PlannerInfo *root,
185 : RelOptInfo *input_rel,
186 : PathTarget *input_target,
187 : PathTarget *output_target,
188 : bool output_target_parallel_safe,
189 : WindowFuncLists *wflists,
190 : List *activeWindows);
191 : static void create_one_window_path(PlannerInfo *root,
192 : RelOptInfo *window_rel,
193 : Path *path,
194 : PathTarget *input_target,
195 : PathTarget *output_target,
196 : WindowFuncLists *wflists,
197 : List *activeWindows);
198 : static RelOptInfo *create_distinct_paths(PlannerInfo *root,
199 : RelOptInfo *input_rel,
200 : PathTarget *target);
201 : static void create_partial_distinct_paths(PlannerInfo *root,
202 : RelOptInfo *input_rel,
203 : RelOptInfo *final_distinct_rel,
204 : PathTarget *target);
205 : static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
206 : RelOptInfo *input_rel,
207 : RelOptInfo *distinct_rel);
208 : static List *get_useful_pathkeys_for_distinct(PlannerInfo *root,
209 : List *needed_pathkeys,
210 : List *path_pathkeys);
211 : static RelOptInfo *create_ordered_paths(PlannerInfo *root,
212 : RelOptInfo *input_rel,
213 : PathTarget *target,
214 : bool target_parallel_safe,
215 : double limit_tuples);
216 : static PathTarget *make_group_input_target(PlannerInfo *root,
217 : PathTarget *final_target);
218 : static PathTarget *make_partial_grouping_target(PlannerInfo *root,
219 : PathTarget *grouping_target,
220 : Node *havingQual);
221 : static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
222 : static void optimize_window_clauses(PlannerInfo *root,
223 : WindowFuncLists *wflists);
224 : static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
225 : static void name_active_windows(List *activeWindows);
226 : static PathTarget *make_window_input_target(PlannerInfo *root,
227 : PathTarget *final_target,
228 : List *activeWindows);
229 : static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
230 : List *tlist);
231 : static PathTarget *make_sort_input_target(PlannerInfo *root,
232 : PathTarget *final_target,
233 : bool *have_postponed_srfs);
234 : static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
235 : List *targets, List *targets_contain_srfs);
236 : static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
237 : RelOptInfo *grouped_rel,
238 : RelOptInfo *partially_grouped_rel,
239 : const AggClauseCosts *agg_costs,
240 : grouping_sets_data *gd,
241 : GroupPathExtraData *extra);
242 : static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
243 : RelOptInfo *grouped_rel,
244 : RelOptInfo *input_rel,
245 : grouping_sets_data *gd,
246 : GroupPathExtraData *extra,
247 : bool force_rel_creation);
248 : static Path *make_ordered_path(PlannerInfo *root,
249 : RelOptInfo *rel,
250 : Path *path,
251 : Path *cheapest_path,
252 : List *pathkeys,
253 : double limit_tuples);
254 : static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
255 : static bool can_partial_agg(PlannerInfo *root);
256 : static void apply_scanjoin_target_to_paths(PlannerInfo *root,
257 : RelOptInfo *rel,
258 : List *scanjoin_targets,
259 : List *scanjoin_targets_contain_srfs,
260 : bool scanjoin_target_parallel_safe,
261 : bool tlist_same_exprs);
262 : static void create_partitionwise_grouping_paths(PlannerInfo *root,
263 : RelOptInfo *input_rel,
264 : RelOptInfo *grouped_rel,
265 : RelOptInfo *partially_grouped_rel,
266 : const AggClauseCosts *agg_costs,
267 : grouping_sets_data *gd,
268 : PartitionwiseAggregateType patype,
269 : GroupPathExtraData *extra);
270 : static bool group_by_has_partkey(RelOptInfo *input_rel,
271 : List *targetList,
272 : List *groupClause);
273 : static int common_prefix_cmp(const void *a, const void *b);
274 : static List *generate_setop_child_grouplist(SetOperationStmt *op,
275 : List *targetlist);
276 : static void create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
277 : List *sortPathkeys, List *groupClause,
278 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
279 : static void create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
280 : List *sortPathkeys, List *groupClause,
281 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
282 :
283 :
284 : /*****************************************************************************
285 : *
286 : * Query optimizer entry point
287 : *
288 : * Inputs:
289 : * parse: an analyzed-and-rewritten query tree for an optimizable statement
290 : * query_string: source text for the query tree (used for error reports)
291 : * cursorOptions: bitmask of CURSOR_OPT_XXX flags, see parsenodes.h
292 : * boundParams: passed-in parameter values, or NULL if none
293 : * es: ExplainState if being called from EXPLAIN, else NULL
294 : *
295 : * The result is a PlannedStmt tree.
296 : *
297 : * PARAM_EXTERN Param nodes within the parse tree can be replaced by Consts
298 : * using values from boundParams, if those values are marked PARAM_FLAG_CONST.
299 : * Parameter values not so marked are still relied on for estimation purposes.
300 : *
301 : * The ExplainState pointer is not currently used by the core planner, but it
302 : * is passed through to some planner hooks so that they can report information
303 : * back to EXPLAIN extension hooks.
304 : *
305 : * To support loadable plugins that monitor or modify planner behavior,
306 : * we provide a hook variable that lets a plugin get control before and
307 : * after the standard planning process. The plugin would normally call
308 : * standard_planner(); a minimal sketch of such a hook follows planner() below.
309 : *
310 : * Note to plugin authors: standard_planner() scribbles on its Query input,
311 : * so you'd better copy that data structure if you want to plan more than once.
312 : *
313 : *****************************************************************************/
314 : PlannedStmt *
315 460916 : planner(Query *parse, const char *query_string, int cursorOptions,
316 : ParamListInfo boundParams, ExplainState *es)
317 : {
318 : PlannedStmt *result;
319 :
320 460916 : if (planner_hook)
321 96206 : result = (*planner_hook) (parse, query_string, cursorOptions,
322 : boundParams, es);
323 : else
324 364710 : result = standard_planner(parse, query_string, cursorOptions,
325 : boundParams, es);
326 :
327 456078 : pgstat_report_plan_id(result->planId, false);
328 :
329 456078 : return result;
330 : }
331 :
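A minimal sketch of the hook protocol described above, written as a hypothetical loadable module (the names sketch_planner and prev_planner_hook are illustrative, and the include list is abbreviated); it chains to any previously installed hook and otherwise falls back to standard_planner():

#include "postgres.h"

#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

/* Planner wrapper: delegate to the previous hook, else standard_planner() */
static PlannedStmt *
sketch_planner(Query *parse, const char *query_string, int cursorOptions,
			   ParamListInfo boundParams, ExplainState *es)
{
	if (prev_planner_hook)
		return prev_planner_hook(parse, query_string, cursorOptions,
								 boundParams, es);
	return standard_planner(parse, query_string, cursorOptions,
							boundParams, es);
}

void
_PG_init(void)
{
	/* save whatever hook was installed before us, then take over */
	prev_planner_hook = planner_hook;
	planner_hook = sketch_planner;
}

Since standard_planner() scribbles on its Query input (see the note above), a module that wants to plan the same Query more than once should copyObject() it first.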
332 : PlannedStmt *
333 460916 : standard_planner(Query *parse, const char *query_string, int cursorOptions,
334 : ParamListInfo boundParams, ExplainState *es)
335 : {
336 : PlannedStmt *result;
337 : PlannerGlobal *glob;
338 : double tuple_fraction;
339 : PlannerInfo *root;
340 : RelOptInfo *final_rel;
341 : Path *best_path;
342 : Plan *top_plan;
343 : ListCell *lp,
344 : *lr;
345 :
346 : /*
347 : * Set up global state for this planner invocation. This data is needed
348 : * across all levels of sub-Query that might exist in the given command,
349 : * so we keep it in a separate struct that's linked to by each per-Query
350 : * PlannerInfo.
351 : */
352 460916 : glob = makeNode(PlannerGlobal);
353 :
354 460916 : glob->boundParams = boundParams;
355 460916 : glob->subplans = NIL;
356 460916 : glob->subpaths = NIL;
357 460916 : glob->subroots = NIL;
358 460916 : glob->rewindPlanIDs = NULL;
359 460916 : glob->finalrtable = NIL;
360 460916 : glob->allRelids = NULL;
361 460916 : glob->prunableRelids = NULL;
362 460916 : glob->finalrteperminfos = NIL;
363 460916 : glob->finalrowmarks = NIL;
364 460916 : glob->resultRelations = NIL;
365 460916 : glob->appendRelations = NIL;
366 460916 : glob->partPruneInfos = NIL;
367 460916 : glob->relationOids = NIL;
368 460916 : glob->invalItems = NIL;
369 460916 : glob->paramExecTypes = NIL;
370 460916 : glob->lastPHId = 0;
371 460916 : glob->lastRowMarkId = 0;
372 460916 : glob->lastPlanNodeId = 0;
373 460916 : glob->transientPlan = false;
374 460916 : glob->dependsOnRole = false;
375 460916 : glob->partition_directory = NULL;
376 460916 : glob->rel_notnullatts_hash = NULL;
377 :
378 : /*
379 : * Assess whether it's feasible to use parallel mode for this query. We
380 : * can't do this in a standalone backend, or if the command will try to
381 : * modify any data, or if this is a cursor operation, or if GUCs are set
382 : * to values that don't permit parallelism, or if parallel-unsafe
383 : * functions are present in the query tree.
384 : *
385 : * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
386 : * MATERIALIZED VIEW to use parallel plans, but this is safe only because
387 : * the command is writing into a completely new table which workers won't
388 : * be able to see. If the workers could see the table, the fact that
389 : * group locking would cause them to ignore the leader's heavyweight GIN
390 : * page locks would make this unsafe. We'll have to fix that somehow if
391 : * we want to allow parallel inserts in general; updates and deletes have
392 : * additional problems especially around combo CIDs.)
393 : *
394 : * For now, we don't try to use parallel mode if we're running inside a
395 : * parallel worker. We might eventually be able to relax this
396 : * restriction, but for now it seems best not to have parallel workers
397 : * trying to create their own parallel workers.
398 : */
399 460916 : if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
400 432670 : IsUnderPostmaster &&
401 432670 : parse->commandType == CMD_SELECT &&
402 350060 : !parse->hasModifyingCTE &&
403 349916 : max_parallel_workers_per_gather > 0 &&
404 349290 : !IsParallelWorker())
405 : {
406 : /* all the cheap tests pass, so scan the query tree */
407 349242 : glob->maxParallelHazard = max_parallel_hazard(parse);
408 349242 : glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
409 : }
410 : else
411 : {
412 : /* skip the query tree scan, just assume it's unsafe */
413 111674 : glob->maxParallelHazard = PROPARALLEL_UNSAFE;
414 111674 : glob->parallelModeOK = false;
415 : }
416 :
417 : /*
418 : * glob->parallelModeNeeded is normally set to false here and changed to
419 : * true during plan creation if a Gather or Gather Merge plan is actually
420 : * created (cf. create_gather_plan, create_gather_merge_plan).
421 : *
422 : * However, if debug_parallel_query = on or debug_parallel_query =
423 : * regress, then we impose parallel mode whenever it's safe to do so, even
424 : * if the final plan doesn't use parallelism. It's not safe to do so if
425 : * the query contains anything parallel-unsafe; parallelModeOK will be
426 : * false in that case. Note that parallelModeOK can't change after this
427 : * point. Otherwise, everything in the query is either parallel-safe or
428 : * parallel-restricted, and in either case it should be OK to impose
429 : * parallel-mode restrictions. If that ends up breaking something, then
430 : * either some function the user included in the query is incorrectly
431 : * labeled as parallel-safe or parallel-restricted when in reality it's
432 : * parallel-unsafe, or else the query planner itself has a bug.
433 : */
434 754662 : glob->parallelModeNeeded = glob->parallelModeOK &&
435 293746 : (debug_parallel_query != DEBUG_PARALLEL_OFF);
436 :
437 : /* Determine what fraction of the plan is likely to be scanned */
438 460916 : if (cursorOptions & CURSOR_OPT_FAST_PLAN)
439 : {
440 : /*
441 : * We have no real idea how many tuples the user will ultimately FETCH
442 : * from a cursor, but it is often the case that they don't want them
443 : * all, or would prefer a fast-start plan anyway so that they can
444 : * process some of the tuples sooner. Use a GUC parameter to decide
445 : * what fraction to optimize for.
446 : */
447 4708 : tuple_fraction = cursor_tuple_fraction;
448 :
449 : /*
450 : * We document cursor_tuple_fraction as simply being a fraction, which
451 : * means the edge cases 0 and 1 have to be treated specially here. We
452 : * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
453 : */
454 4708 : if (tuple_fraction >= 1.0)
455 0 : tuple_fraction = 0.0;
456 4708 : else if (tuple_fraction <= 0.0)
457 0 : tuple_fraction = 1e-10;
458 : }
459 : else
460 : {
461 : /* Default assumption is we need all the tuples */
462 456208 : tuple_fraction = 0.0;
463 : }
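/*
 * For illustration: with the default cursor_tuple_fraction of 0.1, a
 * declared cursor's plan is optimized for fetching roughly the first
 * 10% of the result, whereas an ordinary SELECT is planned to return
 * everything (tuple_fraction = 0.0).
 */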
464 :
465 : /* Allow plugins to take control after we've initialized "glob" */
466 460916 : if (planner_setup_hook)
467 0 : (*planner_setup_hook) (glob, parse, query_string, &tuple_fraction, es);
468 :
469 : /* primary planning entry point (may recurse for subqueries) */
470 460916 : root = subquery_planner(glob, parse, NULL, NULL, false, tuple_fraction,
471 : NULL);
472 :
473 : /* Select best Path and turn it into a Plan */
474 456474 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
475 456474 : best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
476 :
477 456474 : top_plan = create_plan(root, best_path);
478 :
479 : /*
480 : * If creating a plan for a scrollable cursor, make sure it can run
481 : * backwards on demand. Add a Material node at the top at need.
482 : */
483 456078 : if (cursorOptions & CURSOR_OPT_SCROLL)
484 : {
485 266 : if (!ExecSupportsBackwardScan(top_plan))
486 32 : top_plan = materialize_finished_plan(top_plan);
487 : }
488 :
489 : /*
490 : * Optionally add a Gather node for testing purposes, provided this is
491 : * actually a safe thing to do.
492 : *
493 : * We can add Gather even when top_plan has parallel-safe initPlans, but
494 : * then we have to move the initPlans to the Gather node because of
495 : * SS_finalize_plan's limitations. That would cause cosmetic breakage of
496 : * regression tests when debug_parallel_query = regress, because initPlans
497 : * that would normally appear on the top_plan move to the Gather, causing
498 : * them to disappear from EXPLAIN output. That doesn't seem worth kluging
499 : * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
500 : */
501 456078 : if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
502 194 : top_plan->parallel_safe &&
503 128 : (top_plan->initPlan == NIL ||
504 0 : debug_parallel_query != DEBUG_PARALLEL_REGRESS))
505 : {
506 128 : Gather *gather = makeNode(Gather);
507 : Cost initplan_cost;
508 : bool unsafe_initplans;
509 :
510 128 : gather->plan.targetlist = top_plan->targetlist;
511 128 : gather->plan.qual = NIL;
512 128 : gather->plan.lefttree = top_plan;
513 128 : gather->plan.righttree = NULL;
514 128 : gather->num_workers = 1;
515 128 : gather->single_copy = true;
516 128 : gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
517 :
518 : /* Transfer any initPlans to the new top node */
519 128 : gather->plan.initPlan = top_plan->initPlan;
520 128 : top_plan->initPlan = NIL;
521 :
522 : /*
523 : * Since this Gather has no parallel-aware descendants to signal to,
524 : * we don't need a rescan Param.
525 : */
526 128 : gather->rescan_param = -1;
527 :
528 : /*
529 : * Ideally we'd use cost_gather here, but setting up dummy path data
530 : * to satisfy it doesn't seem much cleaner than knowing what it does.
531 : */
532 128 : gather->plan.startup_cost = top_plan->startup_cost +
533 : parallel_setup_cost;
534 128 : gather->plan.total_cost = top_plan->total_cost +
535 128 : parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
536 128 : gather->plan.plan_rows = top_plan->plan_rows;
537 128 : gather->plan.plan_width = top_plan->plan_width;
538 128 : gather->plan.parallel_aware = false;
539 128 : gather->plan.parallel_safe = false;
540 :
541 : /*
542 : * Delete the initplans' cost from top_plan. We needn't add it to the
543 : * Gather node, since the above coding already included it there.
544 : */
545 128 : SS_compute_initplan_cost(gather->plan.initPlan,
546 : &initplan_cost, &unsafe_initplans);
547 128 : top_plan->startup_cost -= initplan_cost;
548 128 : top_plan->total_cost -= initplan_cost;
549 :
550 : /* use parallel mode for parallel plans. */
551 128 : root->glob->parallelModeNeeded = true;
552 :
553 128 : top_plan = &gather->plan;
554 : }
555 :
556 : /*
557 : * If any Params were generated, run through the plan tree and compute
558 : * each plan node's extParam/allParam sets. Ideally we'd merge this into
559 : * set_plan_references' tree traversal, but for now it has to be separate
560 : * because we need to visit subplans before, not after, the main plan.
561 : */
562 456078 : if (glob->paramExecTypes != NIL)
563 : {
564 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
565 204422 : forboth(lp, glob->subplans, lr, glob->subroots)
566 : {
567 44224 : Plan *subplan = (Plan *) lfirst(lp);
568 44224 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
569 :
570 44224 : SS_finalize_plan(subroot, subplan);
571 : }
572 160198 : SS_finalize_plan(root, top_plan);
573 : }
574 :
575 : /* final cleanup of the plan */
576 : Assert(glob->finalrtable == NIL);
577 : Assert(glob->finalrteperminfos == NIL);
578 : Assert(glob->finalrowmarks == NIL);
579 : Assert(glob->resultRelations == NIL);
580 : Assert(glob->appendRelations == NIL);
581 456078 : top_plan = set_plan_references(root, top_plan);
582 : /* ... and the subplans (both regular subplans and initplans) */
583 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
584 500302 : forboth(lp, glob->subplans, lr, glob->subroots)
585 : {
586 44224 : Plan *subplan = (Plan *) lfirst(lp);
587 44224 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
588 :
589 44224 : lfirst(lp) = set_plan_references(subroot, subplan);
590 : }
591 :
592 : /* build the PlannedStmt result */
593 456078 : result = makeNode(PlannedStmt);
594 :
595 456078 : result->commandType = parse->commandType;
596 456078 : result->queryId = parse->queryId;
597 456078 : result->planOrigin = PLAN_STMT_STANDARD;
598 456078 : result->hasReturning = (parse->returningList != NIL);
599 456078 : result->hasModifyingCTE = parse->hasModifyingCTE;
600 456078 : result->canSetTag = parse->canSetTag;
601 456078 : result->transientPlan = glob->transientPlan;
602 456078 : result->dependsOnRole = glob->dependsOnRole;
603 456078 : result->parallelModeNeeded = glob->parallelModeNeeded;
604 456078 : result->planTree = top_plan;
605 456078 : result->partPruneInfos = glob->partPruneInfos;
606 456078 : result->rtable = glob->finalrtable;
607 912156 : result->unprunableRelids = bms_difference(glob->allRelids,
608 456078 : glob->prunableRelids);
609 456078 : result->permInfos = glob->finalrteperminfos;
610 456078 : result->resultRelations = glob->resultRelations;
611 456078 : result->appendRelations = glob->appendRelations;
612 456078 : result->subplans = glob->subplans;
613 456078 : result->rewindPlanIDs = glob->rewindPlanIDs;
614 456078 : result->rowMarks = glob->finalrowmarks;
615 456078 : result->relationOids = glob->relationOids;
616 456078 : result->invalItems = glob->invalItems;
617 456078 : result->paramExecTypes = glob->paramExecTypes;
618 : /* utilityStmt should be null, but we might as well copy it */
619 456078 : result->utilityStmt = parse->utilityStmt;
620 456078 : result->stmt_location = parse->stmt_location;
621 456078 : result->stmt_len = parse->stmt_len;
622 :
623 456078 : result->jitFlags = PGJIT_NONE;
624 456078 : if (jit_enabled && jit_above_cost >= 0 &&
625 455328 : top_plan->total_cost > jit_above_cost)
626 : {
627 974 : result->jitFlags |= PGJIT_PERFORM;
628 :
629 : /*
630 : * Decide how much effort should be put into generating better code.
631 : */
632 974 : if (jit_optimize_above_cost >= 0 &&
633 974 : top_plan->total_cost > jit_optimize_above_cost)
634 432 : result->jitFlags |= PGJIT_OPT3;
635 974 : if (jit_inline_above_cost >= 0 &&
636 974 : top_plan->total_cost > jit_inline_above_cost)
637 432 : result->jitFlags |= PGJIT_INLINE;
638 :
639 : /*
640 : * Decide which operations should be JITed.
641 : */
642 974 : if (jit_expressions)
643 974 : result->jitFlags |= PGJIT_EXPR;
644 974 : if (jit_tuple_deforming)
645 974 : result->jitFlags |= PGJIT_DEFORM;
646 : }
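/*
 * For illustration: with jit = on and the default cost thresholds
 * (jit_above_cost = 100000, jit_optimize_above_cost =
 * jit_inline_above_cost = 500000), a plan with total_cost 600000 ends
 * up with PGJIT_PERFORM, PGJIT_OPT3, PGJIT_INLINE, PGJIT_EXPR and
 * PGJIT_DEFORM all set.
 */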
647 :
648 : /* Allow plugins to take control before we discard "glob" */
649 456078 : if (planner_shutdown_hook)
650 0 : (*planner_shutdown_hook) (glob, parse, query_string, result);
651 :
652 456078 : if (glob->partition_directory != NULL)
653 12148 : DestroyPartitionDirectory(glob->partition_directory);
654 :
655 456078 : return result;
656 : }
657 :
658 :
659 : /*--------------------
660 : * subquery_planner
661 : * Invokes the planner on a subquery. We recurse to here for each
662 : * sub-SELECT found in the query tree.
663 : *
664 : * glob is the global state for the current planner run.
665 : * parse is the querytree produced by the parser & rewriter.
666 : * plan_name is the name to assign to this subplan (NULL at the top level).
667 : * parent_root is the immediate parent Query's info (NULL at the top level).
668 : * hasRecursion is true if this is a recursive WITH query.
669 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
670 : * tuple_fraction is interpreted as explained for grouping_planner, below.
671 : * setops is used for set operation subqueries to provide the subquery with
672 : * the context in which it's being used so that Paths correctly sorted for the
673 : * set operation can be generated. NULL when not planning a set operation
674 : * child, or when a child of a set op that isn't interested in sorted input.
675 : *
676 : * Basically, this routine does the stuff that should only be done once
677 : * per Query object. It then calls grouping_planner. At one time,
678 : * grouping_planner could be invoked recursively on the same Query object;
679 : * that's not currently true, but we keep the separation between the two
680 : * routines anyway, in case we need it again someday.
681 : *
682 : * subquery_planner will be called recursively to handle sub-Query nodes
683 : * found within the query's expressions and rangetable.
684 : *
685 : * Returns the PlannerInfo struct ("root") that contains all data generated
686 : * while planning the subquery. In particular, the Path(s) attached to
687 : * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
688 : * cheapest way(s) to implement the query. The top level will select the
689 : * best Path and pass it through createplan.c to produce a finished Plan.
690 : *--------------------
691 : */
692 : PlannerInfo *
693 544394 : subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name,
694 : PlannerInfo *parent_root, bool hasRecursion,
695 : double tuple_fraction, SetOperationStmt *setops)
696 : {
697 : PlannerInfo *root;
698 : List *newWithCheckOptions;
699 : List *newHaving;
700 : bool hasOuterJoins;
701 : bool hasResultRTEs;
702 : RelOptInfo *final_rel;
703 : ListCell *l;
704 :
705 : /* Create a PlannerInfo data structure for this subquery */
706 544394 : root = makeNode(PlannerInfo);
707 544394 : root->parse = parse;
708 544394 : root->glob = glob;
709 544394 : root->query_level = parent_root ? parent_root->query_level + 1 : 1;
710 544394 : root->plan_name = plan_name;
711 544394 : root->parent_root = parent_root;
712 544394 : root->plan_params = NIL;
713 544394 : root->outer_params = NULL;
714 544394 : root->planner_cxt = CurrentMemoryContext;
715 544394 : root->init_plans = NIL;
716 544394 : root->cte_plan_ids = NIL;
717 544394 : root->multiexpr_params = NIL;
718 544394 : root->join_domains = NIL;
719 544394 : root->eq_classes = NIL;
720 544394 : root->ec_merging_done = false;
721 544394 : root->last_rinfo_serial = 0;
722 544394 : root->all_result_relids =
723 544394 : parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
724 544394 : root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
725 544394 : root->append_rel_list = NIL;
726 544394 : root->row_identity_vars = NIL;
727 544394 : root->rowMarks = NIL;
728 544394 : memset(root->upper_rels, 0, sizeof(root->upper_rels));
729 544394 : memset(root->upper_targets, 0, sizeof(root->upper_targets));
730 544394 : root->processed_groupClause = NIL;
731 544394 : root->processed_distinctClause = NIL;
732 544394 : root->processed_tlist = NIL;
733 544394 : root->update_colnos = NIL;
734 544394 : root->grouping_map = NULL;
735 544394 : root->minmax_aggs = NIL;
736 544394 : root->qual_security_level = 0;
737 544394 : root->hasPseudoConstantQuals = false;
738 544394 : root->hasAlternativeSubPlans = false;
739 544394 : root->placeholdersFrozen = false;
740 544394 : root->hasRecursion = hasRecursion;
741 544394 : root->assumeReplanning = false;
742 544394 : if (hasRecursion)
743 942 : root->wt_param_id = assign_special_exec_param(root);
744 : else
745 543452 : root->wt_param_id = -1;
746 544394 : root->non_recursive_path = NULL;
747 :
748 : /*
749 : * Create the top-level join domain. This won't have valid contents until
750 : * deconstruct_jointree fills it in, but the node needs to exist before
751 : * that so we can build EquivalenceClasses referencing it.
752 : */
753 544394 : root->join_domains = list_make1(makeNode(JoinDomain));
754 :
755 : /*
756 : * If there is a WITH list, process each WITH query and either convert it
757 : * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
758 : */
759 544394 : if (parse->cteList)
760 2926 : SS_process_ctes(root);
761 :
762 : /*
763 : * If it's a MERGE command, transform the joinlist as appropriate.
764 : */
765 544388 : transform_MERGE_to_join(parse);
766 :
767 : /*
768 : * Scan the rangetable for relation RTEs and retrieve the necessary
769 : * catalog information for each relation. Using this information, clear
770 : * the inh flag for any relation that has no children, collect not-null
771 : * attribute numbers for any relation that has column not-null
772 : * constraints, and expand virtual generated columns for any relation that
773 : * contains them. Note that this step does not descend into sublinks and
774 : * subqueries; if we pull up any sublinks or subqueries below, their
775 : * relation RTEs are processed just before pulling them up.
776 : */
777 544388 : parse = root->parse = preprocess_relation_rtes(root);
778 :
779 : /*
780 : * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
781 : * that we don't need so many special cases to deal with that situation.
782 : */
783 544388 : replace_empty_jointree(parse);
784 :
785 : /*
786 : * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
787 : * to transform them into joins. Note that this step does not descend
788 : * into subqueries; if we pull up any subqueries below, their SubLinks are
789 : * processed just before pulling them up.
790 : */
791 544388 : if (parse->hasSubLinks)
792 36882 : pull_up_sublinks(root);
793 :
794 : /*
795 : * Scan the rangetable for function RTEs, do const-simplification on them,
796 : * and then inline them if possible (producing subqueries that might get
797 : * pulled up next). Recursion issues here are handled in the same way as
798 : * for SubLinks.
799 : */
800 544388 : preprocess_function_rtes(root);
801 :
802 : /*
803 : * Check to see if any subqueries in the jointree can be merged into this
804 : * query.
805 : */
806 544382 : pull_up_subqueries(root);
807 :
808 : /*
809 : * If this is a simple UNION ALL query, flatten it into an appendrel. We
810 : * do this now because it requires applying pull_up_subqueries to the leaf
811 : * queries of the UNION ALL, which weren't touched above because they
812 : * weren't referenced by the jointree (they will be after we do this).
813 : */
814 544376 : if (parse->setOperations)
815 7004 : flatten_simple_union_all(root);
816 :
817 : /*
818 : * Survey the rangetable to see what kinds of entries are present. We can
819 : * skip some later processing if relevant SQL features are not used; for
820 : * example if there are no JOIN RTEs we can avoid the expense of doing
821 : * flatten_join_alias_vars(). This must be done after we have finished
822 : * adding rangetable entries, of course. (Note: actually, processing of
823 : * inherited or partitioned rels can cause RTEs for their child tables to
824 : * get added later; but those must all be RTE_RELATION entries, so they
825 : * don't invalidate the conclusions drawn here.)
826 : */
827 544376 : root->hasJoinRTEs = false;
828 544376 : root->hasLateralRTEs = false;
829 544376 : root->group_rtindex = 0;
830 544376 : hasOuterJoins = false;
831 544376 : hasResultRTEs = false;
832 1483218 : foreach(l, parse->rtable)
833 : {
834 938842 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
835 :
836 938842 : switch (rte->rtekind)
837 : {
838 95008 : case RTE_JOIN:
839 95008 : root->hasJoinRTEs = true;
840 95008 : if (IS_OUTER_JOIN(rte->jointype))
841 49030 : hasOuterJoins = true;
842 95008 : break;
843 202594 : case RTE_RESULT:
844 202594 : hasResultRTEs = true;
845 202594 : break;
846 4930 : case RTE_GROUP:
847 : Assert(parse->hasGroupRTE);
848 4930 : root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
849 4930 : break;
850 636310 : default:
851 : /* No work here for other RTE types */
852 636310 : break;
853 : }
854 :
855 938842 : if (rte->lateral)
856 11046 : root->hasLateralRTEs = true;
857 :
858 : /*
859 : * We can also determine the maximum security level required for any
860 : * securityQuals now. Addition of inheritance-child RTEs won't affect
861 : * this, because child tables don't have their own securityQuals; see
862 : * expand_single_inheritance_child().
863 : */
864 938842 : if (rte->securityQuals)
865 2784 : root->qual_security_level = Max(root->qual_security_level,
866 : list_length(rte->securityQuals));
867 : }
868 :
869 : /*
870 : * If we have now verified that the query target relation is
871 : * non-inheriting, mark it as a leaf target.
872 : */
873 544376 : if (parse->resultRelation)
874 : {
875 89636 : RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
876 :
877 89636 : if (!rte->inh)
878 86702 : root->leaf_result_relids =
879 86702 : bms_make_singleton(parse->resultRelation);
880 : }
881 :
882 : /*
883 : * This would be a convenient time to check access permissions for all
884 : * relations mentioned in the query, since it would be better to fail now,
885 : * before doing any detailed planning. However, for historical reasons,
886 : * we leave this to be done at executor startup.
887 : *
888 : * Note, however, that we do need to check access permissions for any view
889 : * relations mentioned in the query, in order to prevent information being
890 : * leaked by selectivity estimation functions, which only check view owner
891 : * permissions on underlying tables (see all_rows_selectable() and its
892 : * callers). This is a little ugly, because it means that access
893 : * permissions for views will be checked twice, which is another reason
894 : * why it would be better to do all the ACL checks here.
895 : */
896 1482094 : foreach(l, parse->rtable)
897 : {
898 938106 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
899 :
900 938106 : if (rte->perminfoindex != 0 &&
901 502110 : rte->relkind == RELKIND_VIEW)
902 : {
903 : RTEPermissionInfo *perminfo;
904 : bool result;
905 :
906 21198 : perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
907 21198 : result = ExecCheckOneRelPerms(perminfo);
908 21198 : if (!result)
909 388 : aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_VIEW,
910 388 : get_rel_name(perminfo->relid));
911 : }
912 : }
913 :
914 : /*
915 : * Preprocess RowMark information. We need to do this after subquery
916 : * pullup, so that all base relations are present.
917 : */
918 543988 : preprocess_rowmarks(root);
919 :
920 : /*
921 : * Set hasHavingQual to remember if HAVING clause is present. Needed
922 : * because preprocess_expression will reduce a constant-true condition to
923 : * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
924 : */
925 543988 : root->hasHavingQual = (parse->havingQual != NULL);
926 :
927 : /*
928 : * Do expression preprocessing on targetlist and quals, as well as other
929 : * random expressions in the querytree. Note that we do not need to
930 : * handle sort/group expressions explicitly, because they are actually
931 : * part of the targetlist.
932 : */
933 540018 : parse->targetList = (List *)
934 543988 : preprocess_expression(root, (Node *) parse->targetList,
935 : EXPRKIND_TARGET);
936 :
937 540018 : newWithCheckOptions = NIL;
938 542956 : foreach(l, parse->withCheckOptions)
939 : {
940 2938 : WithCheckOption *wco = lfirst_node(WithCheckOption, l);
941 :
942 2938 : wco->qual = preprocess_expression(root, wco->qual,
943 : EXPRKIND_QUAL);
944 2938 : if (wco->qual != NULL)
945 2538 : newWithCheckOptions = lappend(newWithCheckOptions, wco);
946 : }
947 540018 : parse->withCheckOptions = newWithCheckOptions;
948 :
949 540018 : parse->returningList = (List *)
950 540018 : preprocess_expression(root, (Node *) parse->returningList,
951 : EXPRKIND_TARGET);
952 :
953 540018 : preprocess_qual_conditions(root, (Node *) parse->jointree);
954 :
955 540018 : parse->havingQual = preprocess_expression(root, parse->havingQual,
956 : EXPRKIND_QUAL);
957 :
958 542852 : foreach(l, parse->windowClause)
959 : {
960 2834 : WindowClause *wc = lfirst_node(WindowClause, l);
961 :
962 : /* partitionClause/orderClause are sort/group expressions */
963 2834 : wc->startOffset = preprocess_expression(root, wc->startOffset,
964 : EXPRKIND_LIMIT);
965 2834 : wc->endOffset = preprocess_expression(root, wc->endOffset,
966 : EXPRKIND_LIMIT);
967 : }
968 :
969 540018 : parse->limitOffset = preprocess_expression(root, parse->limitOffset,
970 : EXPRKIND_LIMIT);
971 540018 : parse->limitCount = preprocess_expression(root, parse->limitCount,
972 : EXPRKIND_LIMIT);
973 :
974 540018 : if (parse->onConflict)
975 : {
976 3732 : parse->onConflict->arbiterElems = (List *)
977 1866 : preprocess_expression(root,
978 1866 : (Node *) parse->onConflict->arbiterElems,
979 : EXPRKIND_ARBITER_ELEM);
980 3732 : parse->onConflict->arbiterWhere =
981 1866 : preprocess_expression(root,
982 1866 : parse->onConflict->arbiterWhere,
983 : EXPRKIND_QUAL);
984 3732 : parse->onConflict->onConflictSet = (List *)
985 1866 : preprocess_expression(root,
986 1866 : (Node *) parse->onConflict->onConflictSet,
987 : EXPRKIND_TARGET);
988 1866 : parse->onConflict->onConflictWhere =
989 1866 : preprocess_expression(root,
990 1866 : parse->onConflict->onConflictWhere,
991 : EXPRKIND_QUAL);
992 : /* exclRelTlist contains only Vars, so no preprocessing needed */
993 : }
994 :
995 542886 : foreach(l, parse->mergeActionList)
996 : {
997 2868 : MergeAction *action = (MergeAction *) lfirst(l);
998 :
999 2868 : action->targetList = (List *)
1000 2868 : preprocess_expression(root,
1001 2868 : (Node *) action->targetList,
1002 : EXPRKIND_TARGET);
1003 2868 : action->qual =
1004 2868 : preprocess_expression(root,
1005 : (Node *) action->qual,
1006 : EXPRKIND_QUAL);
1007 : }
1008 :
1009 540018 : parse->mergeJoinCondition =
1010 540018 : preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
1011 :
1012 540018 : root->append_rel_list = (List *)
1013 540018 : preprocess_expression(root, (Node *) root->append_rel_list,
1014 : EXPRKIND_APPINFO);
1015 :
1016 : /* Also need to preprocess expressions within RTEs */
1017 1473170 : foreach(l, parse->rtable)
1018 : {
1019 933152 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1020 : int kind;
1021 : ListCell *lcsq;
1022 :
1023 933152 : if (rte->rtekind == RTE_RELATION)
1024 : {
1025 485106 : if (rte->tablesample)
1026 228 : rte->tablesample = (TableSampleClause *)
1027 228 : preprocess_expression(root,
1028 228 : (Node *) rte->tablesample,
1029 : EXPRKIND_TABLESAMPLE);
1030 : }
1031 448046 : else if (rte->rtekind == RTE_SUBQUERY)
1032 : {
1033 : /*
1034 : * We don't want to do all preprocessing yet on the subquery's
1035 : * expressions, since that will happen when we plan it. But if it
1036 : * contains any join aliases of our level, those have to get
1037 : * expanded now, because planning of the subquery won't do it.
1038 : * That's only possible if the subquery is LATERAL.
1039 : */
1040 82280 : if (rte->lateral && root->hasJoinRTEs)
1041 1452 : rte->subquery = (Query *)
1042 1452 : flatten_join_alias_vars(root, root->parse,
1043 1452 : (Node *) rte->subquery);
1044 : }
1045 365766 : else if (rte->rtekind == RTE_FUNCTION)
1046 : {
1047 : /* Preprocess the function expression(s) fully */
1048 52558 : kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
1049 52558 : rte->functions = (List *)
1050 52558 : preprocess_expression(root, (Node *) rte->functions, kind);
1051 : }
1052 313208 : else if (rte->rtekind == RTE_TABLEFUNC)
1053 : {
1054 : /* Preprocess the function expression(s) fully */
1055 626 : kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
1056 626 : rte->tablefunc = (TableFunc *)
1057 626 : preprocess_expression(root, (Node *) rte->tablefunc, kind);
1058 : }
1059 312582 : else if (rte->rtekind == RTE_VALUES)
1060 : {
1061 : /* Preprocess the values lists fully */
1062 8514 : kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
1063 8514 : rte->values_lists = (List *)
1064 8514 : preprocess_expression(root, (Node *) rte->values_lists, kind);
1065 : }
1066 304068 : else if (rte->rtekind == RTE_GROUP)
1067 : {
1068 : /* Preprocess the groupexprs list fully */
1069 4930 : rte->groupexprs = (List *)
1070 4930 : preprocess_expression(root, (Node *) rte->groupexprs,
1071 : EXPRKIND_GROUPEXPR);
1072 : }
1073 :
1074 : /*
1075 : * Process each element of the securityQuals list as if it were a
1076 : * separate qual expression (as indeed it is). We need to do it this
1077 : * way to get proper canonicalization of AND/OR structure. Note that
1078 : * this converts each element into an implicit-AND sublist.
1079 : */
1080 936330 : foreach(lcsq, rte->securityQuals)
1081 : {
1082 3178 : lfirst(lcsq) = preprocess_expression(root,
1083 3178 : (Node *) lfirst(lcsq),
1084 : EXPRKIND_QUAL);
1085 : }
1086 : }
1087 :
1088 : /*
1089 : * Now that we are done preprocessing expressions, and in particular done
1090 : * flattening join alias variables, get rid of the joinaliasvars lists.
1091 : * They no longer match what expressions in the rest of the tree look
1092 : * like, because we have not preprocessed expressions in those lists (and
1093 : * do not want to; for example, expanding a SubLink there would result in
1094 : * a useless unreferenced subplan). Leaving them in place simply creates
1095 : * a hazard for later scans of the tree. We could try to prevent that by
1096 : * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1097 : * but that doesn't sound very reliable.
1098 : */
1099 540018 : if (root->hasJoinRTEs)
1100 : {
1101 329846 : foreach(l, parse->rtable)
1102 : {
1103 271362 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1104 :
1105 271362 : rte->joinaliasvars = NIL;
1106 : }
1107 : }
1108 :
1109 : /*
1110 : * Replace any Vars in the subquery's targetlist and havingQual that
1111 : * reference GROUP outputs with the underlying grouping expressions.
1112 : *
1113 : * Note that we need to perform this replacement after we've preprocessed
1114 : * the grouping expressions. This is to ensure that there is only one
1115 : * instance of SubPlan for each SubLink contained within the grouping
1116 : * expressions.
1117 : */
1118 540018 : if (parse->hasGroupRTE)
1119 : {
1120 4930 : parse->targetList = (List *)
1121 4930 : flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1122 4930 : parse->havingQual =
1123 4930 : flatten_group_exprs(root, root->parse, parse->havingQual);
1124 : }
1125 :
1126 : /* Constant-folding might have removed all set-returning functions */
1127 540018 : if (parse->hasTargetSRFs)
1128 12030 : parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1129 :
1130 : /*
1131 : * If we have grouping sets, expand the groupingSets tree of this query to
1132 : * a flat list of grouping sets. We need to do this before optimizing
1133 : * HAVING, since we can't easily tell if there's an empty grouping set
1134 : * until we have this representation.
1135 : */
1136 540018 : if (parse->groupingSets)
1137 : {
1138 980 : parse->groupingSets =
1139 980 : expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
1140 : }
1141 :
1142 : /*
1143 : * In some cases we may want to transfer a HAVING clause into WHERE. We
1144 : * cannot do so if the HAVING clause contains aggregates (obviously) or
1145 : * volatile functions (since a HAVING clause is supposed to be executed
1146 : * only once per group). We also can't do this if there are any grouping
1147 : * sets and the clause references any columns that are nullable by the
1148 : * grouping sets; the nulled values of those columns are not available
1149 : * before the grouping step. (The test on groupClause might seem wrong,
1150 : * but it's okay: it's just an optimization to avoid running pull_varnos
1151 : * when there cannot be any Vars in the HAVING clause.)
1152 : *
1153 : * Also, it may be that the clause is so expensive to execute that we're
1154 : * better off doing it only once per group, despite the loss of
1155 : * selectivity. This is hard to estimate short of doing the entire
1156 : * planning process twice, so we use a heuristic: clauses containing
1157 : * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1158 : * clause into WHERE, in hopes of eliminating tuples before aggregation
1159 : * instead of after.
1160 : *
1161 : * If the query has no empty grouping set then we can simply move such a
1162 : * clause into WHERE; any group that fails the clause will not be in the
1163 : * output because none of its tuples will reach the grouping or
1164 : * aggregation stage. Otherwise we have to keep the clause in HAVING to
1165 : * ensure that we don't emit a bogus aggregated row. But then the HAVING
1166 : * clause must be degenerate (variable-free), so we can copy it into WHERE
1167 : * so that query_planner() can use it in a gating Result node. (This could
1168 : * be done better, but it seems not worth optimizing.)
1169 : *
1170 : * Note that a HAVING clause may contain expressions that are not fully
1171 : * preprocessed. This can happen if these expressions are part of
1172 : * grouping items. In such cases, they are replaced with GROUP Vars in
1173 : * the parser and then replaced back after we're done with expression
1174 : * preprocessing on havingQual. This is not an issue if the clause
1175 : * remains in HAVING, because these expressions will be matched to lower
1176 : * target items in setrefs.c. However, if the clause is moved or copied
1177 : * into WHERE, we need to ensure that these expressions are fully
1178 : * preprocessed.
1179 : *
1180 : * Note that both havingQual and parse->jointree->quals are in
1181 : * implicitly-ANDed-list form at this point, even though they are declared
1182 : * as Node *.
1183 : */
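/*
 * For illustration, with hypothetical queries:
 *
 *	HAVING sum(x) > 100					contains an aggregate, so it stays
 *										in HAVING;
 *	GROUP BY g HAVING g < 10			no empty grouping set, so it is
 *										moved into WHERE;
 *	GROUP BY GROUPING SETS ((), (g))
 *	  HAVING now() > '2000-01-01'		variable-free with an empty grouping
 *										set, so it is copied into WHERE and
 *										also kept in HAVING.
 */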
1184 540018 : newHaving = NIL;
1185 541388 : foreach(l, (List *) parse->havingQual)
1186 : {
1187 1370 : Node *havingclause = (Node *) lfirst(l);
1188 :
1189 1868 : if (contain_agg_clause(havingclause) ||
1190 996 : contain_volatile_functions(havingclause) ||
1191 498 : contain_subplans(havingclause) ||
1192 618 : (parse->groupClause && parse->groupingSets &&
1193 120 : bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1194 : {
1195 : /* keep it in HAVING */
1196 944 : newHaving = lappend(newHaving, havingclause);
1197 : }
1198 426 : else if (parse->groupClause &&
1199 390 : (parse->groupingSets == NIL ||
1200 48 : (List *) linitial(parse->groupingSets) != NIL))
1201 378 : {
1202 : /* There is GROUP BY, but no empty grouping set */
1203 : Node *whereclause;
1204 :
1205 : /* Preprocess the HAVING clause fully */
1206 378 : whereclause = preprocess_expression(root, havingclause,
1207 : EXPRKIND_QUAL);
1208 : /* ... and move it to WHERE */
1209 378 : parse->jointree->quals = (Node *)
1210 378 : list_concat((List *) parse->jointree->quals,
1211 : (List *) whereclause);
1212 : }
1213 : else
1214 : {
1215 : /* There is an empty grouping set (perhaps implicitly) */
1216 : Node *whereclause;
1217 :
1218 : /* Preprocess the HAVING clause fully */
1219 48 : whereclause = preprocess_expression(root, copyObject(havingclause),
1220 : EXPRKIND_QUAL);
1221 : /* ... and put a copy in WHERE */
1222 96 : parse->jointree->quals = (Node *)
1223 48 : list_concat((List *) parse->jointree->quals,
1224 : (List *) whereclause);
1225 : /* ... and also keep it in HAVING */
1226 48 : newHaving = lappend(newHaving, havingclause);
1227 : }
1228 : }
1229 540018 : parse->havingQual = (Node *) newHaving;
1230 :
1231 : /*
1232 : * If we have any outer joins, try to reduce them to plain inner joins.
1233 : * This step is most easily done after we've done expression
1234 : * preprocessing.
1235 : */
1236 540018 : if (hasOuterJoins)
1237 34352 : reduce_outer_joins(root);
1238 :
1239 : /*
1240 : * If we have any RTE_RESULT relations, see if they can be deleted from
1241 : * the jointree. We also rely on this processing to flatten single-child
1242 : * FromExprs underneath outer joins. This step is most effectively done
1243 : * after we've done expression preprocessing and outer join reduction.
1244 : */
1245 540018 : if (hasResultRTEs || hasOuterJoins)
1246 231422 : remove_useless_result_rtes(root);
1247 :
1248 : /*
1249 : * Do the main planning.
1250 : */
1251 540018 : grouping_planner(root, tuple_fraction, setops);
1252 :
1253 : /*
1254 : * Capture the set of outer-level param IDs we have access to, for use in
1255 : * extParam/allParam calculations later.
1256 : */
1257 539946 : SS_identify_outer_params(root);
1258 :
1259 : /*
1260 : * If any initPlans were created in this query level, adjust the surviving
1261 : * Paths' costs and parallel-safety flags to account for them. The
1262 : * initPlans won't actually get attached to the plan tree till
1263 : * create_plan() runs, but we must include their effects now.
1264 : */
1265 539946 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1266 539946 : SS_charge_for_initplans(root, final_rel);
1267 :
1268 : /*
1269 : * Make sure we've identified the cheapest Path for the final rel. (By
1270 : * doing this here not in grouping_planner, we include initPlan costs in
1271 : * the decision, though it's unlikely that will change anything.)
1272 : */
1273 539946 : set_cheapest(final_rel);
1274 :
1275 539946 : return root;
1276 : }
1277 :
1278 : /*
1279 : * preprocess_expression
1280 : * Do subquery_planner's preprocessing work for an expression,
1281 : * which can be a targetlist, a WHERE clause (including JOIN/ON
1282 : * conditions), a HAVING clause, or a few other things.
1283 : */
1284 : static Node *
1285 4534670 : preprocess_expression(PlannerInfo *root, Node *expr, int kind)
1286 : {
1287 : /*
1288 : * Fall out quickly if expression is empty. This occurs often enough to
1289 : * be worth checking. Note that null->null is the correct conversion for
1290 : * implicit-AND result format, too.
1291 : */
1292 4534670 : if (expr == NULL)
1293 3576652 : return NULL;
1294 :
1295 : /*
1296 : * If the query has any join RTEs, replace join alias variables with
1297 : * base-relation variables. We must do this first, since any expressions
1298 : * we may extract from the joinaliasvars lists have not been preprocessed.
1299 : * For example, if we did this after sublink processing, sublinks expanded
1300 : * out from join aliases would not get processed. But we can skip this in
1301 : * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1302 : * they can't contain any Vars of the current query level.
1303 : */
1304 958018 : if (root->hasJoinRTEs &&
1305 419502 : !(kind == EXPRKIND_RTFUNC ||
1306 209574 : kind == EXPRKIND_VALUES ||
1307 : kind == EXPRKIND_TABLESAMPLE ||
1308 : kind == EXPRKIND_TABLEFUNC))
1309 209556 : expr = flatten_join_alias_vars(root, root->parse, expr);
1310 :
1311 : /*
1312 : * Simplify constant expressions. For function RTEs, this was already
1313 : * done by preprocess_function_rtes. (But note we must do it again for
1314 : * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1315 : * un-simplified subexpressions inserted by flattening of subqueries or
1316 : * join alias variables.)
1317 : *
1318 : * Note: an essential effect of this is to convert named-argument function
1319 : * calls to positional notation and insert the current actual values of
1320 : * any default arguments for functions. To ensure that happens, we *must*
1321 : * process all expressions here. Previous PG versions sometimes skipped
1322 : * const-simplification if it didn't seem worth the trouble, but we can't
1323 : * do that anymore.
1324 : *
1325 : * Note: this also flattens nested AND and OR expressions into N-argument
1326 : * form. All processing of a qual expression after this point must be
1327 : * careful to maintain AND/OR flatness --- that is, do not generate a tree
1328 : * with AND directly under AND, nor OR directly under OR.
1329 : */
1330 958018 : if (kind != EXPRKIND_RTFUNC)
1331 914118 : expr = eval_const_expressions(root, expr);
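/*
 * For illustration: eval_const_expressions() turns a nested qual such as
 * "a AND (b AND c)" into one three-argument AND, i.e.
 * BoolExpr(AND_EXPR, list(a, b, c)), and later processing must keep it
 * that flat.
 */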
1332 :
1333 : /*
1334 : * If it's a qual or havingQual, canonicalize it.
1335 : */
1336 954048 : if (kind == EXPRKIND_QUAL)
1337 : {
1338 345714 : expr = (Node *) canonicalize_qual((Expr *) expr, false);
1339 :
1340 : #ifdef OPTIMIZER_DEBUG
1341 : printf("After canonicalize_qual()\n");
1342 : pprint(expr);
1343 : #endif
1344 : }
1345 :
1346 : /*
1347 : * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1348 : * hashfuncid of any that might execute more quickly by using hash lookups
1349 : * instead of a linear search.
1350 : */
1351 954048 : if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1352 : {
1353 872942 : convert_saop_to_hashed_saop(expr);
1354 : }
1355 :
1356 : /* Expand SubLinks to SubPlans */
1357 954048 : if (root->parse->hasSubLinks)
1358 106490 : expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1359 :
1360 : /*
1361 : * XXX do not insert anything here unless you have grokked the comments in
1362 : * SS_replace_correlation_vars ...
1363 : */
1364 :
1365 : /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1366 954048 : if (root->query_level > 1)
1367 187840 : expr = SS_replace_correlation_vars(root, expr);
1368 :
1369 : /*
1370 : * If it's a qual or havingQual, convert it to implicit-AND format. (We
1371 : * don't want to do this before eval_const_expressions, since the latter
1372 : * would be unable to simplify a top-level AND correctly. Also,
1373 : * SS_process_sublinks expects explicit-AND format.)
1374 : */
1375 954048 : if (kind == EXPRKIND_QUAL)
1376 345714 : expr = (Node *) make_ands_implicit((Expr *) expr);
1377 :
1378 954048 : return expr;
1379 : }
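
A hypothetical sketch (not part of planner.c; it assumes the usual backend
headers) of the implicit-AND convention referred to above: an explicit AND
tree becomes a flat List of conjuncts, and an empty qual is simply NIL.

#include "postgres.h"
#include "nodes/makefuncs.h"

/* Illustration only: the implicit-AND qual format */
static void
implicit_and_example(Expr *a, Expr *b)
{
	/* the parser produces an explicit BoolExpr: AND(a, b) */
	Expr	   *explicit_and = makeBoolExpr(AND_EXPR, list_make2(a, b), -1);

	/* in implicit-AND form, the qual is just a List of conjuncts... */
	List	   *conjuncts = make_ands_implicit(explicit_and);

	Assert(list_length(conjuncts) == 2);

	/* ...and NULL converts to NIL, matching the null->null comment above */
	Assert(make_ands_implicit(NULL) == NIL);
}
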
1380 :
1381 : /*
1382 : * preprocess_qual_conditions
1383 : * Recursively scan the query's jointree and do subquery_planner's
1384 : * preprocessing work on each qual condition found therein.
1385 : */
1386 : static void
1387 1345946 : preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1388 : {
1389 1345946 : if (jtnode == NULL)
1390 0 : return;
1391 1345946 : if (IsA(jtnode, RangeTblRef))
1392 : {
1393 : /* nothing to do here */
1394 : }
1395 658218 : else if (IsA(jtnode, FromExpr))
1396 : {
1397 555388 : FromExpr *f = (FromExpr *) jtnode;
1398 : ListCell *l;
1399 :
1400 1155656 : foreach(l, f->fromlist)
1401 600268 : preprocess_qual_conditions(root, lfirst(l));
1402 :
1403 555388 : f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1404 : }
1405 102830 : else if (IsA(jtnode, JoinExpr))
1406 : {
1407 102830 : JoinExpr *j = (JoinExpr *) jtnode;
1408 :
1409 102830 : preprocess_qual_conditions(root, j->larg);
1410 102830 : preprocess_qual_conditions(root, j->rarg);
1411 :
1412 102830 : j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1413 : }
1414 : else
1415 0 : elog(ERROR, "unrecognized node type: %d",
1416 : (int) nodeTag(jtnode));
1417 : }
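
For illustration (not from the source): given SELECT ... FROM a JOIN b ON
a.x = b.x WHERE a.y > 0, the recursion above visits the top FromExpr, whose
quals field carries the WHERE clause, and the JoinExpr beneath it, whose
quals field carries the ON clause; each qual therefore passes through
preprocess_expression with EXPRKIND_QUAL exactly once.
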
1418 :
1419 : /*
1420 : * preprocess_phv_expression
1421 : * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1422 : *
1423 : * If a LATERAL subquery references an output of another subquery, and that
1424 : * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1425 : * join, then we'll push the PlaceHolderVar expression down into the subquery
1426 : * and later pull it back up during find_lateral_references, which runs after
1427 : * subquery_planner has preprocessed all the expressions that were in the
1428 : * current query level to start with. So we need to preprocess it then.
1429 : */
1430 : Expr *
1431 90 : preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1432 : {
1433 90 : return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1434 : }
1435 :
1436 : /*--------------------
1437 : * grouping_planner
1438 : * Perform planning steps related to grouping, aggregation, etc.
1439 : *
1440 : * This function adds all required top-level processing to the scan/join
1441 : * Path(s) produced by query_planner.
1442 : *
1443 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
1444 : * tuple_fraction is interpreted as follows:
1445 : * 0: expect all tuples to be retrieved (normal case)
1446 : * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1447 : * from the plan to be retrieved
1448 : * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1449 : * expected to be retrieved (ie, a LIMIT specification).
1450 : * setops is used for set operation subqueries to provide the subquery with
1451 : * the context in which it's being used so that Paths correctly sorted for the
1452 : * set operation can be generated. NULL when not planning a set operation
1453 : * child, or when a child of a set op that isn't interested in sorted input.
1454 : *
1455 : * Returns nothing; the useful output is in the Paths we attach to the
1456 : * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1457 : * root->processed_tlist contains the final processed targetlist.
1458 : *
1459 : * Note that we have not done set_cheapest() on the final rel; it's convenient
1460 : * to leave this to the caller.
1461 : *--------------------
1462 : */
1463 : static void
1464 540018 : grouping_planner(PlannerInfo *root, double tuple_fraction,
1465 : SetOperationStmt *setops)
1466 : {
1467 540018 : Query *parse = root->parse;
1468 540018 : int64 offset_est = 0;
1469 540018 : int64 count_est = 0;
1470 540018 : double limit_tuples = -1.0;
1471 540018 : bool have_postponed_srfs = false;
1472 : PathTarget *final_target;
1473 : List *final_targets;
1474 : List *final_targets_contain_srfs;
1475 : bool final_target_parallel_safe;
1476 : RelOptInfo *current_rel;
1477 : RelOptInfo *final_rel;
1478 : FinalPathExtraData extra;
1479 : ListCell *lc;
1480 :
1481 : /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1482 540018 : if (parse->limitCount || parse->limitOffset)
1483 : {
1484 5068 : tuple_fraction = preprocess_limit(root, tuple_fraction,
1485 : &offset_est, &count_est);
1486 :
1487 : /*
1488 : * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1489 : * estimate the effects of using a bounded sort.
1490 : */
1491 5068 : if (count_est > 0 && offset_est >= 0)
1492 4516 : limit_tuples = (double) count_est + (double) offset_est;
1493 : }
1494 :
1495 : /* Make tuple_fraction accessible to lower-level routines */
1496 540018 : root->tuple_fraction = tuple_fraction;
1497 :
1498 540018 : if (parse->setOperations)
1499 : {
1500 : /*
1501 : * Construct Paths for set operations. The results will not need any
1502 : * work except perhaps a top-level sort and/or LIMIT. Note that any
1503 : * special work for recursive unions is the responsibility of
1504 : * plan_set_operations.
1505 : */
1506 6212 : current_rel = plan_set_operations(root);
1507 :
1508 : /*
1509 : * We should not need to call preprocess_targetlist, since we must be
1510 : * in a SELECT query node. Instead, use the processed_tlist returned
1511 : * by plan_set_operations (since this tells whether it returned any
1512 : * resjunk columns!), and transfer any sort key information from the
1513 : * original tlist.
1514 : */
1515 : Assert(parse->commandType == CMD_SELECT);
1516 :
1517 : /* for safety, copy processed_tlist instead of modifying in-place */
1518 6206 : root->processed_tlist =
1519 6206 : postprocess_setop_tlist(copyObject(root->processed_tlist),
1520 : parse->targetList);
1521 :
1522 : /* Also extract the PathTarget form of the setop result tlist */
1523 6206 : final_target = current_rel->cheapest_total_path->pathtarget;
1524 :
1525 : /* And check whether it's parallel safe */
1526 : final_target_parallel_safe =
1527 6206 : is_parallel_safe(root, (Node *) final_target->exprs);
1528 :
1529 : /* The setop result tlist couldn't contain any SRFs */
1530 : Assert(!parse->hasTargetSRFs);
1531 6206 : final_targets = final_targets_contain_srfs = NIL;
1532 :
1533 : /*
1534 : * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1535 : * checked already, but let's make sure).
1536 : */
1537 6206 : if (parse->rowMarks)
1538 0 : ereport(ERROR,
1539 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1540 : /*------
1541 : translator: %s is a SQL row locking clause such as FOR UPDATE */
1542 : errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1543 : LCS_asString(linitial_node(RowMarkClause,
1544 : parse->rowMarks)->strength))));
1545 :
1546 : /*
1547 : * Calculate pathkeys that represent result ordering requirements
1548 : */
1549 : Assert(parse->distinctClause == NIL);
1550 6206 : root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1551 : parse->sortClause,
1552 : root->processed_tlist);
1553 : }
1554 : else
1555 : {
1556 : /* No set operations, do regular planning */
1557 : PathTarget *sort_input_target;
1558 : List *sort_input_targets;
1559 : List *sort_input_targets_contain_srfs;
1560 : bool sort_input_target_parallel_safe;
1561 : PathTarget *grouping_target;
1562 : List *grouping_targets;
1563 : List *grouping_targets_contain_srfs;
1564 : bool grouping_target_parallel_safe;
1565 : PathTarget *scanjoin_target;
1566 : List *scanjoin_targets;
1567 : List *scanjoin_targets_contain_srfs;
1568 : bool scanjoin_target_parallel_safe;
1569 : bool scanjoin_target_same_exprs;
1570 : bool have_grouping;
1571 533806 : WindowFuncLists *wflists = NULL;
1572 533806 : List *activeWindows = NIL;
1573 533806 : grouping_sets_data *gset_data = NULL;
1574 : standard_qp_extra qp_extra;
1575 :
1576 : /* A recursive query should always have setOperations */
1577 : Assert(!root->hasRecursion);
1578 :
1579 : /* Preprocess grouping sets and GROUP BY clause, if any */
1580 533806 : if (parse->groupingSets)
1581 : {
1582 980 : gset_data = preprocess_grouping_sets(root);
1583 : }
1584 532826 : else if (parse->groupClause)
1585 : {
1586 : /* Preprocess regular GROUP BY clause, if any */
1587 4028 : root->processed_groupClause = preprocess_groupclause(root, NIL);
1588 : }
1589 :
1590 : /*
1591 : * Preprocess targetlist. Note that much of the remaining planning
1592 : * work will be done with the PathTarget representation of tlists, but
1593 : * we must also maintain the full representation of the final tlist so
1594 : * that we can transfer its decoration (resnames etc) to the topmost
1595 : * tlist of the finished Plan. This is kept in processed_tlist.
1596 : */
1597 533800 : preprocess_targetlist(root);
1598 :
1599 : /*
1600 : * Mark all the aggregates with resolved aggtranstypes, and detect
1601 : * aggregates that are duplicates or can share transition state. We
1602 : * must do this before slicing and dicing the tlist into various
1603 : * pathtargets, else some copies of the Aggref nodes might escape
1604 : * being marked.
1605 : */
1606 533800 : if (parse->hasAggs)
1607 : {
1608 44664 : preprocess_aggrefs(root, (Node *) root->processed_tlist);
1609 44664 : preprocess_aggrefs(root, (Node *) parse->havingQual);
1610 : }
1611 :
1612 : /*
1613 : * Locate any window functions in the tlist. (We don't need to look
1614 : * anywhere else, since expressions used in ORDER BY will be in there
1615 : * too.) Note that they could all have been eliminated by constant
1616 : * folding, in which case we don't need to do any more work.
1617 : */
1618 533800 : if (parse->hasWindowFuncs)
1619 : {
1620 2582 : wflists = find_window_functions((Node *) root->processed_tlist,
1621 2582 : list_length(parse->windowClause));
1622 2582 : if (wflists->numWindowFuncs > 0)
1623 : {
1624 : /*
1625 : * See if any modifications can be made to each WindowClause
1626 : * to allow the executor to execute the WindowFuncs more
1627 : * quickly.
1628 : */
1629 2576 : optimize_window_clauses(root, wflists);
1630 :
1631 : /* Extract the list of windows actually in use. */
1632 2576 : activeWindows = select_active_windows(root, wflists);
1633 :
1634 : /* Make sure they all have names, for EXPLAIN's use. */
1635 2576 : name_active_windows(activeWindows);
1636 : }
1637 : else
1638 6 : parse->hasWindowFuncs = false;
1639 : }
1640 :
1641 : /*
1642 : * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1643 : * adding logic between here and the query_planner() call. Anything
1644 : * that is needed in MIN/MAX-optimizable cases will have to be
1645 : * duplicated in planagg.c.
1646 : */
1647 533800 : if (parse->hasAggs)
1648 44664 : preprocess_minmax_aggregates(root);
1649 :
1650 : /*
1651 : * Figure out whether there's a hard limit on the number of rows that
1652 : * query_planner's result subplan needs to return. Even if we know a
1653 : * hard limit overall, it doesn't apply if the query has any
1654 : * grouping/aggregation operations, or SRFs in the tlist.
1655 : */
1656 533800 : if (parse->groupClause ||
1657 528876 : parse->groupingSets ||
1658 528798 : parse->distinctClause ||
1659 525828 : parse->hasAggs ||
1660 485494 : parse->hasWindowFuncs ||
1661 483062 : parse->hasTargetSRFs ||
1662 471538 : root->hasHavingQual)
1663 62286 : root->limit_tuples = -1.0;
1664 : else
1665 471514 : root->limit_tuples = limit_tuples;
1666 :
1667 : /* Set up data needed by standard_qp_callback */
1668 533800 : qp_extra.activeWindows = activeWindows;
1669 533800 : qp_extra.gset_data = gset_data;
1670 :
1671 : /*
1672 : * If we're a subquery for a set operation, store the SetOperationStmt
1673 : * in qp_extra.
1674 : */
1675 533800 : qp_extra.setop = setops;
1676 :
1677 : /*
1678 : * Generate the best unsorted and presorted paths for the scan/join
1679 : * portion of this Query, ie the processing represented by the
1680 : * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1681 : * We also generate (in standard_qp_callback) pathkey representations
1682 : * of the query's sort clause, distinct clause, etc.
1683 : */
1684 533800 : current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1685 :
1686 : /*
1687 : * Convert the query's result tlist into PathTarget format.
1688 : *
1689 : * Note: this cannot be done before query_planner() has performed
1690 : * appendrel expansion, because that might add resjunk entries to
1691 : * root->processed_tlist. Waiting till afterwards is also helpful
1692 : * because the target width estimates can use per-Var width numbers
1693 : * that were obtained within query_planner().
1694 : */
1695 533746 : final_target = create_pathtarget(root, root->processed_tlist);
1696 : final_target_parallel_safe =
1697 533746 : is_parallel_safe(root, (Node *) final_target->exprs);
1698 :
1699 : /*
1700 : * If ORDER BY was given, consider whether we should use a post-sort
1701 : * projection, and compute the adjusted target for preceding steps if
1702 : * so.
1703 : */
1704 533746 : if (parse->sortClause)
1705 : {
1706 71652 : sort_input_target = make_sort_input_target(root,
1707 : final_target,
1708 : &have_postponed_srfs);
1709 : sort_input_target_parallel_safe =
1710 71652 : is_parallel_safe(root, (Node *) sort_input_target->exprs);
1711 : }
1712 : else
1713 : {
1714 462094 : sort_input_target = final_target;
1715 462094 : sort_input_target_parallel_safe = final_target_parallel_safe;
1716 : }
1717 :
1718 : /*
1719 : * If we have window functions to deal with, the output from any
1720 : * grouping step needs to be what the window functions want;
1721 : * otherwise, it should be sort_input_target.
1722 : */
1723 533746 : if (activeWindows)
1724 : {
1725 2576 : grouping_target = make_window_input_target(root,
1726 : final_target,
1727 : activeWindows);
1728 : grouping_target_parallel_safe =
1729 2576 : is_parallel_safe(root, (Node *) grouping_target->exprs);
1730 : }
1731 : else
1732 : {
1733 531170 : grouping_target = sort_input_target;
1734 531170 : grouping_target_parallel_safe = sort_input_target_parallel_safe;
1735 : }
1736 :
1737 : /*
1738 : * If we have grouping or aggregation to do, the topmost scan/join
1739 : * plan node must emit what the grouping step wants; otherwise, it
1740 : * should emit grouping_target.
1741 : */
1742 528822 : have_grouping = (parse->groupClause || parse->groupingSets ||
1743 1062568 : parse->hasAggs || root->hasHavingQual);
1744 533746 : if (have_grouping)
1745 : {
1746 45398 : scanjoin_target = make_group_input_target(root, final_target);
1747 : scanjoin_target_parallel_safe =
1748 45398 : is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1749 : }
1750 : else
1751 : {
1752 488348 : scanjoin_target = grouping_target;
1753 488348 : scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1754 : }
1755 :
1756 : /*
1757 : * If there are any SRFs in the targetlist, we must separate each of
1758 : * these PathTargets into SRF-computing and SRF-free targets. Replace
1759 : * each of the named targets with a SRF-free version, and remember the
1760 : * list of additional projection steps we need to add afterwards.
1761 : */
1762 533746 : if (parse->hasTargetSRFs)
1763 : {
1764 : /* final_target doesn't recompute any SRFs in sort_input_target */
1765 12030 : split_pathtarget_at_srfs(root, final_target, sort_input_target,
1766 : &final_targets,
1767 : &final_targets_contain_srfs);
1768 12030 : final_target = linitial_node(PathTarget, final_targets);
1769 : Assert(!linitial_int(final_targets_contain_srfs));
1770 : /* likewise for sort_input_target vs. grouping_target */
1771 12030 : split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1772 : &sort_input_targets,
1773 : &sort_input_targets_contain_srfs);
1774 12030 : sort_input_target = linitial_node(PathTarget, sort_input_targets);
1775 : Assert(!linitial_int(sort_input_targets_contain_srfs));
1776 : /* likewise for grouping_target vs. scanjoin_target */
1777 12030 : split_pathtarget_at_srfs(root,
1778 : grouping_target, scanjoin_target,
1779 : &grouping_targets,
1780 : &grouping_targets_contain_srfs);
1781 12030 : grouping_target = linitial_node(PathTarget, grouping_targets);
1782 : Assert(!linitial_int(grouping_targets_contain_srfs));
1783 : /* scanjoin_target will not have any SRFs precomputed for it */
1784 12030 : split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1785 : &scanjoin_targets,
1786 : &scanjoin_targets_contain_srfs);
1787 12030 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1788 : Assert(!linitial_int(scanjoin_targets_contain_srfs));
1789 : }
1790 : else
1791 : {
1792 : /* initialize lists; for most of these, dummy values are OK */
1793 521716 : final_targets = final_targets_contain_srfs = NIL;
1794 521716 : sort_input_targets = sort_input_targets_contain_srfs = NIL;
1795 521716 : grouping_targets = grouping_targets_contain_srfs = NIL;
1796 521716 : scanjoin_targets = list_make1(scanjoin_target);
1797 521716 : scanjoin_targets_contain_srfs = NIL;
1798 : }
1799 :
1800 : /* Apply scan/join target. */
1801 533746 : scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1802 533746 : && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1803 533746 : apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1804 : scanjoin_targets_contain_srfs,
1805 : scanjoin_target_parallel_safe,
1806 : scanjoin_target_same_exprs);
1807 :
1808 : /*
1809 : * Save the various upper-rel PathTargets we just computed into
1810 : * root->upper_targets[]. The core code doesn't use this, but it
1811 : * provides a convenient place for extensions to get at the info. For
1812 : * consistency, we save all the intermediate targets, even though some
1813 : * of the corresponding upperrels might not be needed for this query.
1814 : */
1815 533746 : root->upper_targets[UPPERREL_FINAL] = final_target;
1816 533746 : root->upper_targets[UPPERREL_ORDERED] = final_target;
1817 533746 : root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1818 533746 : root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1819 533746 : root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1820 533746 : root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1821 :
1822 : /*
1823 : * If we have grouping and/or aggregation, consider ways to implement
1824 : * that. We build a new upperrel representing the output of this
1825 : * phase.
1826 : */
1827 533746 : if (have_grouping)
1828 : {
1829 45398 : current_rel = create_grouping_paths(root,
1830 : current_rel,
1831 : grouping_target,
1832 : grouping_target_parallel_safe,
1833 : gset_data);
1834 : /* Fix things up if grouping_target contains SRFs */
1835 45392 : if (parse->hasTargetSRFs)
1836 464 : adjust_paths_for_srfs(root, current_rel,
1837 : grouping_targets,
1838 : grouping_targets_contain_srfs);
1839 : }
1840 :
1841 : /*
1842 : * If we have window functions, consider ways to implement those. We
1843 : * build a new upperrel representing the output of this phase.
1844 : */
1845 533740 : if (activeWindows)
1846 : {
1847 2576 : current_rel = create_window_paths(root,
1848 : current_rel,
1849 : grouping_target,
1850 : sort_input_target,
1851 : sort_input_target_parallel_safe,
1852 : wflists,
1853 : activeWindows);
1854 : /* Fix things up if sort_input_target contains SRFs */
1855 2576 : if (parse->hasTargetSRFs)
1856 12 : adjust_paths_for_srfs(root, current_rel,
1857 : sort_input_targets,
1858 : sort_input_targets_contain_srfs);
1859 : }
1860 :
1861 : /*
1862 : * If there is a DISTINCT clause, consider ways to implement that. We
1863 : * build a new upperrel representing the output of this phase.
1864 : */
1865 533740 : if (parse->distinctClause)
1866 : {
1867 3004 : current_rel = create_distinct_paths(root,
1868 : current_rel,
1869 : sort_input_target);
1870 : }
1871 : } /* end of if (setOperations) */
1872 :
1873 : /*
1874 : * If ORDER BY was given, consider ways to implement that, and generate a
1875 : * new upperrel containing only paths that emit the correct ordering and
1876 : * project the correct final_target. We can apply the original
1877 : * limit_tuples limit in sort costing here, but only if there are no
1878 : * postponed SRFs.
1879 : */
1880 539946 : if (parse->sortClause)
1881 : {
1882 75624 : current_rel = create_ordered_paths(root,
1883 : current_rel,
1884 : final_target,
1885 : final_target_parallel_safe,
1886 : have_postponed_srfs ? -1.0 :
1887 : limit_tuples);
1888 : /* Fix things up if final_target contains SRFs */
1889 75624 : if (parse->hasTargetSRFs)
1890 220 : adjust_paths_for_srfs(root, current_rel,
1891 : final_targets,
1892 : final_targets_contain_srfs);
1893 : }
1894 :
1895 : /*
1896 : * Now we are prepared to build the final-output upperrel.
1897 : */
1898 539946 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1899 :
1900 : /*
1901 : * If the input rel is marked consider_parallel and there's nothing that's
1902 : * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1903 : * consider_parallel as well. Note that if the query has rowMarks or is
1904 : * not a SELECT, consider_parallel will be false for every relation in the
1905 : * query.
1906 : */
1907 718956 : if (current_rel->consider_parallel &&
1908 357996 : is_parallel_safe(root, parse->limitOffset) &&
1909 178986 : is_parallel_safe(root, parse->limitCount))
1910 178980 : final_rel->consider_parallel = true;
1911 :
1912 : /*
1913 : * If the current_rel belongs to a single FDW, so does the final_rel.
1914 : */
1915 539946 : final_rel->serverid = current_rel->serverid;
1916 539946 : final_rel->userid = current_rel->userid;
1917 539946 : final_rel->useridiscurrent = current_rel->useridiscurrent;
1918 539946 : final_rel->fdwroutine = current_rel->fdwroutine;
1919 :
1920 : /*
1921 : * Generate paths for the final_rel. Insert all surviving paths, with
1922 : * LockRows, Limit, and/or ModifyTable steps added if needed.
1923 : */
1924 1101072 : foreach(lc, current_rel->pathlist)
1925 : {
1926 561126 : Path *path = (Path *) lfirst(lc);
1927 :
1928 : /*
1929 : * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1930 : * (Note: we intentionally test parse->rowMarks not root->rowMarks
1931 : * here. If there are only non-locking rowmarks, they should be
1932 : * handled by the ModifyTable node instead. However, root->rowMarks
1933 : * is what goes into the LockRows node.)
1934 : */
1935 561126 : if (parse->rowMarks)
1936 : {
1937 14012 : path = (Path *) create_lockrows_path(root, final_rel, path,
1938 : root->rowMarks,
1939 : assign_special_exec_param(root));
1940 : }
1941 :
1942 : /*
1943 : * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1944 : */
1945 561126 : if (limit_needed(parse))
1946 : {
1947 6080 : path = (Path *) create_limit_path(root, final_rel, path,
1948 : parse->limitOffset,
1949 : parse->limitCount,
1950 : parse->limitOption,
1951 : offset_est, count_est);
1952 : }
1953 :
1954 : /*
1955 : * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1956 : */
1957 561126 : if (parse->commandType != CMD_SELECT)
1958 : {
1959 : Index rootRelation;
1960 89114 : List *resultRelations = NIL;
1961 89114 : List *updateColnosLists = NIL;
1962 89114 : List *withCheckOptionLists = NIL;
1963 89114 : List *returningLists = NIL;
1964 89114 : List *mergeActionLists = NIL;
1965 89114 : List *mergeJoinConditions = NIL;
1966 : List *rowMarks;
1967 :
1968 89114 : if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1969 : {
1970 : /* Inherited UPDATE/DELETE/MERGE */
1971 2904 : RelOptInfo *top_result_rel = find_base_rel(root,
1972 : parse->resultRelation);
1973 2904 : int resultRelation = -1;
1974 :
1975 : /* Pass the root result rel forward to the executor. */
1976 2904 : rootRelation = parse->resultRelation;
1977 :
1978 : /* Add only leaf children to ModifyTable. */
1979 8450 : while ((resultRelation = bms_next_member(root->leaf_result_relids,
1980 8450 : resultRelation)) >= 0)
1981 : {
1982 5546 : RelOptInfo *this_result_rel = find_base_rel(root,
1983 : resultRelation);
1984 :
1985 : /*
1986 : * Also exclude any leaf rels that have turned dummy since
1987 : * being added to the list, for example, by being excluded
1988 : * by constraint exclusion.
1989 : */
1990 5546 : if (IS_DUMMY_REL(this_result_rel))
1991 174 : continue;
1992 :
1993 : /* Build per-target-rel lists needed by ModifyTable */
1994 5372 : resultRelations = lappend_int(resultRelations,
1995 : resultRelation);
1996 5372 : if (parse->commandType == CMD_UPDATE)
1997 : {
1998 3692 : List *update_colnos = root->update_colnos;
1999 :
2000 3692 : if (this_result_rel != top_result_rel)
2001 : update_colnos =
2002 3692 : adjust_inherited_attnums_multilevel(root,
2003 : update_colnos,
2004 : this_result_rel->relid,
2005 : top_result_rel->relid);
2006 3692 : updateColnosLists = lappend(updateColnosLists,
2007 : update_colnos);
2008 : }
2009 5372 : if (parse->withCheckOptions)
2010 : {
2011 504 : List *withCheckOptions = parse->withCheckOptions;
2012 :
2013 504 : if (this_result_rel != top_result_rel)
2014 : withCheckOptions = (List *)
2015 504 : adjust_appendrel_attrs_multilevel(root,
2016 : (Node *) withCheckOptions,
2017 : this_result_rel,
2018 : top_result_rel);
2019 504 : withCheckOptionLists = lappend(withCheckOptionLists,
2020 : withCheckOptions);
2021 : }
2022 5372 : if (parse->returningList)
2023 : {
2024 846 : List *returningList = parse->returningList;
2025 :
2026 846 : if (this_result_rel != top_result_rel)
2027 : returningList = (List *)
2028 846 : adjust_appendrel_attrs_multilevel(root,
2029 : (Node *) returningList,
2030 : this_result_rel,
2031 : top_result_rel);
2032 846 : returningLists = lappend(returningLists,
2033 : returningList);
2034 : }
2035 5372 : if (parse->mergeActionList)
2036 : {
2037 : ListCell *l;
2038 542 : List *mergeActionList = NIL;
2039 :
2040 : /*
2041 : * Copy MergeActions and translate stuff that
2042 : * references attribute numbers.
2043 : */
2044 1692 : foreach(l, parse->mergeActionList)
2045 : {
2046 1150 : MergeAction *action = lfirst(l),
2047 1150 : *leaf_action = copyObject(action);
2048 :
2049 1150 : leaf_action->qual =
2050 1150 : adjust_appendrel_attrs_multilevel(root,
2051 : (Node *) action->qual,
2052 : this_result_rel,
2053 : top_result_rel);
2054 1150 : leaf_action->targetList = (List *)
2055 1150 : adjust_appendrel_attrs_multilevel(root,
2056 1150 : (Node *) action->targetList,
2057 : this_result_rel,
2058 : top_result_rel);
2059 1150 : if (leaf_action->commandType == CMD_UPDATE)
2060 642 : leaf_action->updateColnos =
2061 642 : adjust_inherited_attnums_multilevel(root,
2062 : action->updateColnos,
2063 : this_result_rel->relid,
2064 : top_result_rel->relid);
2065 1150 : mergeActionList = lappend(mergeActionList,
2066 : leaf_action);
2067 : }
2068 :
2069 542 : mergeActionLists = lappend(mergeActionLists,
2070 : mergeActionList);
2071 : }
2072 5372 : if (parse->commandType == CMD_MERGE)
2073 : {
2074 542 : Node *mergeJoinCondition = parse->mergeJoinCondition;
2075 :
2076 542 : if (this_result_rel != top_result_rel)
2077 : mergeJoinCondition =
2078 542 : adjust_appendrel_attrs_multilevel(root,
2079 : mergeJoinCondition,
2080 : this_result_rel,
2081 : top_result_rel);
2082 542 : mergeJoinConditions = lappend(mergeJoinConditions,
2083 : mergeJoinCondition);
2084 : }
2085 : }
2086 :
2087 2904 : if (resultRelations == NIL)
2088 : {
2089 : /*
2090 : * We managed to exclude every child rel, so generate a
2091 : * dummy one-relation plan using info for the top target
2092 : * rel (even though that may not be a leaf target).
2093 : * Although it's clear that no data will be updated or
2094 : * deleted, we still need to have a ModifyTable node so
2095 : * that any statement triggers will be executed. (This
2096 : * could be cleaner if we fixed nodeModifyTable.c to allow
2097 : * zero target relations, but that probably wouldn't be a
2098 : * net win.)
2099 : */
2100 30 : resultRelations = list_make1_int(parse->resultRelation);
2101 30 : if (parse->commandType == CMD_UPDATE)
2102 30 : updateColnosLists = list_make1(root->update_colnos);
2103 30 : if (parse->withCheckOptions)
2104 0 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2105 30 : if (parse->returningList)
2106 18 : returningLists = list_make1(parse->returningList);
2107 30 : if (parse->mergeActionList)
2108 0 : mergeActionLists = list_make1(parse->mergeActionList);
2109 30 : if (parse->commandType == CMD_MERGE)
2110 0 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2111 : }
2112 : }
2113 : else
2114 : {
2115 : /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2116 86210 : rootRelation = 0; /* there's no separate root rel */
2117 86210 : resultRelations = list_make1_int(parse->resultRelation);
2118 86210 : if (parse->commandType == CMD_UPDATE)
2119 12676 : updateColnosLists = list_make1(root->update_colnos);
2120 86210 : if (parse->withCheckOptions)
2121 1040 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2122 86210 : if (parse->returningList)
2123 2542 : returningLists = list_make1(parse->returningList);
2124 86210 : if (parse->mergeActionList)
2125 1592 : mergeActionLists = list_make1(parse->mergeActionList);
2126 86210 : if (parse->commandType == CMD_MERGE)
2127 1592 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2128 : }
2129 :
2130 : /*
2131 : * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2132 : * will have dealt with fetching non-locked marked rows, else we
2133 : * need to have ModifyTable do that.
2134 : */
2135 89114 : if (parse->rowMarks)
2136 0 : rowMarks = NIL;
2137 : else
2138 89114 : rowMarks = root->rowMarks;
2139 :
2140 : path = (Path *)
2141 89114 : create_modifytable_path(root, final_rel,
2142 : path,
2143 : parse->commandType,
2144 89114 : parse->canSetTag,
2145 89114 : parse->resultRelation,
2146 : rootRelation,
2147 : resultRelations,
2148 : updateColnosLists,
2149 : withCheckOptionLists,
2150 : returningLists,
2151 : rowMarks,
2152 : parse->onConflict,
2153 : mergeActionLists,
2154 : mergeJoinConditions,
2155 : assign_special_exec_param(root));
2156 : }
2157 :
2158 : /* And shove it into final_rel */
2159 561126 : add_path(final_rel, path);
2160 : }
2161 :
2162 : /*
2163 : * Generate partial paths for final_rel, too, if outer query levels might
2164 : * be able to make use of them.
2165 : */
2166 539946 : if (final_rel->consider_parallel && root->query_level > 1 &&
2167 30636 : !limit_needed(parse))
2168 : {
2169 : Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2170 30558 : foreach(lc, current_rel->partial_pathlist)
2171 : {
2172 108 : Path *partial_path = (Path *) lfirst(lc);
2173 :
2174 108 : add_partial_path(final_rel, partial_path);
2175 : }
2176 : }
2177 :
2178 539946 : extra.limit_needed = limit_needed(parse);
2179 539946 : extra.limit_tuples = limit_tuples;
2180 539946 : extra.count_est = count_est;
2181 539946 : extra.offset_est = offset_est;
2182 :
2183 : /*
2184 : * If there is an FDW that's responsible for all baserels of the query,
2185 : * let it consider adding ForeignPaths.
2186 : */
2187 539946 : if (final_rel->fdwroutine &&
2188 1260 : final_rel->fdwroutine->GetForeignUpperPaths)
2189 1192 : final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2190 : current_rel, final_rel,
2191 : &extra);
2192 :
2193 : /* Let extensions possibly add some more paths */
2194 539946 : if (create_upper_paths_hook)
2195 0 : (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2196 : current_rel, final_rel, &extra);
2197 :
2198 : /* Note: currently, we leave it to callers to do set_cheapest() */
2199 539946 : }
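
Because grouping_planner() invokes create_upper_paths_hook with
UPPERREL_FINAL just before returning, an extension can inspect or extend the
final rel's pathlist. A minimal, hypothetical sketch of such an extension
(the function and variable names here are illustrative, not from
PostgreSQL):

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static create_upper_paths_hook_type prev_upper_paths_hook = NULL;

/* called for each upper rel grouping_planner() plans, UPPERREL_FINAL last */
static void
my_upper_paths(PlannerInfo *root, UpperRelationKind stage,
			   RelOptInfo *input_rel, RelOptInfo *output_rel,
			   void *extra)
{
	if (prev_upper_paths_hook)
		prev_upper_paths_hook(root, stage, input_rel, output_rel, extra);

	if (stage == UPPERREL_FINAL)
	{
		/* inspect output_rel->pathlist here, or add_path() a custom Path */
	}
}

void
_PG_init(void)
{
	prev_upper_paths_hook = create_upper_paths_hook;
	create_upper_paths_hook = my_upper_paths;
}
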
2200 :
2201 : /*
2202 : * Do preprocessing for groupingSets clause and related data.
2203 : *
2204 : * We expect that parse->groupingSets has already been expanded into a flat
2205 : * list of grouping sets (that is, just integer Lists of ressortgroupref
2206 : * numbers) by expand_grouping_sets(). This function handles the preliminary
2207 : * steps of organizing the grouping sets into lists of rollups, and preparing
2208 : * annotations which will later be filled in with size estimates.
2209 : */
2210 : static grouping_sets_data *
2211 980 : preprocess_grouping_sets(PlannerInfo *root)
2212 : {
2213 980 : Query *parse = root->parse;
2214 : List *sets;
2215 980 : int maxref = 0;
2216 : ListCell *lc_set;
2217 980 : grouping_sets_data *gd = palloc0_object(grouping_sets_data);
2218 :
2219 : /*
2220 : * We don't currently make any attempt to optimize the groupClause when
2221 : * there are grouping sets, so just duplicate it in processed_groupClause.
2222 : */
2223 980 : root->processed_groupClause = parse->groupClause;
2224 :
2225 : /* Detect unhashable and unsortable grouping expressions */
2226 980 : gd->any_hashable = false;
2227 980 : gd->unhashable_refs = NULL;
2228 980 : gd->unsortable_refs = NULL;
2229 980 : gd->unsortable_sets = NIL;
2230 :
2231 980 : if (parse->groupClause)
2232 : {
2233 : ListCell *lc;
2234 :
2235 2864 : foreach(lc, parse->groupClause)
2236 : {
2237 1962 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2238 1962 : Index ref = gc->tleSortGroupRef;
2239 :
2240 1962 : if (ref > maxref)
2241 1914 : maxref = ref;
2242 :
2243 1962 : if (!gc->hashable)
2244 30 : gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2245 :
2246 1962 : if (!OidIsValid(gc->sortop))
2247 42 : gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2248 : }
2249 : }
2250 :
2251 : /* Allocate workspace array for remapping */
2252 980 : gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2253 :
2254 : /*
2255 : * If we have any unsortable sets, we must extract them before trying to
2256 : * prepare rollups. Unsortable sets don't go through
2257 : * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2258 : * here.
2259 : */
2260 980 : if (!bms_is_empty(gd->unsortable_refs))
2261 : {
2262 42 : List *sortable_sets = NIL;
2263 : ListCell *lc;
2264 :
2265 126 : foreach(lc, parse->groupingSets)
2266 : {
2267 90 : List *gset = (List *) lfirst(lc);
2268 :
2269 90 : if (bms_overlap_list(gd->unsortable_refs, gset))
2270 : {
2271 48 : GroupingSetData *gs = makeNode(GroupingSetData);
2272 :
2273 48 : gs->set = gset;
2274 48 : gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2275 :
2276 : /*
2277 : * We must enforce here that an unsortable set is hashable;
2278 : * later code assumes this. Parse analysis only checks that
2279 : * every individual column is either hashable or sortable.
2280 : *
2281 : * Note that passing this test doesn't guarantee we can
2282 : * generate a plan; there might be other showstoppers.
2283 : */
2284 48 : if (bms_overlap_list(gd->unhashable_refs, gset))
2285 6 : ereport(ERROR,
2286 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2287 : errmsg("could not implement GROUP BY"),
2288 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2289 : }
2290 : else
2291 42 : sortable_sets = lappend(sortable_sets, gset);
2292 : }
2293 :
2294 36 : if (sortable_sets)
2295 30 : sets = extract_rollup_sets(sortable_sets);
2296 : else
2297 6 : sets = NIL;
2298 : }
2299 : else
2300 938 : sets = extract_rollup_sets(parse->groupingSets);
2301 :
2302 2538 : foreach(lc_set, sets)
2303 : {
2304 1564 : List *current_sets = (List *) lfirst(lc_set);
2305 1564 : RollupData *rollup = makeNode(RollupData);
2306 : GroupingSetData *gs;
2307 :
2308 : /*
2309 : * Reorder the current list of grouping sets into correct prefix
2310 : * order. If only one aggregation pass is needed, try to make the
2311 : * list match the ORDER BY clause; if more than one pass is needed, we
2312 : * don't bother with that.
2313 : *
2314 : * Note that this reorders the sets from smallest-member-first to
2315 : * largest-member-first, and applies the GroupingSetData annotations,
2316 : * though the data will be filled in later.
2317 : */
2318 1564 : current_sets = reorder_grouping_sets(current_sets,
2319 1564 : (list_length(sets) == 1
2320 : ? parse->sortClause
2321 : : NIL));
2322 :
2323 : /*
2324 : * Get the initial (and therefore largest) grouping set.
2325 : */
2326 1564 : gs = linitial_node(GroupingSetData, current_sets);
2327 :
2328 : /*
2329 : * Order the groupClause appropriately. If the first grouping set is
2330 : * empty, then the groupClause must also be empty; otherwise we have
2331 : * to force the groupClause to match that grouping set's order.
2332 : *
2333 : * (The first grouping set can be empty even though parse->groupClause
2334 : * is not empty only if all non-empty grouping sets are unsortable.
2335 : * The groupClauses for hashed grouping sets are built later on.)
2336 : */
2337 1564 : if (gs->set)
2338 1486 : rollup->groupClause = preprocess_groupclause(root, gs->set);
2339 : else
2340 78 : rollup->groupClause = NIL;
2341 :
2342 : /*
2343 : * Is it hashable? We pretend empty sets are hashable even though we
2344 : * actually force them not to be hashed later. But don't bother if
2345 : * there's nothing but empty sets (since in that case we can't hash
2346 : * anything).
2347 : */
2348 1564 : if (gs->set &&
2349 1486 : !bms_overlap_list(gd->unhashable_refs, gs->set))
2350 : {
2351 1462 : rollup->hashable = true;
2352 1462 : gd->any_hashable = true;
2353 : }
2354 :
2355 : /*
2356 : * Now that we've pinned down an order for the groupClause for this
2357 : * list of grouping sets, we need to remap the entries in the grouping
2358 : * sets from sortgrouprefs to plain indices (0-based) into the
2359 : * groupClause for this collection of grouping sets. We keep the
2360 : * original form for later use, though.
2361 : */
2362 1564 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2363 : current_sets,
2364 : gd->tleref_to_colnum_map);
2365 1564 : rollup->gsets_data = current_sets;
2366 :
2367 1564 : gd->rollups = lappend(gd->rollups, rollup);
2368 : }
2369 :
2370 974 : if (gd->unsortable_sets)
2371 : {
2372 : /*
2373 : * We have not yet pinned down a groupclause for this, but we will
2374 : * need index-based lists for estimation purposes. Construct
2375 : * hash_sets_idx based on the entire original groupclause for now.
2376 : */
2377 36 : gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2378 : gd->unsortable_sets,
2379 : gd->tleref_to_colnum_map);
2380 36 : gd->any_hashable = true;
2381 : }
2382 :
2383 974 : return gd;
2384 : }
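
A worked example (illustrative, not from the source): GROUP BY ROLLUP(a, b)
arrives here as the flat sets (a,b), (a) and (). These form a single chain
under set inclusion, so extract_rollup_sets returns one list,
reorder_grouping_sets orders it largest-set-first, and gd->rollups ends up
holding a single RollupData covering all three sets, implementable with one
sorted aggregation pass.
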
2385 :
2386 : /*
2387 : * Given a groupclause and a list of GroupingSetData, return equivalent sets
2388 : * (without annotation) mapped to indexes into the given groupclause.
2389 : */
2390 : static List *
2391 4464 : remap_to_groupclause_idx(List *groupClause,
2392 : List *gsets,
2393 : int *tleref_to_colnum_map)
2394 : {
2395 4464 : int ref = 0;
2396 4464 : List *result = NIL;
2397 : ListCell *lc;
2398 :
2399 10804 : foreach(lc, groupClause)
2400 : {
2401 6340 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2402 :
2403 6340 : tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2404 : }
2405 :
2406 10308 : foreach(lc, gsets)
2407 : {
2408 5844 : List *set = NIL;
2409 : ListCell *lc2;
2410 5844 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2411 :
2412 13046 : foreach(lc2, gs->set)
2413 : {
2414 7202 : set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2415 : }
2416 :
2417 5844 : result = lappend(result, set);
2418 : }
2419 :
2420 4464 : return result;
2421 : }
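
For example (illustrative): if the groupClause entries carry
tleSortGroupRefs 3, 1, 5 in that order, the first loop records the mapping
3->0, 1->1, 5->2, and a grouping set listed as (5, 3) is returned as the
index list (2, 0).
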
2422 :
2423 :
2424 : /*
2425 : * preprocess_rowmarks - set up PlanRowMarks if needed
2426 : */
2427 : static void
2428 543988 : preprocess_rowmarks(PlannerInfo *root)
2429 : {
2430 543988 : Query *parse = root->parse;
2431 : Bitmapset *rels;
2432 : List *prowmarks;
2433 : ListCell *l;
2434 : int i;
2435 :
2436 543988 : if (parse->rowMarks)
2437 : {
2438 : /*
2439 : * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2440 : * grouping, since grouping renders a reference to individual tuple
2441 : * CTIDs invalid. This is also checked at parse time, but that's
2442 : * insufficient because of rule substitution, query pullup, etc.
2443 : */
2444 13524 : CheckSelectLocking(parse, linitial_node(RowMarkClause,
2445 : parse->rowMarks)->strength);
2446 : }
2447 : else
2448 : {
2449 : /*
2450 : * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2451 : * UPDATE/SHARE.
2452 : */
2453 530464 : if (parse->commandType != CMD_UPDATE &&
2454 515750 : parse->commandType != CMD_DELETE &&
2455 511384 : parse->commandType != CMD_MERGE)
2456 509542 : return;
2457 : }
2458 :
2459 : /*
2460 : * We need to have rowmarks for all base relations except the target. We
2461 : * make a bitmapset of all base rels and then remove the items we don't
2462 : * need or have FOR [KEY] UPDATE/SHARE marks for.
2463 : */
2464 34446 : rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2465 34446 : if (parse->resultRelation)
2466 20922 : rels = bms_del_member(rels, parse->resultRelation);
2467 :
2468 : /*
2469 : * Convert RowMarkClauses to PlanRowMark representation.
2470 : */
2471 34446 : prowmarks = NIL;
2472 48200 : foreach(l, parse->rowMarks)
2473 : {
2474 13754 : RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2475 13754 : RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2476 : PlanRowMark *newrc;
2477 :
2478 : /*
2479 : * Currently, it is syntactically impossible to have FOR UPDATE et al
2480 : * applied to an update/delete target rel. If that ever becomes
2481 : * possible, we should drop the target from the PlanRowMark list.
2482 : */
2483 : Assert(rc->rti != parse->resultRelation);
2484 :
2485 : /*
2486 : * Ignore RowMarkClauses for subqueries; they aren't real tables and
2487 : * can't support true locking. Subqueries that got flattened into the
2488 : * main query should be ignored completely. Any that didn't will get
2489 : * ROW_MARK_COPY items in the next loop.
2490 : */
2491 13754 : if (rte->rtekind != RTE_RELATION)
2492 60 : continue;
2493 :
2494 13694 : rels = bms_del_member(rels, rc->rti);
2495 :
2496 13694 : newrc = makeNode(PlanRowMark);
2497 13694 : newrc->rti = newrc->prti = rc->rti;
2498 13694 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2499 13694 : newrc->markType = select_rowmark_type(rte, rc->strength);
2500 13694 : newrc->allMarkTypes = (1 << newrc->markType);
2501 13694 : newrc->strength = rc->strength;
2502 13694 : newrc->waitPolicy = rc->waitPolicy;
2503 13694 : newrc->isParent = false;
2504 :
2505 13694 : prowmarks = lappend(prowmarks, newrc);
2506 : }
2507 :
2508 : /*
2509 : * Now, add rowmarks for any non-target, non-locked base relations.
2510 : */
2511 34446 : i = 0;
2512 79706 : foreach(l, parse->rtable)
2513 : {
2514 45260 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2515 : PlanRowMark *newrc;
2516 :
2517 45260 : i++;
2518 45260 : if (!bms_is_member(i, rels))
2519 41512 : continue;
2520 :
2521 3748 : newrc = makeNode(PlanRowMark);
2522 3748 : newrc->rti = newrc->prti = i;
2523 3748 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2524 3748 : newrc->markType = select_rowmark_type(rte, LCS_NONE);
2525 3748 : newrc->allMarkTypes = (1 << newrc->markType);
2526 3748 : newrc->strength = LCS_NONE;
2527 3748 : newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2528 3748 : newrc->isParent = false;
2529 :
2530 3748 : prowmarks = lappend(prowmarks, newrc);
2531 : }
2532 :
2533 34446 : root->rowMarks = prowmarks;
2534 : }
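
As an example (illustrative): for UPDATE a SET ... FROM b, the target rel a
is removed from the rels set, there are no RowMarkClauses, and the second
loop builds a PlanRowMark for b whose markType comes from
select_rowmark_type(rte, LCS_NONE) -- ROW_MARK_REFERENCE for a plain table
-- giving the executor the ability to re-fetch b's rows when rechecking
updated tuples.
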
2535 :
2536 : /*
2537 : * Select RowMarkType to use for a given table
2538 : */
2539 : RowMarkType
2540 19852 : select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2541 : {
2542 19852 : if (rte->rtekind != RTE_RELATION)
2543 : {
2544 : /* If it's not a table at all, use ROW_MARK_COPY */
2545 1416 : return ROW_MARK_COPY;
2546 : }
2547 18436 : else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2548 : {
2549 : /* Let the FDW select the rowmark type, if it wants to */
2550 228 : FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2551 :
2552 228 : if (fdwroutine->GetForeignRowMarkType != NULL)
2553 0 : return fdwroutine->GetForeignRowMarkType(rte, strength);
2554 : /* Otherwise, use ROW_MARK_COPY by default */
2555 228 : return ROW_MARK_COPY;
2556 : }
2557 : else
2558 : {
2559 : /* Regular table, apply the appropriate lock type */
2560 18208 : switch (strength)
2561 : {
2562 2546 : case LCS_NONE:
2563 :
2564 : /*
2565 : * We don't need a tuple lock, only the ability to re-fetch
2566 : * the row.
2567 : */
2568 2546 : return ROW_MARK_REFERENCE;
2569 : break;
2570 13742 : case LCS_FORKEYSHARE:
2571 13742 : return ROW_MARK_KEYSHARE;
2572 : break;
2573 306 : case LCS_FORSHARE:
2574 306 : return ROW_MARK_SHARE;
2575 : break;
2576 78 : case LCS_FORNOKEYUPDATE:
2577 78 : return ROW_MARK_NOKEYEXCLUSIVE;
2578 : break;
2579 1536 : case LCS_FORUPDATE:
2580 1536 : return ROW_MARK_EXCLUSIVE;
2581 : break;
2582 : }
2583 0 : elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2584 : return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2585 : }
2586 : }
2587 :
2588 : /*
2589 : * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2590 : *
2591 : * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2592 : * results back in *count_est and *offset_est. These variables are set to
2593 : * 0 if the corresponding clause is not present, and -1 if it's present
2594 : * but we couldn't estimate the value for it. (The "0" convention is OK
2595 : * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2596 : * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2597 : * usual practice of never estimating less than one row.) These values will
2598 : * be passed to create_limit_path, which see if you change this code.
2599 : *
2600 : * The return value is the suitably adjusted tuple_fraction to use for
2601 : * planning the query. This adjustment is not overridable, since it reflects
2602 : * plan actions that grouping_planner() will certainly take, not assumptions
2603 : * about context.
2604 : */
2605 : static double
2606 5068 : preprocess_limit(PlannerInfo *root, double tuple_fraction,
2607 : int64 *offset_est, int64 *count_est)
2608 : {
2609 5068 : Query *parse = root->parse;
2610 : Node *est;
2611 : double limit_fraction;
2612 :
2613 : /* Should not be called unless LIMIT or OFFSET */
2614 : Assert(parse->limitCount || parse->limitOffset);
2615 :
2616 : /*
2617 : * Try to obtain the clause values. We use estimate_expression_value
2618 : * primarily because it can sometimes do something useful with Params.
2619 : */
2620 5068 : if (parse->limitCount)
2621 : {
2622 4540 : est = estimate_expression_value(root, parse->limitCount);
2623 4540 : if (est && IsA(est, Const))
2624 : {
2625 4534 : if (((Const *) est)->constisnull)
2626 : {
2627 : /* NULL indicates LIMIT ALL, ie, no limit */
2628 0 : *count_est = 0; /* treat as not present */
2629 : }
2630 : else
2631 : {
2632 4534 : *count_est = DatumGetInt64(((Const *) est)->constvalue);
2633 4534 : if (*count_est <= 0)
2634 150 : *count_est = 1; /* force to at least 1 */
2635 : }
2636 : }
2637 : else
2638 6 : *count_est = -1; /* can't estimate */
2639 : }
2640 : else
2641 528 : *count_est = 0; /* not present */
2642 :
2643 5068 : if (parse->limitOffset)
2644 : {
2645 900 : est = estimate_expression_value(root, parse->limitOffset);
2646 900 : if (est && IsA(est, Const))
2647 : {
2648 876 : if (((Const *) est)->constisnull)
2649 : {
2650 : /* Treat NULL as no offset; the executor will too */
2651 0 : *offset_est = 0; /* treat as not present */
2652 : }
2653 : else
2654 : {
2655 876 : *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2656 876 : if (*offset_est < 0)
2657 0 : *offset_est = 0; /* treat as not present */
2658 : }
2659 : }
2660 : else
2661 24 : *offset_est = -1; /* can't estimate */
2662 : }
2663 : else
2664 4168 : *offset_est = 0; /* not present */
2665 :
2666 5068 : if (*count_est != 0)
2667 : {
2668 : /*
2669 : * A LIMIT clause limits the absolute number of tuples returned.
2670 : * However, if it's not a constant LIMIT then we have to guess; for
2671 : * lack of a better idea, assume 10% of the plan's result is wanted.
2672 : */
2673 4540 : if (*count_est < 0 || *offset_est < 0)
2674 : {
2675 : /* LIMIT or OFFSET is an expression ... punt ... */
2676 24 : limit_fraction = 0.10;
2677 : }
2678 : else
2679 : {
2680 : /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2681 4516 : limit_fraction = (double) *count_est + (double) *offset_est;
2682 : }
2683 :
2684 : /*
2685 : * If we have absolute limits from both caller and LIMIT, use the
2686 : * smaller value; likewise if they are both fractional. If one is
2687 : * fractional and the other absolute, we can't easily determine which
2688 : * is smaller, but we use the heuristic that the absolute will usually
2689 : * be smaller.
2690 : */
2691 4540 : if (tuple_fraction >= 1.0)
2692 : {
2693 6 : if (limit_fraction >= 1.0)
2694 : {
2695 : /* both absolute */
2696 6 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2697 : }
2698 : else
2699 : {
2700 : /* caller absolute, limit fractional; use caller's value */
2701 : }
2702 : }
2703 4534 : else if (tuple_fraction > 0.0)
2704 : {
2705 148 : if (limit_fraction >= 1.0)
2706 : {
2707 : /* caller fractional, limit absolute; use limit */
2708 148 : tuple_fraction = limit_fraction;
2709 : }
2710 : else
2711 : {
2712 : /* both fractional */
2713 0 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2714 : }
2715 : }
2716 : else
2717 : {
2718 : /* no info from caller, just use limit */
2719 4386 : tuple_fraction = limit_fraction;
2720 : }
2721 : }
2722 528 : else if (*offset_est != 0 && tuple_fraction > 0.0)
2723 : {
2724 : /*
2725 : * We have an OFFSET but no LIMIT. This acts entirely differently
2726 : * from the LIMIT case: here, we need to increase rather than decrease
2727 : * the caller's tuple_fraction, because the OFFSET acts to cause more
2728 : * tuples to be fetched instead of fewer. This only matters if we got
2729 : * a tuple_fraction > 0, however.
2730 : *
2731 : * As above, use 10% if OFFSET is present but unestimatable.
2732 : */
2733 16 : if (*offset_est < 0)
2734 0 : limit_fraction = 0.10;
2735 : else
2736 16 : limit_fraction = (double) *offset_est;
2737 :
2738 : /*
2739 : * If we have absolute counts from both caller and OFFSET, add them
2740 : * together; likewise if they are both fractional. If one is
2741 : * fractional and the other absolute, we want to take the larger, and
2742 : * we heuristically assume that's the fractional one.
2743 : */
2744 16 : if (tuple_fraction >= 1.0)
2745 : {
2746 0 : if (limit_fraction >= 1.0)
2747 : {
2748 : /* both absolute, so add them together */
2749 0 : tuple_fraction += limit_fraction;
2750 : }
2751 : else
2752 : {
2753 : /* caller absolute, limit fractional; use limit */
2754 0 : tuple_fraction = limit_fraction;
2755 : }
2756 : }
2757 : else
2758 : {
2759 16 : if (limit_fraction >= 1.0)
2760 : {
2761 : /* caller fractional, limit absolute; use caller's value */
2762 : }
2763 : else
2764 : {
2765 : /* both fractional, so add them together */
2766 0 : tuple_fraction += limit_fraction;
2767 0 : if (tuple_fraction >= 1.0)
2768 0 : tuple_fraction = 0.0; /* assume fetch all */
2769 : }
2770 : }
2771 : }
2772 :
2773 5068 : return tuple_fraction;
2774 : }
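
To make the merging rules concrete, a hypothetical restatement (mirroring,
not calling, the code above): for LIMIT 10 OFFSET 5 both clauses are
constants, so count_est = 10, offset_est = 5, and limit_fraction = 15.0 is
absolute; the caller's tuple_fraction is then folded in like this:

#include "postgres.h"

/* Illustration only: the LIMIT-present branch of the merging logic */
static double
merge_tuple_fraction_example(double tuple_fraction)
{
	double		limit_fraction = 10.0 + 5.0;	/* count_est + offset_est */

	if (tuple_fraction >= 1.0)
		return Min(tuple_fraction, limit_fraction); /* both absolute */
	else if (tuple_fraction > 0.0)
		return limit_fraction;	/* caller fractional, limit absolute */
	else
		return limit_fraction;	/* no info from caller, just use limit */
}
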
2775 :
2776 : /*
2777 : * limit_needed - do we actually need a Limit plan node?
2778 : *
2779 : * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2780 : * a Limit node. This is worth checking for because "OFFSET 0" is a common
2781 : * locution for an optimization fence. (Because other places in the planner
2782 : * merely check whether parse->limitOffset isn't NULL, it will still work as
2783 : * an optimization fence --- we're just suppressing unnecessary run-time
2784 : * overhead.)
2785 : *
2786 : * This might look like it could be merged into preprocess_limit, but there's
2787 : * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2788 : * in preprocess_limit it's good enough to consider estimated values.
2789 : */
2790 : bool
2791 1151304 : limit_needed(Query *parse)
2792 : {
2793 : Node *node;
2794 :
2795 1151304 : node = parse->limitCount;
2796 1151304 : if (node)
2797 : {
2798 10936 : if (IsA(node, Const))
2799 : {
2800 : /* NULL indicates LIMIT ALL, ie, no limit */
2801 10700 : if (!((Const *) node)->constisnull)
2802 10700 : return true; /* LIMIT with a constant value */
2803 : }
2804 : else
2805 236 : return true; /* non-constant LIMIT */
2806 : }
2807 :
2808 1140368 : node = parse->limitOffset;
2809 1140368 : if (node)
2810 : {
2811 1534 : if (IsA(node, Const))
2812 : {
2813 : /* Treat NULL as no offset; the executor would too */
2814 1226 : if (!((Const *) node)->constisnull)
2815 : {
2816 1226 : int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2817 :
2818 1226 : if (offset != 0)
2819 146 : return true; /* OFFSET with a nonzero value */
2820 : }
2821 : }
2822 : else
2823 308 : return true; /* non-constant OFFSET */
2824 : }
2825 :
2826 1139914 : return false; /* don't need a Limit plan node */
2827 : }
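
An illustrative sketch (not part of planner.c) of the "OFFSET 0" case the
comment above describes, assuming a Query being manipulated in backend code:

#include "postgres.h"
#include "catalog/pg_type.h"
#include "nodes/makefuncs.h"
#include "optimizer/planner.h"

/* Illustration only: a constant OFFSET 0 suppresses the Limit node */
static void
offset_zero_example(Query *parse)
{
	parse->limitCount = NULL;
	parse->limitOffset = (Node *)
		makeConst(INT8OID, -1, InvalidOid, sizeof(int64),
				  Int64GetDatum(0), false, FLOAT8PASSBYVAL);

	/* still an optimization fence, but no run-time Limit node is added */
	Assert(!limit_needed(parse));
}
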
2828 :
2829 : /*
2830 : * preprocess_groupclause - do preparatory work on GROUP BY clause
2831 : *
2832 : * The idea here is to adjust the ordering of the GROUP BY elements
2833 : * (which in itself is semantically insignificant) to match ORDER BY,
2834 : * thereby allowing a single sort operation to both implement the ORDER BY
2835 : * requirement and set up for a Unique step that implements GROUP BY.
2836 : * We also consider a partial match between GROUP BY and ORDER BY elements,
2837 : * which could allow ORDER BY to be implemented using an incremental sort.
2838 : *
2839 : * We also consider other orderings of the GROUP BY elements, which could
2840 : * match the sort ordering of other possible plans (eg an indexscan) and
2841 : * thereby reduce cost. This is implemented during the generation of grouping
2842 : * paths. See get_useful_group_keys_orderings() for details.
2843 : *
2844 : * Note: we need no comparable processing of the distinctClause because
2845 : * the parser already enforced that that matches ORDER BY.
2846 : *
2847 : * Note: we return a fresh List, but its elements are the same
2848 : * SortGroupClauses appearing in parse->groupClause. This is important
2849 : * because later processing may modify the processed_groupClause list.
2850 : *
2851 : * For grouping sets, the order of items is instead forced to agree with that
2852 : * of the grouping set (and items not in the grouping set are skipped). The
2853 : * work of sorting the order of grouping set elements to match the ORDER BY if
2854 : * possible is done elsewhere.
2855 : */
2856 : static List *
2857 8378 : preprocess_groupclause(PlannerInfo *root, List *force)
2858 : {
2859 8378 : Query *parse = root->parse;
2860 8378 : List *new_groupclause = NIL;
2861 : ListCell *sl;
2862 : ListCell *gl;
2863 :
2864 : /* For grouping sets, we need to force the ordering */
2865 8378 : if (force)
2866 : {
2867 10612 : foreach(sl, force)
2868 : {
2869 6262 : Index ref = lfirst_int(sl);
2870 6262 : SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2871 :
2872 6262 : new_groupclause = lappend(new_groupclause, cl);
2873 : }
2874 :
2875 4350 : return new_groupclause;
2876 : }
2877 :
2878 : /* If no ORDER BY, nothing useful to do here */
2879 4028 : if (parse->sortClause == NIL)
2880 2214 : return list_copy(parse->groupClause);
2881 :
2882 : /*
2883 : * Scan the ORDER BY clause and construct a list of matching GROUP BY
2884 : * items, but only as far as we can make a matching prefix.
2885 : *
2886 : * This code assumes that the sortClause contains no duplicate items.
2887 : */
2888 3534 : foreach(sl, parse->sortClause)
2889 : {
2890 2366 : SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2891 :
2892 3462 : foreach(gl, parse->groupClause)
2893 : {
2894 2816 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2895 :
2896 2816 : if (equal(gc, sc))
2897 : {
2898 1720 : new_groupclause = lappend(new_groupclause, gc);
2899 1720 : break;
2900 : }
2901 : }
2902 2366 : if (gl == NULL)
2903 646 : break; /* no match, so stop scanning */
2904 : }
2905 :
2906 :
2907 : /* If no match at all, no point in reordering GROUP BY */
2908 1814 : if (new_groupclause == NIL)
2909 298 : return list_copy(parse->groupClause);
2910 :
2911 : /*
2912 : * Add any remaining GROUP BY items to the new list. We don't require a
2913 : * complete match, because even partial match allows ORDER BY to be
2914 : * implemented using incremental sort. Also, give up if there are any
2915 : * non-sortable GROUP BY items, since then there's no hope anyway.
2916 : */
2917 3402 : foreach(gl, parse->groupClause)
2918 : {
2919 1886 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2920 :
2921 1886 : if (list_member_ptr(new_groupclause, gc))
2922 1720 : continue; /* it matched an ORDER BY item */
2923 166 : if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2924 0 : return list_copy(parse->groupClause);
2925 166 : new_groupclause = lappend(new_groupclause, gc);
2926 : }
2927 :
2928 : /* Success --- install the rearranged GROUP BY list */
2929 : Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2930 1516 : return new_groupclause;
2931 : }
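/*
 * As a standalone sketch of the reordering above (independent of the
 * planner: int sort-group refs stand in for SortGroupClause nodes, and the
 * non-sortable-item bailout is omitted), the algorithm is: take ORDER BY
 * items in order for as long as each has a GROUP BY match, then append the
 * leftover GROUP BY items in their original order.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t
reorder_group_by(const int *group, size_t ngroup,
                 const int *order, size_t norder, int *out)
{
    size_t      n = 0;

    /* take ORDER BY items as long as each appears in GROUP BY */
    for (size_t i = 0; i < norder; i++)
    {
        bool        found = false;

        for (size_t j = 0; j < ngroup; j++)
        {
            if (group[j] == order[i])
            {
                out[n++] = group[j];
                found = true;
                break;
            }
        }
        if (!found)
            break;              /* the matching prefix ends here */
    }

    /* append remaining GROUP BY items, keeping their original order */
    for (size_t j = 0; j < ngroup; j++)
    {
        bool        already = false;

        for (size_t k = 0; k < n; k++)
            if (out[k] == group[j])
                already = true;
        if (!already)
            out[n++] = group[j];
    }
    return n;
}

int
main(void)
{
    /* GROUP BY b, a (refs 2, 1) with ORDER BY a, c (refs 1, 3) */
    const int   group[] = {2, 1};
    const int   order[] = {1, 3};
    int         out[2];
    size_t      n = reorder_group_by(group, 2, order, 2, out);

    /* prints "1 2": the prefix (a) matches ORDER BY, so one incremental
     * sort can serve both clauses */
    for (size_t i = 0; i < n; i++)
        printf("%d ", out[i]);
    printf("\n");
    return 0;
}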
2932 :
2933 : /*
2934 : * Extract lists of grouping sets that can be implemented using a single
2935 : * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2936 : *
2937 : * Input must be sorted with smallest sets first. Result has each sublist
2938 : * sorted with smallest sets first.
2939 : *
2940 : * We want to produce the absolute minimum possible number of lists here to
2941 : * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2942 : * of finding the minimal partition of a partially-ordered set into chains
2943 : * (which is what we need, taking the list of grouping sets as a poset ordered
2944 : * by set inclusion) can be mapped to the problem of finding the maximum
2945 : * cardinality matching on a bipartite graph, which is solvable in polynomial
2946 : * time, with a worst case no worse than O(n^2.5) and usually much
2947 : * better. Since our N is at most 4096, we don't need to consider fallbacks to
2948 : * heuristic or approximate methods. (Planning time for a 12-d cube is under
2949 : * half a second on my modest system even with optimization off and assertions
2950 : * on.)
2951 : */
2952 : static List *
2953 968 : extract_rollup_sets(List *groupingSets)
2954 : {
2955 968 : int num_sets_raw = list_length(groupingSets);
2956 968 : int num_empty = 0;
2957 968 : int num_sets = 0; /* distinct sets */
2958 968 : int num_chains = 0;
2959 968 : List *result = NIL;
2960 : List **results;
2961 : List **orig_sets;
2962 : Bitmapset **set_masks;
2963 : int *chains;
2964 : short **adjacency;
2965 : short *adjacency_buf;
2966 : BipartiteMatchState *state;
2967 : int i;
2968 : int j;
2969 : int j_size;
2970 968 : ListCell *lc1 = list_head(groupingSets);
2971 : ListCell *lc;
2972 :
2973 : /*
2974 : * Start by stripping out empty sets. The algorithm doesn't require this,
2975 : * but the planner currently needs all empty sets to be returned in the
2976 : * first list, so we strip them here and add them back after.
2977 : */
2978 1668 : while (lc1 && lfirst(lc1) == NIL)
2979 : {
2980 700 : ++num_empty;
2981 700 : lc1 = lnext(groupingSets, lc1);
2982 : }
2983 :
2984 : /* bail out now if it turns out that all we had were empty sets. */
2985 968 : if (!lc1)
2986 78 : return list_make1(groupingSets);
2987 :
2988 : /*----------
2989 : * We don't strictly need to remove duplicate sets here, but if we don't,
2990 : * they tend to become scattered through the result, which is a bit
2991 : * confusing (and irritating if we ever decide to optimize them out).
2992 : * So we remove them here and add them back after.
2993 : *
2994 : * For each non-duplicate set, we fill in the following:
2995 : *
2996 : * orig_sets[i] = list of the original set lists
2997 : * set_masks[i] = bitmapset for testing inclusion
2998 : * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2999 : *
3000 : * chains[i] will be the result group this set is assigned to.
3001 : *
3002 : * We index all of these from 1 rather than 0 because it is convenient
3003 : * to leave 0 free for the NIL node in the graph algorithm.
3004 : *----------
3005 : */
3006 890 : orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3007 890 : set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3008 890 : adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3009 890 : adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3010 :
3011 890 : j_size = 0;
3012 890 : j = 0;
3013 890 : i = 1;
3014 :
3015 3128 : for_each_cell(lc, groupingSets, lc1)
3016 : {
3017 2238 : List *candidate = (List *) lfirst(lc);
3018 2238 : Bitmapset *candidate_set = NULL;
3019 : ListCell *lc2;
3020 2238 : int dup_of = 0;
3021 :
3022 5394 : foreach(lc2, candidate)
3023 : {
3024 3156 : candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3025 : }
3026 :
3027 : /* we can only be a dup if we're the same length as a previous set */
3028 2238 : if (j_size == list_length(candidate))
3029 : {
3030 : int k;
3031 :
3032 2000 : for (k = j; k < i; ++k)
3033 : {
3034 1308 : if (bms_equal(set_masks[k], candidate_set))
3035 : {
3036 158 : dup_of = k;
3037 158 : break;
3038 : }
3039 : }
3040 : }
3041 1388 : else if (j_size < list_length(candidate))
3042 : {
3043 1388 : j_size = list_length(candidate);
3044 1388 : j = i;
3045 : }
3046 :
3047 2238 : if (dup_of > 0)
3048 : {
3049 158 : orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3050 158 : bms_free(candidate_set);
3051 : }
3052 : else
3053 : {
3054 : int k;
3055 2080 : int n_adj = 0;
3056 :
3057 2080 : orig_sets[i] = list_make1(candidate);
3058 2080 : set_masks[i] = candidate_set;
3059 :
3060 : /* fill in adjacency list; no need to compare equal-size sets */
3061 :
3062 3376 : for (k = j - 1; k > 0; --k)
3063 : {
3064 1296 : if (bms_is_subset(set_masks[k], candidate_set))
3065 1134 : adjacency_buf[++n_adj] = k;
3066 : }
3067 :
3068 2080 : if (n_adj > 0)
3069 : {
3070 622 : adjacency_buf[0] = n_adj;
3071 622 : adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3072 622 : memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3073 : }
3074 : else
3075 1458 : adjacency[i] = NULL;
3076 :
3077 2080 : ++i;
3078 : }
3079 : }
3080 :
3081 890 : num_sets = i - 1;
3082 :
3083 : /*
3084 : * Apply the graph matching algorithm to do the work.
3085 : */
3086 890 : state = BipartiteMatch(num_sets, num_sets, adjacency);
3087 :
3088 : /*
3089 : * Now, the state->pair* fields have the info we need to assign sets to
3090 : * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3091 : * pair_vu[v] = u (both will be true, but we check both so that we can do
3092 : * it in one pass)
3093 : */
3094 890 : chains = palloc0((num_sets + 1) * sizeof(int));
3095 :
3096 2970 : for (i = 1; i <= num_sets; ++i)
3097 : {
3098 2080 : int u = state->pair_vu[i];
3099 2080 : int v = state->pair_uv[i];
3100 :
3101 2080 : if (u > 0 && u < i)
3102 0 : chains[i] = chains[u];
3103 2080 : else if (v > 0 && v < i)
3104 594 : chains[i] = chains[v];
3105 : else
3106 1486 : chains[i] = ++num_chains;
3107 : }
3108 :
3109 : /* build result lists. */
3110 890 : results = palloc0((num_chains + 1) * sizeof(List *));
3111 :
3112 2970 : for (i = 1; i <= num_sets; ++i)
3113 : {
3114 2080 : int c = chains[i];
3115 :
3116 : Assert(c > 0);
3117 :
3118 2080 : results[c] = list_concat(results[c], orig_sets[i]);
3119 : }
3120 :
3121 : /* push any empty sets back on the first list. */
3122 1452 : while (num_empty-- > 0)
3123 562 : results[1] = lcons(NIL, results[1]);
3124 :
3125 : /* make result list */
3126 2376 : for (i = 1; i <= num_chains; ++i)
3127 1486 : result = lappend(result, results[i]);
3128 :
3129 : /*
3130 : * Free all the things.
3131 : *
3132 : * (This is over-fussy for small sets but for large sets we could have
3133 : * tied up a nontrivial amount of memory.)
3134 : */
3135 890 : BipartiteMatchFree(state);
3136 890 : pfree(results);
3137 890 : pfree(chains);
3138 2970 : for (i = 1; i <= num_sets; ++i)
3139 2080 : if (adjacency[i])
3140 622 : pfree(adjacency[i]);
3141 890 : pfree(adjacency);
3142 890 : pfree(adjacency_buf);
3143 890 : pfree(orig_sets);
3144 2970 : for (i = 1; i <= num_sets; ++i)
3145 2080 : bms_free(set_masks[i]);
3146 890 : pfree(set_masks);
3147 :
3148 890 : return result;
3149 : }
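/*
 * The mapping above is a classic reduction: a minimum partition of an
 * n-element poset into chains has size n - M, where M is a maximum matching
 * in the bipartite graph with an edge (u, v) whenever set u is a proper
 * subset of set v.  A standalone sketch with bitmask sets and Kuhn's
 * augmenting-path matching, in place of the Hopcroft-Karp implementation in
 * lib/bipartite_match:
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NSETS 4

/* grouping sets as bitmasks, smallest first: {a}, {c}, {a,b}, {a,b,c} */
static const unsigned sets[NSETS] = {0x1, 0x4, 0x3, 0x7};

static int  match_to[NSETS];    /* right vertex -> matched left vertex */
static bool seen[NSETS];

static bool
augment(int u)
{
    for (int v = 0; v < NSETS; v++)
    {
        /* edge u -> v iff sets[u] is a proper subset of sets[v] */
        if ((sets[u] & sets[v]) == sets[u] && sets[u] != sets[v] && !seen[v])
        {
            seen[v] = true;
            if (match_to[v] < 0 || augment(match_to[v]))
            {
                match_to[v] = u;
                return true;
            }
        }
    }
    return false;
}

int
main(void)
{
    int         matched = 0;

    memset(match_to, -1, sizeof(match_to));
    for (int u = 0; u < NSETS; u++)
    {
        memset(seen, 0, sizeof(seen));
        if (augment(u))
            matched++;
    }
    /* prints "2 chains", e.g. ({a},{a,b}) and ({c},{a,b,c}) */
    printf("%d chains\n", NSETS - matched);
    return 0;
}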
3150 :
3151 : /*
3152 : * Reorder the elements of a list of grouping sets such that they have correct
3153 : * prefix relationships. Also inserts the GroupingSetData annotations.
3154 : *
3155 : * The input must be ordered with smallest sets first; the result is returned
3156 : * with largest sets first. Note that the result shares no list substructure
3157 : * with the input, so it's safe for the caller to modify it later.
3158 : *
3159 : * If we're passed in a sortclause, we follow its order of columns to the
3160 : * extent possible, to minimize the chance that we add unnecessary sorts.
3161 : * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3162 : * gets implemented in one pass.)
3163 : */
3164 : static List *
3165 1564 : reorder_grouping_sets(List *groupingSets, List *sortclause)
3166 : {
3167 : ListCell *lc;
3168 1564 : List *previous = NIL;
3169 1564 : List *result = NIL;
3170 :
3171 4502 : foreach(lc, groupingSets)
3172 : {
3173 2938 : List *candidate = (List *) lfirst(lc);
3174 2938 : List *new_elems = list_difference_int(candidate, previous);
3175 2938 : GroupingSetData *gs = makeNode(GroupingSetData);
3176 :
3177 3114 : while (list_length(sortclause) > list_length(previous) &&
3178 : new_elems != NIL)
3179 : {
3180 296 : SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3181 296 : int ref = sc->tleSortGroupRef;
3182 :
3183 296 : if (list_member_int(new_elems, ref))
3184 : {
3185 176 : previous = lappend_int(previous, ref);
3186 176 : new_elems = list_delete_int(new_elems, ref);
3187 : }
3188 : else
3189 : {
3190 : /* diverged from the sortclause; give up on it */
3191 120 : sortclause = NIL;
3192 120 : break;
3193 : }
3194 : }
3195 :
3196 2938 : previous = list_concat(previous, new_elems);
3197 :
3198 2938 : gs->set = list_copy(previous);
3199 2938 : result = lcons(gs, result);
3200 : }
3201 :
3202 1564 : list_free(previous);
3203 :
3204 1564 : return result;
3205 : }
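/*
 * In miniature, the loop above emits the running union of its inputs: each
 * output extends the previous one, so each is an ordered prefix of the
 * next.  A standalone sketch with int column refs, ignoring the
 * sortclause-matching refinement and printing smallest-first rather than
 * returning largest-first:
 */
#include <stdbool.h>
#include <stdio.h>

#define MAXCOLS 8

static void
prefix_order(const int sets[][MAXCOLS], const int *setlens, int nsets)
{
    int         previous[MAXCOLS];
    int         plen = 0;

    for (int i = 0; i < nsets; i++)
    {
        /* append the elements of sets[i] not already in previous */
        for (int j = 0; j < setlens[i]; j++)
        {
            bool        have = false;

            for (int k = 0; k < plen; k++)
                if (previous[k] == sets[i][j])
                    have = true;
            if (!have)
                previous[plen++] = sets[i][j];
        }
        for (int k = 0; k < plen; k++)
            printf("%d ", previous[k]);
        printf("\n");
    }
}

int
main(void)
{
    /* a ROLLUP-style chain {c} < {b,c} < {a,b,c}, smallest first */
    const int   chain[3][MAXCOLS] = {{3}, {2, 3}, {1, 2, 3}};
    const int   lens[3] = {1, 2, 3};

    /* prints "3", "3 2", "3 2 1": each line is a prefix of the next */
    prefix_order(chain, lens, 3);
    return 0;
}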
3206 :
3207 : /*
3208 : * has_volatile_pathkey
3209 : * Returns true if any PathKey in 'keys' has an EquivalenceClass
3210 : * containing a volatile function. Otherwise returns false.
3211 : */
3212 : static bool
3213 2836 : has_volatile_pathkey(List *keys)
3214 : {
3215 : ListCell *lc;
3216 :
3217 5816 : foreach(lc, keys)
3218 : {
3219 2998 : PathKey *pathkey = lfirst_node(PathKey, lc);
3220 :
3221 2998 : if (pathkey->pk_eclass->ec_has_volatile)
3222 18 : return true;
3223 : }
3224 :
3225 2818 : return false;
3226 : }
3227 :
3228 : /*
3229 : * adjust_group_pathkeys_for_groupagg
3230 : * Add pathkeys to root->group_pathkeys to reflect the best set of
3231 : * pre-ordered input for ordered aggregates.
3232 : *
3233 : * We define "best" as the pathkeys that suit the largest number of
3234 : * aggregate functions. We find these by looking at the first ORDER BY /
3235 : * DISTINCT aggregate, taking its pathkeys before searching for
3236 : * other aggregates that require the same or a more strict variation of the
3237 : * same pathkeys. We then repeat that process for any remaining aggregates
3238 : * with different pathkeys; if we find another set of pathkeys that suits a
3239 : * larger number of aggregates, we select those pathkeys instead.
3240 : *
3241 : * When the best pathkeys are found we also mark each Aggref that can use
3242 : * those pathkeys as aggpresorted = true.
3243 : *
3244 : * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3245 : * volatile functions, we never make use of these pathkeys. We want to ensure
3246 : * that sorts using volatile functions are done independently in each Aggref
3247 : * rather than once at the query level. If we were to allow this then Aggrefs
3248 : * with compatible sort orders would all transition their rows in the same
3249 : * order if those pathkeys were deemed the best ones to sort on; whereas
3250 : * if some other Aggref's pathkeys happened to be deemed better, the
3251 : * volatile-function Aggrefs would be left to perform their sorts
3252 : * individually. To avoid this inconsistent behavior, which could make an
3253 : * Aggref's results depend on what other Aggrefs the query contains, we
3254 : * always force Aggrefs with volatile functions to perform their own
3255 : * sorts.
3256 : */
3257 : static void
3258 2440 : adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3259 : {
3260 2440 : List *grouppathkeys = root->group_pathkeys;
3261 : List *bestpathkeys;
3262 : Bitmapset *bestaggs;
3263 : Bitmapset *unprocessed_aggs;
3264 : ListCell *lc;
3265 : int i;
3266 :
3267 : /* Shouldn't be here if there are grouping sets */
3268 : Assert(root->parse->groupingSets == NIL);
3269 : /* Shouldn't be here unless there are some ordered aggregates */
3270 : Assert(root->numOrderedAggs > 0);
3271 :
3272 : /* Do nothing if disabled */
3273 2440 : if (!enable_presorted_aggregate)
3274 6 : return;
3275 :
3276 : /*
3277 : * Make a first pass over all AggInfos to collect a Bitmapset containing
3278 : * the indexes of all AggInfos to be processed below.
3279 : */
3280 2434 : unprocessed_aggs = NULL;
3281 5552 : foreach(lc, root->agginfos)
3282 : {
3283 3118 : AggInfo *agginfo = lfirst_node(AggInfo, lc);
3284 3118 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3285 :
3286 3118 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3287 264 : continue;
3288 :
3289 : /* Skip unless there's a DISTINCT or ORDER BY clause */
3290 2854 : if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3291 300 : continue;
3292 :
3293 : /* Additional safety checks are needed if there's a FILTER clause */
3294 2554 : if (aggref->aggfilter != NULL)
3295 : {
3296 : ListCell *lc2;
3297 54 : bool allow_presort = true;
3298 :
3299 : /*
3300 : * When the Aggref has a FILTER clause, it's possible that the
3301 : * filter removes rows that cannot be sorted because the
3302 : * expression to sort by results in an error during its
3303 : * evaluation. This is a problem for presorting as that happens
3304 : * before the FILTER, whereas without presorting, the Aggregate
3305 : * node will apply the FILTER *before* sorting. So that we never
3306 : * try to sort anything that might error, here we aim to skip over
3307 : * any Aggrefs whose arguments contain expressions that, when
3308 : * evaluated, could cause an ERROR. Vars and Consts are ok. There
3309 : * may be more cases that could be allowed, but more thought needs
3310 : * to be given; err on the side of caution.
3311 : */
3312 102 : foreach(lc2, aggref->args)
3313 : {
3314 72 : TargetEntry *tle = (TargetEntry *) lfirst(lc2);
3315 72 : Expr *expr = tle->expr;
3316 :
3317 84 : while (IsA(expr, RelabelType))
3318 12 : expr = (Expr *) (castNode(RelabelType, expr))->arg;
3319 :
3320 : /* Common case, Vars and Consts are ok */
3321 72 : if (IsA(expr, Var) || IsA(expr, Const))
3322 48 : continue;
3323 :
3324 : /* Unsupported. Don't try to presort for this Aggref */
3325 24 : allow_presort = false;
3326 24 : break;
3327 : }
3328 :
3329 : /* Skip unsupported Aggrefs */
3330 54 : if (!allow_presort)
3331 24 : continue;
3332 : }
3333 :
3334 2530 : unprocessed_aggs = bms_add_member(unprocessed_aggs,
3335 : foreach_current_index(lc));
3336 : }
3337 :
3338 : /*
3339 : * Now process all the unprocessed_aggs to find the best set of pathkeys
3340 : * for the given set of aggregates.
3341 : *
3342 : * On the first outer loop here 'bestaggs' will be empty. We'll populate
3343 : * this during the first loop using the pathkeys for the very first
3344 : * AggInfo then taking any stronger pathkeys from any other AggInfos with
3345 : * a more strict set of compatible pathkeys. Once the outer loop is
3346 : * complete, we mark off all the aggregates with compatible pathkeys then
3347 : * remove those from the unprocessed_aggs and repeat the process to try to
3348 : * find another set of pathkeys that are suitable for a larger number of
3349 : * aggregates. The outer loop will stop when there are not enough
3350 : * unprocessed aggregates for it to be possible to find a set of pathkeys
3351 : * to suit a larger number of aggregates.
3352 : */
3353 2434 : bestpathkeys = NIL;
3354 2434 : bestaggs = NULL;
3355 4802 : while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3356 : {
3357 2368 : Bitmapset *aggindexes = NULL;
3358 2368 : List *currpathkeys = NIL;
3359 :
3360 2368 : i = -1;
3361 5204 : while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3362 : {
3363 2836 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3364 2836 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3365 : List *sortlist;
3366 : List *pathkeys;
3367 :
3368 2836 : if (aggref->aggdistinct != NIL)
3369 724 : sortlist = aggref->aggdistinct;
3370 : else
3371 2112 : sortlist = aggref->aggorder;
3372 :
3373 2836 : pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3374 : aggref->args);
3375 :
3376 : /*
3377 : * Ignore Aggrefs which have volatile functions in their ORDER BY
3378 : * or DISTINCT clause.
3379 : */
3380 2836 : if (has_volatile_pathkey(pathkeys))
3381 : {
3382 18 : unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3383 18 : continue;
3384 : }
3385 :
3386 : /*
3387 : * When not set yet, take the pathkeys from the first unprocessed
3388 : * aggregate.
3389 : */
3390 2818 : if (currpathkeys == NIL)
3391 : {
3392 2362 : currpathkeys = pathkeys;
3393 :
3394 : /* include the GROUP BY pathkeys, if they exist */
3395 2362 : if (grouppathkeys != NIL)
3396 276 : currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3397 : currpathkeys);
3398 :
3399 : /* record that we found pathkeys for this aggregate */
3400 2362 : aggindexes = bms_add_member(aggindexes, i);
3401 : }
3402 : else
3403 : {
3404 : /* now look for a stronger set of matching pathkeys */
3405 :
3406 : /* include the GROUP BY pathkeys, if they exist */
3407 456 : if (grouppathkeys != NIL)
3408 288 : pathkeys = append_pathkeys(list_copy(grouppathkeys),
3409 : pathkeys);
3410 :
3411 : /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3412 456 : switch (compare_pathkeys(currpathkeys, pathkeys))
3413 : {
3414 12 : case PATHKEYS_BETTER2:
3415 : /* 'pathkeys' are stronger, use these ones instead */
3416 12 : currpathkeys = pathkeys;
3417 : /* FALLTHROUGH */
3418 :
3419 66 : case PATHKEYS_BETTER1:
3420 : /* 'pathkeys' are less strict */
3421 : /* FALLTHROUGH */
3422 :
3423 : case PATHKEYS_EQUAL:
3424 : /* mark this aggregate as covered by 'currpathkeys' */
3425 66 : aggindexes = bms_add_member(aggindexes, i);
3426 66 : break;
3427 :
3428 390 : case PATHKEYS_DIFFERENT:
3429 390 : break;
3430 : }
3431 : }
3432 : }
3433 :
3434 : /* remove the aggregates that we've just processed */
3435 2368 : unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3436 :
3437 : /*
3438 : * If this pass covered more aggregates than the previous best, use
3439 : * this set as the new best.
3440 : */
3441 2368 : if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3442 : {
3443 2260 : bestaggs = aggindexes;
3444 2260 : bestpathkeys = currpathkeys;
3445 : }
3446 : }
3447 :
3448 : /*
3449 : * If we found any ordered aggregates, update root->group_pathkeys to add
3450 : * the best set of aggregate pathkeys. Note that bestpathkeys includes
3451 : * the original GROUP BY pathkeys already.
3452 : */
3453 2434 : if (bestpathkeys != NIL)
3454 2200 : root->group_pathkeys = bestpathkeys;
3455 :
3456 : /*
3457 : * Now that we've found the best set of aggregates we can set the
3458 : * presorted flag to indicate to the executor that it needn't bother
3459 : * performing a sort for these Aggrefs. We can do this now because
3460 : * there's no chance of a Hash Aggregate plan: create_grouping_paths
3461 : * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3462 : * of ordered aggregates.
3463 : */
3464 2434 : i = -1;
3465 4730 : while ((i = bms_next_member(bestaggs, i)) >= 0)
3466 : {
3467 2296 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3468 :
3469 4610 : foreach(lc, agginfo->aggrefs)
3470 : {
3471 2314 : Aggref *aggref = lfirst_node(Aggref, lc);
3472 :
3473 2314 : aggref->aggpresorted = true;
3474 : }
3475 : }
3476 : }
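/*
 * A standalone sketch of the "best pathkeys" choice above (names
 * hypothetical): model each aggregate's requirement as a key list, treat a
 * requirement as covered when it is a prefix of the candidate's keys, and
 * pick the candidate covering the most aggregates.  The real code compares
 * PathKeys with compare_pathkeys() and repeats the search over the
 * remaining aggregates; this shows a single pass only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAXKEYS 4

typedef struct
{
    int         keys[MAXKEYS];
    int         nkeys;
} SortReq;

/* true if a's keys are a prefix of b's keys */
static bool
is_prefix(const SortReq *a, const SortReq *b)
{
    if (a->nkeys > b->nkeys)
        return false;
    for (int i = 0; i < a->nkeys; i++)
        if (a->keys[i] != b->keys[i])
            return false;
    return true;
}

int
main(void)
{
    /* e.g. agg1 ORDER BY a; agg2 ORDER BY a,b; agg3 ORDER BY c */
    SortReq     aggs[3] = {{{1}, 1}, {{1, 2}, 2}, {{3}, 1}};
    int         best = -1,
                bestcover = 0;

    for (int i = 0; i < 3; i++)
    {
        int         cover = 0;

        for (int j = 0; j < 3; j++)
            if (is_prefix(&aggs[j], &aggs[i]))
                cover++;
        if (cover > bestcover)
        {
            bestcover = cover;
            best = i;
        }
    }
    /* picks agg2's keys (a,b), which cover agg1 and agg2 but not agg3 */
    printf("best=%d covers %d aggregates\n", best, bestcover);
    return 0;
}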
3477 :
3478 : /*
3479 : * Compute query_pathkeys and other pathkeys during plan generation
3480 : */
3481 : static void
3482 533782 : standard_qp_callback(PlannerInfo *root, void *extra)
3483 : {
3484 533782 : Query *parse = root->parse;
3485 533782 : standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3486 533782 : List *tlist = root->processed_tlist;
3487 533782 : List *activeWindows = qp_extra->activeWindows;
3488 :
3489 : /*
3490 : * Calculate pathkeys that represent grouping/ordering and/or ordered
3491 : * aggregate requirements.
3492 : */
3493 533782 : if (qp_extra->gset_data)
3494 : {
3495 : /*
3496 : * With grouping sets, just use the first RollupData's groupClause. We
3497 : * don't make any effort to optimize grouping clauses when there are
3498 : * grouping sets, nor can we combine aggregate ordering keys with
3499 : * grouping.
3500 : */
3501 974 : List *rollups = qp_extra->gset_data->rollups;
3502 974 : List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3503 :
3504 974 : if (grouping_is_sortable(groupClause))
3505 : {
3506 : bool sortable;
3507 :
3508 : /*
3509 : * The groupClause is logically below the grouping step. So if
3510 : * there is an RTE entry for the grouping step, we need to remove
3511 : * its RT index from the sort expressions before we make PathKeys
3512 : * for them.
3513 : */
3514 974 : root->group_pathkeys =
3515 974 : make_pathkeys_for_sortclauses_extended(root,
3516 : &groupClause,
3517 : tlist,
3518 : false,
3519 974 : parse->hasGroupRTE,
3520 : &sortable,
3521 : false);
3522 : Assert(sortable);
3523 974 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3524 : }
3525 : else
3526 : {
3527 0 : root->group_pathkeys = NIL;
3528 0 : root->num_groupby_pathkeys = 0;
3529 : }
3530 : }
3531 532808 : else if (parse->groupClause || root->numOrderedAggs > 0)
3532 6224 : {
3533 : /*
3534 : * With a plain GROUP BY list, we can remove any grouping items that
3535 : * are proven redundant by EquivalenceClass processing. For example,
3536 : * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3537 : * especially common cases, but they're nearly free to detect. Note
3538 : * that we remove redundant items from processed_groupClause but not
3539 : * the original parse->groupClause.
3540 : */
3541 : bool sortable;
3542 :
3543 : /*
3544 : * Convert group clauses into pathkeys. Set the ec_sortref field of
3545 : * EquivalenceClass'es if it's not set yet.
3546 : */
3547 6224 : root->group_pathkeys =
3548 6224 : make_pathkeys_for_sortclauses_extended(root,
3549 : &root->processed_groupClause,
3550 : tlist,
3551 : true,
3552 : false,
3553 : &sortable,
3554 : true);
3555 6224 : if (!sortable)
3556 : {
3557 : /* Can't sort; no point in considering aggregate ordering either */
3558 0 : root->group_pathkeys = NIL;
3559 0 : root->num_groupby_pathkeys = 0;
3560 : }
3561 : else
3562 : {
3563 6224 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3564 : /* If we have ordered aggs, consider adding onto group_pathkeys */
3565 6224 : if (root->numOrderedAggs > 0)
3566 2440 : adjust_group_pathkeys_for_groupagg(root);
3567 : }
3568 : }
3569 : else
3570 : {
3571 526584 : root->group_pathkeys = NIL;
3572 526584 : root->num_groupby_pathkeys = 0;
3573 : }
3574 :
3575 : /* We consider only the first (bottom) window in pathkeys logic */
3576 533782 : if (activeWindows != NIL)
3577 : {
3578 2576 : WindowClause *wc = linitial_node(WindowClause, activeWindows);
3579 :
3580 2576 : root->window_pathkeys = make_pathkeys_for_window(root,
3581 : wc,
3582 : tlist);
3583 : }
3584 : else
3585 531206 : root->window_pathkeys = NIL;
3586 :
3587 : /*
3588 : * As with GROUP BY, we can discard any DISTINCT items that are proven
3589 : * redundant by EquivalenceClass processing. The non-redundant list is
3590 : * kept in root->processed_distinctClause, leaving the original
3591 : * parse->distinctClause alone.
3592 : */
3593 533782 : if (parse->distinctClause)
3594 : {
3595 : bool sortable;
3596 :
3597 : /* Make a copy since pathkey processing can modify the list */
3598 3004 : root->processed_distinctClause = list_copy(parse->distinctClause);
3599 3004 : root->distinct_pathkeys =
3600 3004 : make_pathkeys_for_sortclauses_extended(root,
3601 : &root->processed_distinctClause,
3602 : tlist,
3603 : true,
3604 : false,
3605 : &sortable,
3606 : false);
3607 3004 : if (!sortable)
3608 6 : root->distinct_pathkeys = NIL;
3609 : }
3610 : else
3611 530778 : root->distinct_pathkeys = NIL;
3612 :
3613 533782 : root->sort_pathkeys =
3614 533782 : make_pathkeys_for_sortclauses(root,
3615 : parse->sortClause,
3616 : tlist);
3617 :
3618 : /* setting setop_pathkeys might be useful to the union planner */
3619 533782 : if (qp_extra->setop != NULL)
3620 : {
3621 : List *groupClauses;
3622 : bool sortable;
3623 :
3624 12762 : groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3625 :
3626 12762 : root->setop_pathkeys =
3627 12762 : make_pathkeys_for_sortclauses_extended(root,
3628 : &groupClauses,
3629 : tlist,
3630 : false,
3631 : false,
3632 : &sortable,
3633 : false);
3634 12762 : if (!sortable)
3635 208 : root->setop_pathkeys = NIL;
3636 : }
3637 : else
3638 521020 : root->setop_pathkeys = NIL;
3639 :
3640 : /*
3641 : * Figure out whether we want a sorted result from query_planner.
3642 : *
3643 : * If we have a sortable GROUP BY clause, then we want a result sorted
3644 : * properly for grouping. Otherwise, if we have window functions to
3645 : * evaluate, we try to sort for the first window. Otherwise, if there's a
3646 : * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3647 : * we try to produce output that's sufficiently well sorted for the
3648 : * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3649 : * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3650 : * for a set operation which can benefit from presorted results and have a
3651 : * sortable targetlist, we want to sort by the target list.
3652 : *
3653 : * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3654 : * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3655 : * that might just leave us failing to exploit an available sort order at
3656 : * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3657 : * much easier, since we know that the parser ensured that one is a
3658 : * superset of the other.
3659 : */
3660 533782 : if (root->group_pathkeys)
3661 6806 : root->query_pathkeys = root->group_pathkeys;
3662 526976 : else if (root->window_pathkeys)
3663 2110 : root->query_pathkeys = root->window_pathkeys;
3664 1049732 : else if (list_length(root->distinct_pathkeys) >
3665 524866 : list_length(root->sort_pathkeys))
3666 2520 : root->query_pathkeys = root->distinct_pathkeys;
3667 522346 : else if (root->sort_pathkeys)
3668 68882 : root->query_pathkeys = root->sort_pathkeys;
3669 453464 : else if (root->setop_pathkeys != NIL)
3670 11354 : root->query_pathkeys = root->setop_pathkeys;
3671 : else
3672 442110 : root->query_pathkeys = NIL;
3673 533782 : }
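/*
 * The if-chain above implements a fixed priority order.  Reduced to key
 * counts, as a standalone sketch (function name hypothetical):
 */
#include <stdio.h>

static const char *
choose_query_pathkeys(int ngroup, int nwindow, int ndistinct, int nsort,
                      int nsetop)
{
    if (ngroup > 0)
        return "group";
    if (nwindow > 0)
        return "window";
    if (ndistinct > nsort)      /* DISTINCT only if stricter than ORDER BY */
        return "distinct";
    if (nsort > 0)
        return "sort";
    if (nsetop > 0)
        return "setop";
    return "none";
}

int
main(void)
{
    /*
     * e.g. SELECT DISTINCT a, b ... ORDER BY a: the two DISTINCT keys are
     * stricter than the single ORDER BY key, so we sort for the DISTINCT.
     */
    printf("%s\n", choose_query_pathkeys(0, 0, 2, 1, 0));
    return 0;
}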
3674 :
3675 : /*
3676 : * Estimate number of groups produced by grouping clauses (1 if not grouping)
3677 : *
3678 : * path_rows: number of output rows from scan/join step
3679 : * gd: grouping sets data including list of grouping sets and their clauses
3680 : * target_list: target list containing group clause references
3681 : *
3682 : * If doing grouping sets, we also annotate the gsets data with the estimates
3683 : * for each set and each individual rollup list, with a view to later
3684 : * determining whether some combination of them could be hashed instead.
3685 : */
3686 : static double
3687 52438 : get_number_of_groups(PlannerInfo *root,
3688 : double path_rows,
3689 : grouping_sets_data *gd,
3690 : List *target_list)
3691 : {
3692 52438 : Query *parse = root->parse;
3693 : double dNumGroups;
3694 :
3695 52438 : if (parse->groupClause)
3696 : {
3697 : List *groupExprs;
3698 :
3699 10250 : if (parse->groupingSets)
3700 : {
3701 : /* Add up the estimates for each grouping set */
3702 : ListCell *lc;
3703 :
3704 : Assert(gd); /* keep Coverity happy */
3705 :
3706 896 : dNumGroups = 0;
3707 :
3708 2382 : foreach(lc, gd->rollups)
3709 : {
3710 1486 : RollupData *rollup = lfirst_node(RollupData, lc);
3711 : ListCell *lc2;
3712 : ListCell *lc3;
3713 :
3714 1486 : groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3715 : target_list);
3716 :
3717 1486 : rollup->numGroups = 0.0;
3718 :
3719 4286 : forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3720 : {
3721 2800 : List *gset = (List *) lfirst(lc2);
3722 2800 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3723 2800 : double numGroups = estimate_num_groups(root,
3724 : groupExprs,
3725 : path_rows,
3726 : &gset,
3727 : NULL);
3728 :
3729 2800 : gs->numGroups = numGroups;
3730 2800 : rollup->numGroups += numGroups;
3731 : }
3732 :
3733 1486 : dNumGroups += rollup->numGroups;
3734 : }
3735 :
3736 896 : if (gd->hash_sets_idx)
3737 : {
3738 : ListCell *lc2;
3739 :
3740 36 : gd->dNumHashGroups = 0;
3741 :
3742 36 : groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3743 : target_list);
3744 :
3745 78 : forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3746 : {
3747 42 : List *gset = (List *) lfirst(lc);
3748 42 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3749 42 : double numGroups = estimate_num_groups(root,
3750 : groupExprs,
3751 : path_rows,
3752 : &gset,
3753 : NULL);
3754 :
3755 42 : gs->numGroups = numGroups;
3756 42 : gd->dNumHashGroups += numGroups;
3757 : }
3758 :
3759 36 : dNumGroups += gd->dNumHashGroups;
3760 : }
3761 : }
3762 : else
3763 : {
3764 : /* Plain GROUP BY -- estimate based on optimized groupClause */
3765 9354 : groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3766 : target_list);
3767 :
3768 9354 : dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3769 : NULL, NULL);
3770 : }
3771 : }
3772 42188 : else if (parse->groupingSets)
3773 : {
3774 : /* Empty grouping sets ... one result row for each one */
3775 60 : dNumGroups = list_length(parse->groupingSets);
3776 : }
3777 42128 : else if (parse->hasAggs || root->hasHavingQual)
3778 : {
3779 : /* Plain aggregation, one result row */
3780 42128 : dNumGroups = 1;
3781 : }
3782 : else
3783 : {
3784 : /* Not grouping */
3785 0 : dNumGroups = 1;
3786 : }
3787 :
3788 52438 : return dNumGroups;
3789 : }
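/*
 * For grouping sets, the estimate above is purely additive: each rollup
 * contributes the sum of its per-set group counts, and any hashed-only
 * sets add theirs on top.  A standalone sketch with made-up counts for
 * CUBE(a, b), split (as extract_rollup_sets would) into the rollups
 * (a,b)->(a)->() and (b):
 */
#include <stdio.h>

int
main(void)
{
    const double rollup1[3] = {50.0, 10.0, 1.0};    /* (a,b), (a), () */
    const double rollup2[1] = {5.0};                /* (b) */
    double      dNumGroups = 0.0;

    for (int i = 0; i < 3; i++)
        dNumGroups += rollup1[i];
    for (int i = 0; i < 1; i++)
        dNumGroups += rollup2[i];
    printf("%.0f estimated groups\n", dNumGroups);  /* 66 */
    return 0;
}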
3790 :
3791 : /*
3792 : * create_grouping_paths
3793 : *
3794 : * Build a new upperrel containing Paths for grouping and/or aggregation.
3795 : * Along the way, we also build an upperrel for Paths which are partially
3796 : * grouped and/or aggregated. A partially grouped and/or aggregated path
3797 : * needs a FinalizeAggregate node to complete the aggregation. Currently,
3798 : * the only partially grouped paths we build are also partial paths; that
3799 : * is, they need a Gather and then a FinalizeAggregate.
3800 : *
3801 : * input_rel: contains the source-data Paths
3802 : * target: the pathtarget for the result Paths to compute
3803 : * gd: grouping sets data including list of grouping sets and their clauses
3804 : *
3805 : * Note: all Paths in input_rel are expected to return the target computed
3806 : * by make_group_input_target.
3807 : */
3808 : static RelOptInfo *
3809 45398 : create_grouping_paths(PlannerInfo *root,
3810 : RelOptInfo *input_rel,
3811 : PathTarget *target,
3812 : bool target_parallel_safe,
3813 : grouping_sets_data *gd)
3814 : {
3815 45398 : Query *parse = root->parse;
3816 : RelOptInfo *grouped_rel;
3817 : RelOptInfo *partially_grouped_rel;
3818 : AggClauseCosts agg_costs;
3819 :
3820 272388 : MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3821 45398 : get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3822 :
3823 : /*
3824 : * Create grouping relation to hold fully aggregated grouping and/or
3825 : * aggregation paths.
3826 : */
3827 45398 : grouped_rel = make_grouping_rel(root, input_rel, target,
3828 : target_parallel_safe, parse->havingQual);
3829 :
3830 : /*
3831 : * Create either paths for a degenerate grouping or paths for ordinary
3832 : * grouping, as appropriate.
3833 : */
3834 45398 : if (is_degenerate_grouping(root))
3835 42 : create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3836 : else
3837 : {
3838 45356 : int flags = 0;
3839 : GroupPathExtraData extra;
3840 :
3841 : /*
3842 : * Determine whether it's possible to perform sort-based
3843 : * implementations of grouping. (Note that if processed_groupClause
3844 : * is empty, grouping_is_sortable() is trivially true, and all the
3845 : * pathkeys_contained_in() tests will succeed too, so that we'll
3846 : * consider every surviving input path.)
3847 : *
3848 : * If we have grouping sets, we might be able to sort some but not all
3849 : * of them; in this case, we need can_sort to be true as long as we
3850 : * must consider any sorted-input plan.
3851 : */
3852 45356 : if ((gd && gd->rollups != NIL)
3853 44406 : || grouping_is_sortable(root->processed_groupClause))
3854 45350 : flags |= GROUPING_CAN_USE_SORT;
3855 :
3856 : /*
3857 : * Determine whether we should consider hash-based implementations of
3858 : * grouping.
3859 : *
3860 : * Hashed aggregation only applies if we're grouping. If we have
3861 : * grouping sets, some groups might be hashable but others not; in
3862 : * this case we set can_hash true as long as there is nothing globally
3863 : * preventing us from hashing (and we should therefore consider plans
3864 : * with hashes).
3865 : *
3866 : * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3867 : * BY aggregates. (Doing so would imply storing *all* the input
3868 : * values in the hash table, and/or running many sorts in parallel,
3869 : * either of which seems like a certain loser.) We similarly don't
3870 : * support ordered-set aggregates in hashed aggregation, but that case
3871 : * is also included in the numOrderedAggs count.
3872 : *
3873 : * Note: grouping_is_hashable() is much more expensive to check than
3874 : * the other gating conditions, so we want to do it last.
3875 : */
3876 45356 : if ((parse->groupClause != NIL &&
3877 9568 : root->numOrderedAggs == 0 &&
3878 4644 : (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3879 4640 : flags |= GROUPING_CAN_USE_HASH;
3880 :
3881 : /*
3882 : * Determine whether partial aggregation is possible.
3883 : */
3884 45356 : if (can_partial_agg(root))
3885 40504 : flags |= GROUPING_CAN_PARTIAL_AGG;
3886 :
3887 45356 : extra.flags = flags;
3888 45356 : extra.target_parallel_safe = target_parallel_safe;
3889 45356 : extra.havingQual = parse->havingQual;
3890 45356 : extra.targetList = parse->targetList;
3891 45356 : extra.partial_costs_set = false;
3892 :
3893 : /*
3894 : * Determine whether partitionwise aggregation is in theory possible.
3895 : * It can be disabled by the user, and for now, we don't try to
3896 : * support grouping sets. create_ordinary_grouping_paths() will check
3897 : * additional conditions, such as whether input_rel is partitioned.
3898 : */
3899 45356 : if (enable_partitionwise_aggregate && !parse->groupingSets)
3900 700 : extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3901 : else
3902 44656 : extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3903 :
3904 45356 : create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3905 : &agg_costs, gd, &extra,
3906 : &partially_grouped_rel);
3907 : }
3908 :
3909 45392 : set_cheapest(grouped_rel);
3910 45392 : return grouped_rel;
3911 : }
3912 :
3913 : /*
3914 : * make_grouping_rel
3915 : *
3916 : * Create a new grouping rel and set basic properties.
3917 : *
3918 : * input_rel represents the underlying scan/join relation.
3919 : * target is the output expected from the grouping relation.
3920 : */
3921 : static RelOptInfo *
3922 47564 : make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3923 : PathTarget *target, bool target_parallel_safe,
3924 : Node *havingQual)
3925 : {
3926 : RelOptInfo *grouped_rel;
3927 :
3928 47564 : if (IS_OTHER_REL(input_rel))
3929 : {
3930 2166 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3931 : input_rel->relids);
3932 2166 : grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3933 : }
3934 : else
3935 : {
3936 : /*
3937 : * By tradition, the relids set for the main grouping relation is
3938 : * NULL. (This could be changed, but might require adjustments
3939 : * elsewhere.)
3940 : */
3941 45398 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3942 : }
3943 :
3944 : /* Set target. */
3945 47564 : grouped_rel->reltarget = target;
3946 :
3947 : /*
3948 : * If the input relation is not parallel-safe, then the grouped relation
3949 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3950 : * target list and HAVING quals are parallel-safe.
3951 : */
3952 76900 : if (input_rel->consider_parallel && target_parallel_safe &&
3953 29336 : is_parallel_safe(root, havingQual))
3954 29312 : grouped_rel->consider_parallel = true;
3955 :
3956 : /*
3957 : * If the input rel belongs to a single FDW, so does the grouped rel.
3958 : */
3959 47564 : grouped_rel->serverid = input_rel->serverid;
3960 47564 : grouped_rel->userid = input_rel->userid;
3961 47564 : grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3962 47564 : grouped_rel->fdwroutine = input_rel->fdwroutine;
3963 :
3964 47564 : return grouped_rel;
3965 : }
3966 :
3967 : /*
3968 : * is_degenerate_grouping
3969 : *
3970 : * A degenerate grouping is one in which the query has a HAVING qual and/or
3971 : * grouping sets, but no aggregates and no GROUP BY (which implies that the
3972 : * grouping sets are all empty).
3973 : */
3974 : static bool
3975 45398 : is_degenerate_grouping(PlannerInfo *root)
3976 : {
3977 45398 : Query *parse = root->parse;
3978 :
3979 44166 : return (root->hasHavingQual || parse->groupingSets) &&
3980 89564 : !parse->hasAggs && parse->groupClause == NIL;
3981 : }
3982 :
3983 : /*
3984 : * create_degenerate_grouping_paths
3985 : *
3986 : * When the grouping is degenerate (see is_degenerate_grouping), we are
3987 : * supposed to emit either zero or one row for each grouping set depending on
3988 : * whether HAVING succeeds. Furthermore, there cannot be any variables in
3989 : * either HAVING or the targetlist, so we actually do not need the FROM table
3990 : * at all! We can just throw away the plan-so-far and generate a Result node.
3991 : * This is a sufficiently unusual corner case that it's not worth contorting
3992 : * the structure of this module to avoid having to generate the earlier paths
3993 : * in the first place.
3994 : */
3995 : static void
3996 42 : create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3997 : RelOptInfo *grouped_rel)
3998 : {
3999 42 : Query *parse = root->parse;
4000 : int nrows;
4001 : Path *path;
4002 :
4003 42 : nrows = list_length(parse->groupingSets);
4004 42 : if (nrows > 1)
4005 : {
4006 : /*
4007 : * Doesn't seem worthwhile writing code to cons up a generate_series
4008 : * or a values scan to emit multiple rows. Instead just make N clones
4009 : * and append them. (With a volatile HAVING clause, this means you
4010 : * might get between 0 and N output rows. Offhand I think that's
4011 : * desired.)
4012 : */
4013 12 : List *paths = NIL;
4014 :
4015 36 : while (--nrows >= 0)
4016 : {
4017 : path = (Path *)
4018 24 : create_group_result_path(root, grouped_rel,
4019 24 : grouped_rel->reltarget,
4020 24 : (List *) parse->havingQual);
4021 24 : paths = lappend(paths, path);
4022 : }
4023 : path = (Path *)
4024 12 : create_append_path(root,
4025 : grouped_rel,
4026 : paths,
4027 : NIL,
4028 : NIL,
4029 : NULL,
4030 : 0,
4031 : false,
4032 : -1);
4033 : }
4034 : else
4035 : {
4036 : /* No grouping sets, or just one, so one output row */
4037 : path = (Path *)
4038 30 : create_group_result_path(root, grouped_rel,
4039 30 : grouped_rel->reltarget,
4040 30 : (List *) parse->havingQual);
4041 : }
4042 :
4043 42 : add_path(grouped_rel, path);
4044 42 : }
4045 :
4046 : /*
4047 : * create_ordinary_grouping_paths
4048 : *
4049 : * Create grouping paths for the ordinary (that is, non-degenerate) case.
4050 : *
4051 : * We need to consider sorted and hashed aggregation in the same function,
4052 : * because otherwise (1) it would be harder to throw an appropriate error
4053 : * message if neither way works, and (2) we should not allow hashtable size
4054 : * considerations to dissuade us from using hashing if sorting is not possible.
4055 : *
4056 : * *partially_grouped_rel_p will be set to the partially grouped rel which this
4057 : * function creates, or to NULL if it doesn't create one.
4058 : */
4059 : static void
4060 47522 : create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
4061 : RelOptInfo *grouped_rel,
4062 : const AggClauseCosts *agg_costs,
4063 : grouping_sets_data *gd,
4064 : GroupPathExtraData *extra,
4065 : RelOptInfo **partially_grouped_rel_p)
4066 : {
4067 47522 : RelOptInfo *partially_grouped_rel = NULL;
4068 47522 : PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4069 :
4070 : /*
4071 : * If this is the topmost grouping relation or if the parent relation is
4072 : * doing some form of partitionwise aggregation, then we may be able to do
4073 : * it at this level also. However, if the input relation is not
4074 : * partitioned, partitionwise aggregate is impossible.
4075 : */
4076 47522 : if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4077 2866 : IS_PARTITIONED_REL(input_rel))
4078 : {
4079 : /*
4080 : * If this is the topmost relation or if the parent relation is doing
4081 : * full partitionwise aggregation, then we can do full partitionwise
4082 : * aggregation provided that the GROUP BY clause contains all of the
4083 : * partitioning columns at this level and the collation used by GROUP
4084 : * BY matches the partitioning collation. Otherwise, we can do at
4085 : * most partial partitionwise aggregation. But if partial aggregation
4086 : * is not supported in general then we can't use it for partitionwise
4087 : * aggregation either.
4088 : *
4089 : * Check parse->groupClause not processed_groupClause, because it's
4090 : * okay if some of the partitioning columns were proved redundant.
4091 : */
4092 1640 : if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4093 772 : group_by_has_partkey(input_rel, extra->targetList,
4094 772 : root->parse->groupClause))
4095 488 : patype = PARTITIONWISE_AGGREGATE_FULL;
4096 380 : else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4097 338 : patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4098 : else
4099 42 : patype = PARTITIONWISE_AGGREGATE_NONE;
4100 : }
4101 :
4102 : /*
4103 : * Before generating paths for grouped_rel, we first generate any possible
4104 : * partially grouped paths; that way, later code can easily consider both
4105 : * parallel and non-parallel approaches to grouping.
4106 : */
4107 47522 : if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4108 : {
4109 : bool force_rel_creation;
4110 :
4111 : /*
4112 : * If we're doing partitionwise aggregation at this level, force
4113 : * creation of a partially_grouped_rel so we can add partitionwise
4114 : * paths to it.
4115 : */
4116 42598 : force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4117 :
4118 : partially_grouped_rel =
4119 42598 : create_partial_grouping_paths(root,
4120 : grouped_rel,
4121 : input_rel,
4122 : gd,
4123 : extra,
4124 : force_rel_creation);
4125 : }
4126 :
4127 : /* Set out parameter. */
4128 47522 : *partially_grouped_rel_p = partially_grouped_rel;
4129 :
4130 : /* Apply partitionwise aggregation technique, if possible. */
4131 47522 : if (patype != PARTITIONWISE_AGGREGATE_NONE)
4132 826 : create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4133 : partially_grouped_rel, agg_costs,
4134 : gd, patype, extra);
4135 :
4136 : /* If we are doing partial aggregation only, return. */
4137 47522 : if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4138 : {
4139 : Assert(partially_grouped_rel);
4140 :
4141 858 : if (partially_grouped_rel->pathlist)
4142 858 : set_cheapest(partially_grouped_rel);
4143 :
4144 858 : return;
4145 : }
4146 :
4147 : /* Gather any partially grouped partial paths. */
4148 46664 : if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4149 2092 : gather_grouping_paths(root, partially_grouped_rel);
4150 :
4151 : /* Now choose the best path(s) for partially_grouped_rel. */
4152 46664 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
4153 2320 : set_cheapest(partially_grouped_rel);
4154 :
4155 : /* Build final grouping paths */
4156 46664 : add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4157 : partially_grouped_rel, agg_costs, gd,
4158 : extra);
4159 :
4160 : /* Give a helpful error if we failed to find any implementation */
4161 46664 : if (grouped_rel->pathlist == NIL)
4162 6 : ereport(ERROR,
4163 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4164 : errmsg("could not implement GROUP BY"),
4165 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4166 :
4167 : /*
4168 : * If there is an FDW that's responsible for all baserels of the query,
4169 : * let it consider adding ForeignPaths.
4170 : */
4171 46658 : if (grouped_rel->fdwroutine &&
4172 336 : grouped_rel->fdwroutine->GetForeignUpperPaths)
4173 336 : grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4174 : input_rel, grouped_rel,
4175 : extra);
4176 :
4177 : /* Let extensions possibly add some more paths */
4178 46658 : if (create_upper_paths_hook)
4179 0 : (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4180 : input_rel, grouped_rel,
4181 : extra);
4182 : }
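/*
 * A standalone sketch of the partitionwise decision above (enum and names
 * hypothetical): full partitionwise aggregation requires both that the
 * parent allows it and that GROUP BY covers the partition key; failing
 * that, partial aggregation per partition remains possible whenever
 * partial aggregation is possible at all.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum
{
    PA_NONE,
    PA_PARTIAL,
    PA_FULL
} PAType;

static PAType
choose_patype(bool parent_allows_full, bool groupby_has_partkey,
              bool can_partial_agg)
{
    if (parent_allows_full && groupby_has_partkey)
        return PA_FULL;
    if (can_partial_agg)
        return PA_PARTIAL;
    return PA_NONE;
}

int
main(void)
{
    /*
     * GROUP BY lacks the partition key, but partial aggregation works:
     * aggregate within each partition, then finalize across partitions.
     */
    printf("%d\n", (int) choose_patype(true, false, true)); /* 1 = PA_PARTIAL */
    return 0;
}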
4183 :
4184 : /*
4185 : * For a given input path, consider the possible ways of doing grouping sets on
4186 : * it, by combinations of hashing and sorting. This can be called multiple
4187 : * times, so it's important that it not scribble on input. No result is
4188 : * returned, but any generated paths are added to grouped_rel.
4189 : */
4190 : static void
4191 1882 : consider_groupingsets_paths(PlannerInfo *root,
4192 : RelOptInfo *grouped_rel,
4193 : Path *path,
4194 : bool is_sorted,
4195 : bool can_hash,
4196 : grouping_sets_data *gd,
4197 : const AggClauseCosts *agg_costs,
4198 : double dNumGroups)
4199 : {
4200 1882 : Query *parse = root->parse;
4201 1882 : Size hash_mem_limit = get_hash_memory_limit();
4202 :
4203 : /*
4204 : * If we're not being offered sorted input, then only consider plans that
4205 : * can be done entirely by hashing.
4206 : *
4207 : * We can hash everything if it looks like it'll fit in hash_mem. But if
4208 : * the input is actually sorted despite not being advertised as such, we
4209 : * prefer to make use of that in order to use less memory.
4210 : *
4211 : * If none of the grouping sets are sortable, then ignore the hash_mem
4212 : * limit and generate a path anyway, since otherwise we'll just fail.
4213 : */
4214 1882 : if (!is_sorted)
4215 : {
4216 860 : List *new_rollups = NIL;
4217 860 : RollupData *unhashed_rollup = NULL;
4218 : List *sets_data;
4219 860 : List *empty_sets_data = NIL;
4220 860 : List *empty_sets = NIL;
4221 : ListCell *lc;
4222 860 : ListCell *l_start = list_head(gd->rollups);
4223 860 : AggStrategy strat = AGG_HASHED;
4224 : double hashsize;
4225 860 : double exclude_groups = 0.0;
4226 :
4227 : Assert(can_hash);
4228 :
4229 : /*
4230 : * If the input is coincidentally sorted usefully (which can happen
4231 : * even if is_sorted is false, since that only means that our caller
4232 : * has set up the sorting for us), then save some hashtable space by
4233 : * making use of that. But we need to watch out for degenerate cases:
4234 : *
4235 : * 1) If there are any empty grouping sets, then group_pathkeys might
4236 : * be NIL if all non-empty grouping sets are unsortable. In this case,
4237 : * there will be a rollup containing only empty groups, and the
4238 : * pathkeys_contained_in test is vacuously true; this is ok.
4239 : *
4240 : * XXX: the above relies on the fact that group_pathkeys is generated
4241 : * from the first rollup. If we add the ability to consider multiple
4242 : * sort orders for grouping input, this assumption might fail.
4243 : *
4244 : * 2) If there are no empty sets and only unsortable sets, then the
4245 : * rollups list will be empty (and thus l_start == NULL), and
4246 : * group_pathkeys will be NIL; we must ensure that the vacuously-true
4247 : * pathkeys_contained_in test doesn't cause us to crash.
4248 : */
4249 1714 : if (l_start != NULL &&
4250 854 : pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4251 : {
4252 12 : unhashed_rollup = lfirst_node(RollupData, l_start);
4253 12 : exclude_groups = unhashed_rollup->numGroups;
4254 12 : l_start = lnext(gd->rollups, l_start);
4255 : }
4256 :
4257 860 : hashsize = estimate_hashagg_tablesize(root,
4258 : path,
4259 : agg_costs,
4260 : dNumGroups - exclude_groups);
4261 :
4262 : /*
4263 : * gd->rollups is empty if we have only unsortable columns to work
4264 : * with. Override hash_mem in that case; otherwise, we'll rely on the
4265 : * sorted-input case to generate usable mixed paths.
4266 : */
4267 860 : if (hashsize > hash_mem_limit && gd->rollups)
4268 18 : return; /* nope, won't fit */
4269 :
4270 : /*
4271 : * We need to burst the existing rollups list into individual grouping
4272 : * sets and recompute a groupClause for each set.
4273 : */
4274 842 : sets_data = list_copy(gd->unsortable_sets);
4275 :
4276 2136 : for_each_cell(lc, gd->rollups, l_start)
4277 : {
4278 1318 : RollupData *rollup = lfirst_node(RollupData, lc);
4279 :
4280 : /*
4281 : * If we find an unhashable rollup that's not been skipped by the
4282 : * "actually sorted" check above, we can't cope; we'd need sorted
4283 : * input (with a different sort order) but we can't get that here.
4284 : * So bail out; we'll get a valid path from the is_sorted case
4285 : * instead.
4286 : *
4287 : * The mere presence of empty grouping sets doesn't make a rollup
4288 : * unhashable (see preprocess_grouping_sets), we handle those
4289 : * specially below.
4290 : */
4291 1318 : if (!rollup->hashable)
4292 24 : return;
4293 :
4294 1294 : sets_data = list_concat(sets_data, rollup->gsets_data);
4295 : }
4296 3384 : foreach(lc, sets_data)
4297 : {
4298 2566 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4299 2566 : List *gset = gs->set;
4300 : RollupData *rollup;
4301 :
4302 2566 : if (gset == NIL)
4303 : {
4304 : /* Empty grouping sets can't be hashed. */
4305 526 : empty_sets_data = lappend(empty_sets_data, gs);
4306 526 : empty_sets = lappend(empty_sets, NIL);
4307 : }
4308 : else
4309 : {
4310 2040 : rollup = makeNode(RollupData);
4311 :
4312 2040 : rollup->groupClause = preprocess_groupclause(root, gset);
4313 2040 : rollup->gsets_data = list_make1(gs);
4314 2040 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4315 : rollup->gsets_data,
4316 : gd->tleref_to_colnum_map);
4317 2040 : rollup->numGroups = gs->numGroups;
4318 2040 : rollup->hashable = true;
4319 2040 : rollup->is_hashed = true;
4320 2040 : new_rollups = lappend(new_rollups, rollup);
4321 : }
4322 : }
4323 :
4324 : /*
4325 : * If we didn't find anything nonempty to hash, then bail. We'll
4326 : * generate a path from the is_sorted case.
4327 : */
4328 818 : if (new_rollups == NIL)
4329 0 : return;
4330 :
4331 : /*
4332 : * If there were empty grouping sets they should have been in the
4333 : * first rollup.
4334 : */
4335 : Assert(!unhashed_rollup || !empty_sets);
4336 :
4337 818 : if (unhashed_rollup)
4338 : {
4339 12 : new_rollups = lappend(new_rollups, unhashed_rollup);
4340 12 : strat = AGG_MIXED;
4341 : }
4342 806 : else if (empty_sets)
4343 : {
4344 478 : RollupData *rollup = makeNode(RollupData);
4345 :
4346 478 : rollup->groupClause = NIL;
4347 478 : rollup->gsets_data = empty_sets_data;
4348 478 : rollup->gsets = empty_sets;
4349 478 : rollup->numGroups = list_length(empty_sets);
4350 478 : rollup->hashable = false;
4351 478 : rollup->is_hashed = false;
4352 478 : new_rollups = lappend(new_rollups, rollup);
4353 478 : strat = AGG_MIXED;
4354 : }
4355 :
4356 818 : add_path(grouped_rel, (Path *)
4357 818 : create_groupingsets_path(root,
4358 : grouped_rel,
4359 : path,
4360 818 : (List *) parse->havingQual,
4361 : strat,
4362 : new_rollups,
4363 : agg_costs));
4364 818 : return;
4365 : }
4366 :
4367 : /*
4368 : * If we have sorted input but nothing we can do with it, bail.
4369 : */
4370 1022 : if (gd->rollups == NIL)
4371 0 : return;
4372 :
4373 : /*
4374 : * Given sorted input, we try and make two paths: one sorted and one mixed
4375 : * sort/hash. (We need to try both because hashagg might be disabled, or
4376 : * some columns might not be sortable.)
4377 : *
4378 : * can_hash is passed in as false if some obstacle elsewhere (such as
4379 : * ordered aggs) means that we shouldn't consider hashing at all.
4380 : */
4381 1022 : if (can_hash && gd->any_hashable)
4382 : {
4383 926 : List *rollups = NIL;
4384 926 : List *hash_sets = list_copy(gd->unsortable_sets);
4385 926 : double availspace = hash_mem_limit;
4386 : ListCell *lc;
4387 :
4388 : /*
4389 : * Account first for space needed for groups we can't sort at all.
4390 : */
4391 926 : availspace -= estimate_hashagg_tablesize(root,
4392 : path,
4393 : agg_costs,
4394 : gd->dNumHashGroups);
4395 :
4396 926 : if (availspace > 0 && list_length(gd->rollups) > 1)
4397 : {
4398 : double scale;
4399 456 : int num_rollups = list_length(gd->rollups);
4400 : int k_capacity;
4401 456 : int *k_weights = palloc(num_rollups * sizeof(int));
4402 456 : Bitmapset *hash_items = NULL;
4403 : int i;
4404 :
4405 : /*
4406 : * We treat this as a knapsack problem: the knapsack capacity
4407 : * represents hash_mem, the item weights are the estimated memory
4408 : * usage of the hashtables needed to implement a single rollup,
4409 : * and we really ought to use the cost saving as the item value;
4410 : * however, currently the costs assigned to sort nodes don't
4411 : * reflect the comparison costs well, and so we treat all items as
4412 : * of equal value (each rollup we hash instead saves us one sort).
4413 : *
4414 : * To use the discrete knapsack, we need to scale the values to a
4415 : * reasonably small bounded range. We choose to allow a 5% error
4416 : * margin; we have no more than 4096 rollups in the worst possible
4417 : * case, which with a 5% error margin will require a bit over 42MB
4418 : * of workspace. (Anyone wanting to plan queries that complex had
4419 : * better have the memory for it. In more reasonable cases, with
4420 : * no more than a couple of dozen rollups, the memory usage will
4421 : * be negligible.)
4422 : *
4423 : * k_capacity is naturally bounded, but we clamp the values for
4424 : * scale and weight (below) to avoid overflows or underflows (or
4425 : * uselessly trying to use a scale factor less than 1 byte).
4426 : */
4427 456 : scale = Max(availspace / (20.0 * num_rollups), 1.0);
4428 456 : k_capacity = (int) floor(availspace / scale);
4429 :
4430 : /*
4431 : * We leave the first rollup out of consideration since it's the
4432 : * one that matches the input sort order. We assign indexes "i"
4433 : * to only those entries considered for hashing; the second loop,
4434 : * below, must use the same condition.
4435 : */
4436 456 : i = 0;
4437 1176 : for_each_from(lc, gd->rollups, 1)
4438 : {
4439 720 : RollupData *rollup = lfirst_node(RollupData, lc);
4440 :
4441 720 : if (rollup->hashable)
4442 : {
4443 720 : double sz = estimate_hashagg_tablesize(root,
4444 : path,
4445 : agg_costs,
4446 : rollup->numGroups);
4447 :
4448 : /*
4449 : * If sz is enormous, but hash_mem (and hence scale) is
4450 : * small, avoid integer overflow here.
4451 : */
4452 720 : k_weights[i] = (int) Min(floor(sz / scale),
4453 : k_capacity + 1.0);
4454 720 : ++i;
4455 : }
4456 : }
4457 :
4458 : /*
4459 : * Apply knapsack algorithm; compute the set of items which
4460 : * maximizes the value stored (in this case the number of sorts
4461 : * saved) while keeping the total size (approximately) within
4462 : * capacity.
4463 : */
4464 456 : if (i > 0)
4465 456 : hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4466 :
4467 456 : if (!bms_is_empty(hash_items))
4468 : {
4469 456 : rollups = list_make1(linitial(gd->rollups));
4470 :
4471 456 : i = 0;
4472 1176 : for_each_from(lc, gd->rollups, 1)
4473 : {
4474 720 : RollupData *rollup = lfirst_node(RollupData, lc);
4475 :
4476 720 : if (rollup->hashable)
4477 : {
4478 720 : if (bms_is_member(i, hash_items))
4479 684 : hash_sets = list_concat(hash_sets,
4480 684 : rollup->gsets_data);
4481 : else
4482 36 : rollups = lappend(rollups, rollup);
4483 720 : ++i;
4484 : }
4485 : else
4486 0 : rollups = lappend(rollups, rollup);
4487 : }
4488 : }
4489 : }
4490 :
4491 926 : if (!rollups && hash_sets)
4492 24 : rollups = list_copy(gd->rollups);
4493 :
4494 1750 : foreach(lc, hash_sets)
4495 : {
4496 824 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4497 824 : RollupData *rollup = makeNode(RollupData);
4498 :
4499 : Assert(gs->set != NIL);
4500 :
4501 824 : rollup->groupClause = preprocess_groupclause(root, gs->set);
4502 824 : rollup->gsets_data = list_make1(gs);
4503 824 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4504 : rollup->gsets_data,
4505 : gd->tleref_to_colnum_map);
4506 824 : rollup->numGroups = gs->numGroups;
4507 824 : rollup->hashable = true;
4508 824 : rollup->is_hashed = true;
4509 824 : rollups = lcons(rollup, rollups);
4510 : }
4511 :
4512 926 : if (rollups)
4513 : {
4514 480 : add_path(grouped_rel, (Path *)
4515 480 : create_groupingsets_path(root,
4516 : grouped_rel,
4517 : path,
4518 480 : (List *) parse->havingQual,
4519 : AGG_MIXED,
4520 : rollups,
4521 : agg_costs));
4522 : }
4523 : }
4524 :
4525 : /*
4526 : * Now try the simple sorted case.
4527 : */
4528 1022 : if (!gd->unsortable_sets)
4529 992 : add_path(grouped_rel, (Path *)
4530 992 : create_groupingsets_path(root,
4531 : grouped_rel,
4532 : path,
4533 992 : (List *) parse->havingQual,
4534 : AGG_SORTED,
4535 : gd->rollups,
4536 : agg_costs));
4537 : }
4538 :
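/*
 * [Editor's sketch -- not part of planner.c] A minimal standalone model of
 * the weight-scaling scheme described in the comment above.  The function
 * name and parameters are hypothetical; only the arithmetic mirrors the
 * scale / k_capacity / k_weights computations.
 */
#include <math.h>

#define Max(x, y) ((x) > (y) ? (x) : (y))
#define Min(x, y) ((x) < (y) ? (x) : (y))

static int
scaled_rollup_weight(double availspace, int num_rollups, double hashtable_sz)
{
	/*
	 * Allow ~5% error: capacity is at most 20 * num_rollups units, so each
	 * item weight is off by no more than 1/20th of an average-sized share.
	 */
	double		scale = Max(availspace / (20.0 * num_rollups), 1.0);
	int			k_capacity = (int) floor(availspace / scale);

	/* clamp so an enormous size estimate cannot overflow the int weight */
	return (int) Min(floor(hashtable_sz / scale), k_capacity + 1.0);
}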
4539 : /*
4540 : * create_window_paths
4541 : *
4542 : * Build a new upperrel containing Paths for window-function evaluation.
4543 : *
4544 : * input_rel: contains the source-data Paths
4545 : * input_target: result of make_window_input_target
4546 : * output_target: what the topmost WindowAggPath should return
4547 : * wflists: result of find_window_functions
4548 : * activeWindows: result of select_active_windows
4549 : *
4550 : * Note: all Paths in input_rel are expected to return input_target.
4551 : */
4552 : static RelOptInfo *
4553 2576 : create_window_paths(PlannerInfo *root,
4554 : RelOptInfo *input_rel,
4555 : PathTarget *input_target,
4556 : PathTarget *output_target,
4557 : bool output_target_parallel_safe,
4558 : WindowFuncLists *wflists,
4559 : List *activeWindows)
4560 : {
4561 : RelOptInfo *window_rel;
4562 : ListCell *lc;
4563 :
4564 : /* For now, do all work in the (WINDOW, NULL) upperrel */
4565 2576 : window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4566 :
4567 : /*
4568 : * If the input relation is not parallel-safe, then the window relation
4569 : * can't be parallel-safe, either. Otherwise, we need to examine the
4570 : * target list and active windows for non-parallel-safe constructs.
4571 : */
4572 2576 : if (input_rel->consider_parallel && output_target_parallel_safe &&
4573 0 : is_parallel_safe(root, (Node *) activeWindows))
4574 0 : window_rel->consider_parallel = true;
4575 :
4576 : /*
4577 : * If the input rel belongs to a single FDW, so does the window rel.
4578 : */
4579 2576 : window_rel->serverid = input_rel->serverid;
4580 2576 : window_rel->userid = input_rel->userid;
4581 2576 : window_rel->useridiscurrent = input_rel->useridiscurrent;
4582 2576 : window_rel->fdwroutine = input_rel->fdwroutine;
4583 :
4584 : /*
4585 : * Consider computing window functions starting from the existing
4586 : * cheapest-total path (which will likely require a sort) as well as any
4587 : * existing paths that satisfy or partially satisfy root->window_pathkeys.
4588 : */
4589 5492 : foreach(lc, input_rel->pathlist)
4590 : {
4591 2916 : Path *path = (Path *) lfirst(lc);
4592 : int presorted_keys;
4593 :
4594 3256 : if (path == input_rel->cheapest_total_path ||
4595 340 : pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4596 152 : &presorted_keys) ||
4597 152 : presorted_keys > 0)
4598 2790 : create_one_window_path(root,
4599 : window_rel,
4600 : path,
4601 : input_target,
4602 : output_target,
4603 : wflists,
4604 : activeWindows);
4605 : }
4606 :
4607 : /*
4608 : * If there is an FDW that's responsible for all baserels of the query,
4609 : * let it consider adding ForeignPaths.
4610 : */
4611 2576 : if (window_rel->fdwroutine &&
4612 12 : window_rel->fdwroutine->GetForeignUpperPaths)
4613 12 : window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4614 : input_rel, window_rel,
4615 : NULL);
4616 :
4617 : /* Let extensions possibly add some more paths */
4618 2576 : if (create_upper_paths_hook)
4619 0 : (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4620 : input_rel, window_rel, NULL);
4621 :
4622 : /* Now choose the best path(s) */
4623 2576 : set_cheapest(window_rel);
4624 :
4625 2576 : return window_rel;
4626 : }
4627 :
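/*
 * [Editor's sketch -- not part of planner.c] The path-selection test used
 * by the loop above, restated as a hypothetical standalone predicate.
 */
#include <stdbool.h>

static bool
worth_windowing(bool is_cheapest_total, bool satisfies_window_pathkeys,
				int presorted_keys)
{
	/*
	 * The cheapest-total path is always worth trying (we can sort it); any
	 * other path must satisfy root->window_pathkeys at least partially.
	 */
	return is_cheapest_total || satisfies_window_pathkeys ||
		presorted_keys > 0;
}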
4628 : /*
4629 : * Stack window-function implementation steps atop the given Path, and
4630 : * add the result to window_rel.
4631 : *
4632 : * window_rel: upperrel to contain result
4633 : * path: input Path to use (must return input_target)
4634 : * input_target: result of make_window_input_target
4635 : * output_target: what the topmost WindowAggPath should return
4636 : * wflists: result of find_window_functions
4637 : * activeWindows: result of select_active_windows
4638 : */
4639 : static void
4640 2790 : create_one_window_path(PlannerInfo *root,
4641 : RelOptInfo *window_rel,
4642 : Path *path,
4643 : PathTarget *input_target,
4644 : PathTarget *output_target,
4645 : WindowFuncLists *wflists,
4646 : List *activeWindows)
4647 : {
4648 : PathTarget *window_target;
4649 : ListCell *l;
4650 2790 : List *topqual = NIL;
4651 :
4652 : /*
4653 : * Since each window clause could require a different sort order, we stack
4654 : * up a WindowAgg node for each clause, with sort steps between them as
4655 : * needed. (We assume that select_active_windows chose a good order for
4656 : * executing the clauses in.)
4657 : *
4658 : * input_target should contain all Vars and Aggs needed for the result.
4659 : * (In some cases we wouldn't need to propagate all of these all the way
4660 : * to the top, since they might only be needed as inputs to WindowFuncs.
4661 : * It's probably not worth trying to optimize that though.) It must also
4662 : * contain all window partitioning and sorting expressions, to ensure
4663 : * they're computed only once at the bottom of the stack (that's critical
4664 : * for volatile functions). As we climb up the stack, we'll add outputs
4665 : * for the WindowFuncs computed at each level.
4666 : */
4667 2790 : window_target = input_target;
4668 :
4669 5766 : foreach(l, activeWindows)
4670 : {
4671 2976 : WindowClause *wc = lfirst_node(WindowClause, l);
4672 : List *window_pathkeys;
4673 2976 : List *runcondition = NIL;
4674 : int presorted_keys;
4675 : bool is_sorted;
4676 : bool topwindow;
4677 : ListCell *lc2;
4678 :
4679 2976 : window_pathkeys = make_pathkeys_for_window(root,
4680 : wc,
4681 : root->processed_tlist);
4682 :
4683 2976 : is_sorted = pathkeys_count_contained_in(window_pathkeys,
4684 : path->pathkeys,
4685 : &presorted_keys);
4686 :
4687 : /* Sort if necessary */
4688 2976 : if (!is_sorted)
4689 : {
4690 : /*
 4691             :              * There are no presorted keys, or incremental sort is disabled;
 4692             :              * just perform a complete sort.
4693 : */
4694 2184 : if (presorted_keys == 0 || !enable_incremental_sort)
4695 2122 : path = (Path *) create_sort_path(root, window_rel,
4696 : path,
4697 : window_pathkeys,
4698 : -1.0);
4699 : else
4700 : {
4701 : /*
4702 : * Since we have presorted keys and incremental sort is
4703 : * enabled, just use incremental sort.
4704 : */
4705 62 : path = (Path *) create_incremental_sort_path(root,
4706 : window_rel,
4707 : path,
4708 : window_pathkeys,
4709 : presorted_keys,
4710 : -1.0);
4711 : }
4712 : }
4713 :
4714 2976 : if (lnext(activeWindows, l))
4715 : {
4716 : /*
4717 : * Add the current WindowFuncs to the output target for this
4718 : * intermediate WindowAggPath. We must copy window_target to
4719 : * avoid changing the previous path's target.
4720 : *
4721 : * Note: a WindowFunc adds nothing to the target's eval costs; but
4722 : * we do need to account for the increase in tlist width.
4723 : */
4724 186 : int64 tuple_width = window_target->width;
4725 :
4726 186 : window_target = copy_pathtarget(window_target);
4727 438 : foreach(lc2, wflists->windowFuncs[wc->winref])
4728 : {
4729 252 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4730 :
4731 252 : add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4732 252 : tuple_width += get_typavgwidth(wfunc->wintype, -1);
4733 : }
4734 186 : window_target->width = clamp_width_est(tuple_width);
4735 : }
4736 : else
4737 : {
4738 : /* Install the goal target in the topmost WindowAgg */
4739 2790 : window_target = output_target;
4740 : }
4741 :
4742 : /* mark the final item in the list as the top-level window */
4743 2976 : topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4744 :
4745 : /*
4746 : * Collect the WindowFuncRunConditions from each WindowFunc and
4747 : * convert them into OpExprs
4748 : */
4749 6822 : foreach(lc2, wflists->windowFuncs[wc->winref])
4750 : {
4751 : ListCell *lc3;
4752 3846 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4753 :
4754 4026 : foreach(lc3, wfunc->runCondition)
4755 : {
4756 180 : WindowFuncRunCondition *wfuncrc =
4757 : lfirst_node(WindowFuncRunCondition, lc3);
4758 : Expr *opexpr;
4759 : Expr *leftop;
4760 : Expr *rightop;
4761 :
4762 180 : if (wfuncrc->wfunc_left)
4763 : {
4764 162 : leftop = (Expr *) copyObject(wfunc);
4765 162 : rightop = copyObject(wfuncrc->arg);
4766 : }
4767 : else
4768 : {
4769 18 : leftop = copyObject(wfuncrc->arg);
4770 18 : rightop = (Expr *) copyObject(wfunc);
4771 : }
4772 :
4773 180 : opexpr = make_opclause(wfuncrc->opno,
4774 : BOOLOID,
4775 : false,
4776 : leftop,
4777 : rightop,
4778 : InvalidOid,
4779 : wfuncrc->inputcollid);
4780 :
4781 180 : runcondition = lappend(runcondition, opexpr);
4782 :
4783 180 : if (!topwindow)
4784 24 : topqual = lappend(topqual, opexpr);
4785 : }
4786 : }
4787 :
4788 : path = (Path *)
4789 2976 : create_windowagg_path(root, window_rel, path, window_target,
4790 2976 : wflists->windowFuncs[wc->winref],
4791 : runcondition, wc,
4792 : topwindow ? topqual : NIL, topwindow);
4793 : }
4794 :
4795 2790 : add_path(window_rel, path);
4796 2790 : }
4797 :
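/*
 * [Editor's sketch -- not part of planner.c] A toy model of the stacking
 * scheme above: one WindowAgg per active window clause, with a sort step
 * injected whenever the input ordering doesn't already satisfy that
 * clause's pathkeys.  All names and types here are hypothetical.
 */
#include <stdbool.h>
#include <stdlib.h>

typedef struct ToyPlan
{
	const char *label;
	struct ToyPlan *child;
} ToyPlan;

static ToyPlan *
toy_push(const char *label, ToyPlan *child)
{
	ToyPlan    *n = malloc(sizeof(ToyPlan));

	n->label = label;
	n->child = child;
	return n;
}

static ToyPlan *
toy_stack_windowaggs(ToyPlan *input, int nwindows, const bool *is_sorted)
{
	for (int i = 0; i < nwindows; i++)
	{
		if (!is_sorted[i])
			input = toy_push("Sort", input);
		input = toy_push("WindowAgg", input);
	}
	return input;
}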
4798 : /*
4799 : * create_distinct_paths
4800 : *
4801 : * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4802 : *
4803 : * input_rel: contains the source-data Paths
4804 : * target: the pathtarget for the result Paths to compute
4805 : *
4806 : * Note: input paths should already compute the desired pathtarget, since
4807 : * Sort/Unique won't project anything.
4808 : */
4809 : static RelOptInfo *
4810 3004 : create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4811 : PathTarget *target)
4812 : {
4813 : RelOptInfo *distinct_rel;
4814 :
4815 : /* For now, do all work in the (DISTINCT, NULL) upperrel */
4816 3004 : distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4817 :
4818 : /*
4819 : * We don't compute anything at this level, so distinct_rel will be
4820 : * parallel-safe if the input rel is parallel-safe. In particular, if
4821 : * there is a DISTINCT ON (...) clause, any path for the input_rel will
4822 : * output those expressions, and will not be parallel-safe unless those
4823 : * expressions are parallel-safe.
4824 : */
4825 3004 : distinct_rel->consider_parallel = input_rel->consider_parallel;
4826 :
4827 : /*
4828 : * If the input rel belongs to a single FDW, so does the distinct_rel.
4829 : */
4830 3004 : distinct_rel->serverid = input_rel->serverid;
4831 3004 : distinct_rel->userid = input_rel->userid;
4832 3004 : distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4833 3004 : distinct_rel->fdwroutine = input_rel->fdwroutine;
4834 :
4835 : /* build distinct paths based on input_rel's pathlist */
4836 3004 : create_final_distinct_paths(root, input_rel, distinct_rel);
4837 :
4838 : /* now build distinct paths based on input_rel's partial_pathlist */
4839 3004 : create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4840 :
4841 : /* Give a helpful error if we failed to create any paths */
4842 3004 : if (distinct_rel->pathlist == NIL)
4843 0 : ereport(ERROR,
4844 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4845 : errmsg("could not implement DISTINCT"),
4846 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4847 :
4848 : /*
4849 : * If there is an FDW that's responsible for all baserels of the query,
4850 : * let it consider adding ForeignPaths.
4851 : */
4852 3004 : if (distinct_rel->fdwroutine &&
4853 16 : distinct_rel->fdwroutine->GetForeignUpperPaths)
4854 16 : distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4855 : UPPERREL_DISTINCT,
4856 : input_rel,
4857 : distinct_rel,
4858 : NULL);
4859 :
4860 : /* Let extensions possibly add some more paths */
4861 3004 : if (create_upper_paths_hook)
4862 0 : (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4863 : distinct_rel, NULL);
4864 :
4865 : /* Now choose the best path(s) */
4866 3004 : set_cheapest(distinct_rel);
4867 :
4868 3004 : return distinct_rel;
4869 : }
4870 :
4871 : /*
4872 : * create_partial_distinct_paths
4873 : *
 4874             :  * Process the partial paths of 'input_rel' and add unique/aggregate paths
 4875             :  * to the UPPERREL_PARTIAL_DISTINCT rel.  For the paths created, add
 4876             :  * Gather/GatherMerge paths on top and a final unique/aggregate path to
 4877             :  * remove any duplicates produced by combining rows from parallel workers.
4878 : */
4879 : static void
4880 3004 : create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4881 : RelOptInfo *final_distinct_rel,
4882 : PathTarget *target)
4883 : {
4884 : RelOptInfo *partial_distinct_rel;
4885 : Query *parse;
4886 : List *distinctExprs;
4887 : double numDistinctRows;
4888 : Path *cheapest_partial_path;
4889 : ListCell *lc;
4890 :
4891 : /* nothing to do when there are no partial paths in the input rel */
4892 3004 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4893 2896 : return;
4894 :
4895 108 : parse = root->parse;
4896 :
4897 : /* can't do parallel DISTINCT ON */
4898 108 : if (parse->hasDistinctOn)
4899 0 : return;
4900 :
4901 108 : partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4902 : NULL);
4903 108 : partial_distinct_rel->reltarget = target;
4904 108 : partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4905 :
4906 : /*
4907 : * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4908 : */
4909 108 : partial_distinct_rel->serverid = input_rel->serverid;
4910 108 : partial_distinct_rel->userid = input_rel->userid;
4911 108 : partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4912 108 : partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4913 :
4914 108 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
4915 :
4916 108 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4917 : parse->targetList);
4918 :
4919 : /* estimate how many distinct rows we'll get from each worker */
4920 108 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4921 : cheapest_partial_path->rows,
4922 : NULL, NULL);
4923 :
4924 : /*
4925 : * Try sorting the cheapest path and incrementally sorting any paths with
 4926             :      * presorted keys, putting unique paths atop those.  We'll also
4927 : * attempt to reorder the required pathkeys to match the input path's
4928 : * pathkeys as much as possible, in hopes of avoiding a possible need to
4929 : * re-sort.
4930 : */
4931 108 : if (grouping_is_sortable(root->processed_distinctClause))
4932 : {
4933 234 : foreach(lc, input_rel->partial_pathlist)
4934 : {
4935 126 : Path *input_path = (Path *) lfirst(lc);
4936 : Path *sorted_path;
4937 126 : List *useful_pathkeys_list = NIL;
4938 :
4939 : useful_pathkeys_list =
4940 126 : get_useful_pathkeys_for_distinct(root,
4941 : root->distinct_pathkeys,
4942 : input_path->pathkeys);
4943 : Assert(list_length(useful_pathkeys_list) > 0);
4944 :
4945 390 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4946 : {
4947 138 : sorted_path = make_ordered_path(root,
4948 : partial_distinct_rel,
4949 : input_path,
4950 : cheapest_partial_path,
4951 : useful_pathkeys,
4952 : -1.0);
4953 :
4954 138 : if (sorted_path == NULL)
4955 12 : continue;
4956 :
4957 : /*
4958 : * An empty distinct_pathkeys means all tuples have the same
4959 : * value for the DISTINCT clause. See
4960 : * create_final_distinct_paths()
4961 : */
4962 126 : if (root->distinct_pathkeys == NIL)
4963 : {
4964 : Node *limitCount;
4965 :
4966 6 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4967 : sizeof(int64),
4968 : Int64GetDatum(1), false,
4969 : true);
4970 :
4971 : /*
4972 : * Apply a LimitPath onto the partial path to restrict the
4973 : * tuples from each worker to 1.
4974 : * create_final_distinct_paths will need to apply an
4975 : * additional LimitPath to restrict this to a single row
4976 : * after the Gather node. If the query already has a
4977 : * LIMIT clause, then we could end up with three Limit
4978 : * nodes in the final plan. Consolidating the top two of
4979 : * these could be done, but does not seem worth troubling
4980 : * over.
4981 : */
4982 6 : add_partial_path(partial_distinct_rel, (Path *)
4983 6 : create_limit_path(root, partial_distinct_rel,
4984 : sorted_path,
4985 : NULL,
4986 : limitCount,
4987 : LIMIT_OPTION_COUNT,
4988 : 0, 1));
4989 : }
4990 : else
4991 : {
4992 120 : add_partial_path(partial_distinct_rel, (Path *)
4993 120 : create_unique_path(root, partial_distinct_rel,
4994 : sorted_path,
4995 120 : list_length(root->distinct_pathkeys),
4996 : numDistinctRows));
4997 : }
4998 : }
4999 : }
5000 : }
5001 :
5002 : /*
5003 : * Now try hash aggregate paths, if enabled and hashing is possible. Since
5004 : * we're not on the hook to ensure we do our best to create at least one
5005 : * path here, we treat enable_hashagg as a hard off-switch rather than the
5006 : * slightly softer variant in create_final_distinct_paths.
5007 : */
5008 108 : if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5009 : {
5010 78 : add_partial_path(partial_distinct_rel, (Path *)
5011 78 : create_agg_path(root,
5012 : partial_distinct_rel,
5013 : cheapest_partial_path,
5014 : cheapest_partial_path->pathtarget,
5015 : AGG_HASHED,
5016 : AGGSPLIT_SIMPLE,
5017 : root->processed_distinctClause,
5018 : NIL,
5019 : NULL,
5020 : numDistinctRows));
5021 : }
5022 :
5023 : /*
5024 : * If there is an FDW that's responsible for all baserels of the query,
5025 : * let it consider adding ForeignPaths.
5026 : */
5027 108 : if (partial_distinct_rel->fdwroutine &&
5028 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5029 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5030 : UPPERREL_PARTIAL_DISTINCT,
5031 : input_rel,
5032 : partial_distinct_rel,
5033 : NULL);
5034 :
5035 : /* Let extensions possibly add some more partial paths */
5036 108 : if (create_upper_paths_hook)
5037 0 : (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5038 : input_rel, partial_distinct_rel, NULL);
5039 :
5040 108 : if (partial_distinct_rel->partial_pathlist != NIL)
5041 : {
5042 108 : generate_useful_gather_paths(root, partial_distinct_rel, true);
5043 108 : set_cheapest(partial_distinct_rel);
5044 :
5045 : /*
5046 : * Finally, create paths to distinctify the final result. This step
5047 : * is needed to remove any duplicates due to combining rows from
5048 : * parallel workers.
5049 : */
5050 108 : create_final_distinct_paths(root, partial_distinct_rel,
5051 : final_distinct_rel);
5052 : }
5053 : }
5054 :
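/*
 * [Editor's note -- illustrative, not from the source] The plan shape this
 * function builds toward is roughly:
 *
 *     Unique / Agg / Limit           <- added by create_final_distinct_paths
 *       -> Gather / Gather Merge        to de-duplicate across workers
 *            -> Unique / Agg / Limit <- runs in each worker
 *                 -> partial path
 */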
5055 : /*
5056 : * create_final_distinct_paths
5057 : * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
5058 : *
5059 : * input_rel: contains the source-data paths
5060 : * distinct_rel: destination relation for storing created paths
5061 : */
5062 : static RelOptInfo *
5063 3112 : create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
5064 : RelOptInfo *distinct_rel)
5065 : {
5066 3112 : Query *parse = root->parse;
5067 3112 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5068 : double numDistinctRows;
5069 : bool allow_hash;
5070 :
5071 : /* Estimate number of distinct rows there will be */
5072 3112 : if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5073 3038 : root->hasHavingQual)
5074 : {
5075 : /*
5076 : * If there was grouping or aggregation, use the number of input rows
5077 : * as the estimated number of DISTINCT rows (ie, assume the input is
5078 : * already mostly unique).
5079 : */
5080 74 : numDistinctRows = cheapest_input_path->rows;
5081 : }
5082 : else
5083 : {
5084 : /*
5085 : * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5086 : */
5087 : List *distinctExprs;
5088 :
5089 3038 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5090 : parse->targetList);
5091 3038 : numDistinctRows = estimate_num_groups(root, distinctExprs,
5092 : cheapest_input_path->rows,
5093 : NULL, NULL);
5094 : }
5095 :
5096 : /*
5097 : * Consider sort-based implementations of DISTINCT, if possible.
5098 : */
5099 3112 : if (grouping_is_sortable(root->processed_distinctClause))
5100 : {
5101 : /*
 5102             :          * First, if we have any adequately-presorted paths, just stick a
 5103             :          * Unique node on those.  We also consider doing an explicit sort of
 5104             :          * the cheapest input path and Unique'ing that.  If any paths have
 5105             :          * presorted keys, then we'll create an incremental sort atop those
 5106             :          * before adding a Unique node on top.  We'll also attempt to
5107 : * reorder the required pathkeys to match the input path's pathkeys as
5108 : * much as possible, in hopes of avoiding a possible need to re-sort.
5109 : *
5110 : * When we have DISTINCT ON, we must sort by the more rigorous of
5111 : * DISTINCT and ORDER BY, else it won't have the desired behavior.
5112 : * Also, if we do have to do an explicit sort, we might as well use
5113 : * the more rigorous ordering to avoid a second sort later. (Note
5114 : * that the parser will have ensured that one clause is a prefix of
5115 : * the other.)
5116 : */
5117 : List *needed_pathkeys;
5118 : ListCell *lc;
5119 3106 : double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5120 :
5121 3356 : if (parse->hasDistinctOn &&
5122 250 : list_length(root->distinct_pathkeys) <
5123 250 : list_length(root->sort_pathkeys))
5124 54 : needed_pathkeys = root->sort_pathkeys;
5125 : else
5126 3052 : needed_pathkeys = root->distinct_pathkeys;
5127 :
5128 8392 : foreach(lc, input_rel->pathlist)
5129 : {
5130 5286 : Path *input_path = (Path *) lfirst(lc);
5131 : Path *sorted_path;
5132 5286 : List *useful_pathkeys_list = NIL;
5133 :
5134 : useful_pathkeys_list =
5135 5286 : get_useful_pathkeys_for_distinct(root,
5136 : needed_pathkeys,
5137 : input_path->pathkeys);
5138 : Assert(list_length(useful_pathkeys_list) > 0);
5139 :
5140 16678 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5141 : {
5142 6106 : sorted_path = make_ordered_path(root,
5143 : distinct_rel,
5144 : input_path,
5145 : cheapest_input_path,
5146 : useful_pathkeys,
5147 : limittuples);
5148 :
5149 6106 : if (sorted_path == NULL)
5150 878 : continue;
5151 :
5152 : /*
5153 : * distinct_pathkeys may have become empty if all of the
5154 : * pathkeys were determined to be redundant. If all of the
5155 : * pathkeys are redundant then each DISTINCT target must only
5156 : * allow a single value, therefore all resulting tuples must
5157 : * be identical (or at least indistinguishable by an equality
 5158             :                  * check).  We can uniquify these tuples simply by taking the
 5159             :                  * first tuple.  All we do here is add a path to do "LIMIT
 5160             :                  * 1" atop 'sorted_path'.  When doing a DISTINCT ON we may
5161 : * still have a non-NIL sort_pathkeys list, so we must still
5162 : * only do this with paths which are correctly sorted by
5163 : * sort_pathkeys.
5164 : */
5165 5228 : if (root->distinct_pathkeys == NIL)
5166 : {
5167 : Node *limitCount;
5168 :
5169 138 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5170 : sizeof(int64),
5171 : Int64GetDatum(1), false,
5172 : true);
5173 :
5174 : /*
5175 : * If the query already has a LIMIT clause, then we could
5176 : * end up with a duplicate LimitPath in the final plan.
5177 : * That does not seem worth troubling over too much.
5178 : */
5179 138 : add_path(distinct_rel, (Path *)
5180 138 : create_limit_path(root, distinct_rel, sorted_path,
5181 : NULL, limitCount,
5182 : LIMIT_OPTION_COUNT, 0, 1));
5183 : }
5184 : else
5185 : {
5186 5090 : add_path(distinct_rel, (Path *)
5187 5090 : create_unique_path(root, distinct_rel,
5188 : sorted_path,
5189 5090 : list_length(root->distinct_pathkeys),
5190 : numDistinctRows));
5191 : }
5192 : }
5193 : }
5194 : }
5195 :
5196 : /*
5197 : * Consider hash-based implementations of DISTINCT, if possible.
5198 : *
5199 : * If we were not able to make any other types of path, we *must* hash or
5200 : * die trying. If we do have other choices, there are two things that
5201 : * should prevent selection of hashing: if the query uses DISTINCT ON
5202 : * (because it won't really have the expected behavior if we hash), or if
5203 : * enable_hashagg is off.
5204 : *
5205 : * Note: grouping_is_hashable() is much more expensive to check than the
5206 : * other gating conditions, so we want to do it last.
5207 : */
5208 3112 : if (distinct_rel->pathlist == NIL)
5209 6 : allow_hash = true; /* we have no alternatives */
5210 3106 : else if (parse->hasDistinctOn || !enable_hashagg)
5211 400 : allow_hash = false; /* policy-based decision not to hash */
5212 : else
5213 2706 : allow_hash = true; /* default */
5214 :
5215 3112 : if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5216 : {
5217 : /* Generate hashed aggregate path --- no sort needed */
5218 2712 : add_path(distinct_rel, (Path *)
5219 2712 : create_agg_path(root,
5220 : distinct_rel,
5221 : cheapest_input_path,
5222 : cheapest_input_path->pathtarget,
5223 : AGG_HASHED,
5224 : AGGSPLIT_SIMPLE,
5225 : root->processed_distinctClause,
5226 : NIL,
5227 : NULL,
5228 : numDistinctRows));
5229 : }
5230 :
5231 3112 : return distinct_rel;
5232 : }
5233 :
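/*
 * [Editor's sketch -- not part of planner.c] The allow_hash policy above,
 * restated as a hypothetical standalone predicate.
 */
#include <stdbool.h>

static bool
allow_hash_distinct(bool have_sorted_paths, bool hasDistinctOn,
					bool enable_hashagg)
{
	if (!have_sorted_paths)
		return true;			/* no alternatives: hash or die trying */
	if (hasDistinctOn || !enable_hashagg)
		return false;			/* policy-based decision not to hash */
	return true;				/* default */
}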
5234 : /*
5235 : * get_useful_pathkeys_for_distinct
5236 : * Get useful orderings of pathkeys for distinctClause by reordering
5237 : * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5238 : *
 5239             :  * This returns a list of pathkeys useful for the DISTINCT or DISTINCT ON
 5240             :  * clause.  For convenience, it always includes the given 'needed_pathkeys'.
5241 : */
5242 : static List *
5243 5412 : get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys,
5244 : List *path_pathkeys)
5245 : {
5246 5412 : List *useful_pathkeys_list = NIL;
5247 5412 : List *useful_pathkeys = NIL;
5248 :
5249 : /* always include the given 'needed_pathkeys' */
5250 5412 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5251 : needed_pathkeys);
5252 :
5253 5412 : if (!enable_distinct_reordering)
5254 0 : return useful_pathkeys_list;
5255 :
5256 : /*
5257 : * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5258 : * that match 'needed_pathkeys', but only up to the longest matching
5259 : * prefix.
5260 : *
5261 : * When we have DISTINCT ON, we must ensure that the resulting pathkey
5262 : * list matches initial distinctClause pathkeys; otherwise, it won't have
5263 : * the desired behavior.
5264 : */
5265 13408 : foreach_node(PathKey, pathkey, path_pathkeys)
5266 : {
5267 : /*
5268 : * The PathKey nodes are canonical, so they can be checked for
5269 : * equality by simple pointer comparison.
5270 : */
5271 2612 : if (!list_member_ptr(needed_pathkeys, pathkey))
5272 10 : break;
5273 2602 : if (root->parse->hasDistinctOn &&
5274 202 : !list_member_ptr(root->distinct_pathkeys, pathkey))
5275 18 : break;
5276 :
5277 2584 : useful_pathkeys = lappend(useful_pathkeys, pathkey);
5278 : }
5279 :
5280 : /* If no match at all, no point in reordering needed_pathkeys */
5281 5412 : if (useful_pathkeys == NIL)
5282 3092 : return useful_pathkeys_list;
5283 :
5284 : /*
 5285             :      * If it is not a full match, the resulting pathkey list is not useful
 5286             :      * without incremental sort.
5287 : */
5288 2320 : if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5289 1562 : !enable_incremental_sort)
5290 60 : return useful_pathkeys_list;
5291 :
5292 : /* Append the remaining PathKey nodes in needed_pathkeys */
5293 2260 : useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5294 : needed_pathkeys);
5295 :
5296 : /*
5297 : * If the resulting pathkey list is the same as the 'needed_pathkeys',
5298 : * just drop it.
5299 : */
5300 2260 : if (compare_pathkeys(needed_pathkeys,
5301 : useful_pathkeys) == PATHKEYS_EQUAL)
5302 1428 : return useful_pathkeys_list;
5303 :
5304 832 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5305 : useful_pathkeys);
5306 :
5307 832 : return useful_pathkeys_list;
5308 : }
5309 :
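/*
 * [Editor's sketch -- not part of planner.c] The reordering idea above,
 * with small integers standing in for canonical PathKey pointers: keep the
 * longest prefix of 'path' whose members all appear in 'needed', then
 * append the remaining needed keys in their original order.  Hypothetical
 * helper; the quadratic scans are fine for the short lists involved.
 */
#include <stdbool.h>

static bool
toy_key_in(int key, const int *keys, int nkeys)
{
	for (int i = 0; i < nkeys; i++)
		if (keys[i] == key)
			return true;
	return false;
}

static int
toy_reorder_keys(const int *needed, int nneeded,
				 const int *path, int npath, int *out)
{
	int			n = 0;

	/* longest prefix of the path's keys that are all needed */
	for (int i = 0; i < npath && toy_key_in(path[i], needed, nneeded); i++)
		out[n++] = path[i];

	/* then the rest of the needed keys, order preserved */
	for (int j = 0; j < nneeded; j++)
		if (!toy_key_in(needed[j], out, n))
			out[n++] = needed[j];

	return n;
}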
5310 : /*
5311 : * create_ordered_paths
5312 : *
5313 : * Build a new upperrel containing Paths for ORDER BY evaluation.
5314 : *
5315 : * All paths in the result must satisfy the ORDER BY ordering.
5316 : * The only new paths we need consider are an explicit full sort
5317 : * and incremental sort on the cheapest-total existing path.
5318 : *
5319 : * input_rel: contains the source-data Paths
5320 : * target: the output tlist the result Paths must emit
5321 : * limit_tuples: estimated bound on the number of output tuples,
5322 : * or -1 if no LIMIT or couldn't estimate
5323 : *
5324 : * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5325 : * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5326 : */
5327 : static RelOptInfo *
5328 75624 : create_ordered_paths(PlannerInfo *root,
5329 : RelOptInfo *input_rel,
5330 : PathTarget *target,
5331 : bool target_parallel_safe,
5332 : double limit_tuples)
5333 : {
5334 75624 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5335 : RelOptInfo *ordered_rel;
5336 : ListCell *lc;
5337 :
5338 : /* For now, do all work in the (ORDERED, NULL) upperrel */
5339 75624 : ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5340 :
5341 : /*
5342 : * If the input relation is not parallel-safe, then the ordered relation
5343 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5344 : * target list is parallel-safe.
5345 : */
5346 75624 : if (input_rel->consider_parallel && target_parallel_safe)
5347 52704 : ordered_rel->consider_parallel = true;
5348 :
5349 : /*
5350 : * If the input rel belongs to a single FDW, so does the ordered_rel.
5351 : */
5352 75624 : ordered_rel->serverid = input_rel->serverid;
5353 75624 : ordered_rel->userid = input_rel->userid;
5354 75624 : ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5355 75624 : ordered_rel->fdwroutine = input_rel->fdwroutine;
5356 :
5357 191478 : foreach(lc, input_rel->pathlist)
5358 : {
5359 115854 : Path *input_path = (Path *) lfirst(lc);
5360 : Path *sorted_path;
5361 : bool is_sorted;
5362 : int presorted_keys;
5363 :
5364 115854 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5365 : input_path->pathkeys, &presorted_keys);
5366 :
5367 115854 : if (is_sorted)
5368 43538 : sorted_path = input_path;
5369 : else
5370 : {
5371 : /*
5372 : * Try at least sorting the cheapest path and also try
5373 : * incrementally sorting any path which is partially sorted
5374 : * already (no need to deal with paths which have presorted keys
5375 : * when incremental sort is disabled unless it's the cheapest
5376 : * input path).
5377 : */
5378 72316 : if (input_path != cheapest_input_path &&
5379 6326 : (presorted_keys == 0 || !enable_incremental_sort))
5380 2210 : continue;
5381 :
5382 : /*
5383 : * We've no need to consider both a sort and incremental sort.
5384 : * We'll just do a sort if there are no presorted keys and an
5385 : * incremental sort when there are presorted keys.
5386 : */
5387 70106 : if (presorted_keys == 0 || !enable_incremental_sort)
5388 65366 : sorted_path = (Path *) create_sort_path(root,
5389 : ordered_rel,
5390 : input_path,
5391 : root->sort_pathkeys,
5392 : limit_tuples);
5393 : else
5394 4740 : sorted_path = (Path *) create_incremental_sort_path(root,
5395 : ordered_rel,
5396 : input_path,
5397 : root->sort_pathkeys,
5398 : presorted_keys,
5399 : limit_tuples);
5400 : }
5401 :
5402 : /*
5403 : * If the pathtarget of the result path has different expressions from
5404 : * the target to be applied, a projection step is needed.
5405 : */
5406 113644 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5407 294 : sorted_path = apply_projection_to_path(root, ordered_rel,
5408 : sorted_path, target);
5409 :
5410 113644 : add_path(ordered_rel, sorted_path);
5411 : }
5412 :
5413 : /*
5414 : * generate_gather_paths() will have already generated a simple Gather
5415 : * path for the best parallel path, if any, and the loop above will have
5416 : * considered sorting it. Similarly, generate_gather_paths() will also
5417 : * have generated order-preserving Gather Merge plans which can be used
5418 : * without sorting if they happen to match the sort_pathkeys, and the loop
5419 : * above will have handled those as well. However, there's one more
5420 : * possibility: it may make sense to sort the cheapest partial path or
5421 : * incrementally sort any partial path that is partially sorted according
5422 : * to the required output order and then use Gather Merge.
5423 : */
5424 75624 : if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5425 52500 : input_rel->partial_pathlist != NIL)
5426 : {
5427 : Path *cheapest_partial_path;
5428 :
5429 2868 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
5430 :
5431 6530 : foreach(lc, input_rel->partial_pathlist)
5432 : {
5433 3662 : Path *input_path = (Path *) lfirst(lc);
5434 : Path *sorted_path;
5435 : bool is_sorted;
5436 : int presorted_keys;
5437 : double total_groups;
5438 :
5439 3662 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5440 : input_path->pathkeys,
5441 : &presorted_keys);
5442 :
5443 3662 : if (is_sorted)
5444 674 : continue;
5445 :
5446 : /*
5447 : * Try at least sorting the cheapest path and also try
5448 : * incrementally sorting any path which is partially sorted
5449 : * already (no need to deal with paths which have presorted keys
5450 : * when incremental sort is disabled unless it's the cheapest
5451 : * partial path).
5452 : */
5453 2988 : if (input_path != cheapest_partial_path &&
5454 150 : (presorted_keys == 0 || !enable_incremental_sort))
5455 0 : continue;
5456 :
5457 : /*
5458 : * We've no need to consider both a sort and incremental sort.
5459 : * We'll just do a sort if there are no presorted keys and an
5460 : * incremental sort when there are presorted keys.
5461 : */
5462 2988 : if (presorted_keys == 0 || !enable_incremental_sort)
5463 2820 : sorted_path = (Path *) create_sort_path(root,
5464 : ordered_rel,
5465 : input_path,
5466 : root->sort_pathkeys,
5467 : limit_tuples);
5468 : else
5469 168 : sorted_path = (Path *) create_incremental_sort_path(root,
5470 : ordered_rel,
5471 : input_path,
5472 : root->sort_pathkeys,
5473 : presorted_keys,
5474 : limit_tuples);
5475 2988 : total_groups = compute_gather_rows(sorted_path);
5476 : sorted_path = (Path *)
5477 2988 : create_gather_merge_path(root, ordered_rel,
5478 : sorted_path,
5479 : sorted_path->pathtarget,
5480 : root->sort_pathkeys, NULL,
5481 : &total_groups);
5482 :
5483 : /*
5484 : * If the pathtarget of the result path has different expressions
5485 : * from the target to be applied, a projection step is needed.
5486 : */
5487 2988 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5488 6 : sorted_path = apply_projection_to_path(root, ordered_rel,
5489 : sorted_path, target);
5490 :
5491 2988 : add_path(ordered_rel, sorted_path);
5492 : }
5493 : }
5494 :
5495 : /*
5496 : * If there is an FDW that's responsible for all baserels of the query,
5497 : * let it consider adding ForeignPaths.
5498 : */
5499 75624 : if (ordered_rel->fdwroutine &&
5500 384 : ordered_rel->fdwroutine->GetForeignUpperPaths)
5501 370 : ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5502 : input_rel, ordered_rel,
5503 : NULL);
5504 :
5505 : /* Let extensions possibly add some more paths */
5506 75624 : if (create_upper_paths_hook)
5507 0 : (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5508 : input_rel, ordered_rel, NULL);
5509 :
5510 : /*
5511 : * No need to bother with set_cheapest here; grouping_planner does not
5512 : * need us to do it.
5513 : */
5514 : Assert(ordered_rel->pathlist != NIL);
5515 :
5516 75624 : return ordered_rel;
5517 : }
5518 :
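/*
 * [Editor's sketch -- not part of planner.c] The sort-selection logic that
 * appears twice above (for complete and for partial input paths), distilled
 * into one hypothetical decision function for paths that are not already
 * fully sorted.
 */
#include <stdbool.h>

typedef enum ToySortChoice
{
	TOY_SKIP,					/* not worth sorting this path */
	TOY_FULL_SORT,				/* use create_sort_path */
	TOY_INCREMENTAL_SORT		/* use create_incremental_sort_path */
} ToySortChoice;

static ToySortChoice
toy_choose_sort(bool is_cheapest, int presorted_keys, bool enable_incr_sort)
{
	if (!is_cheapest && (presorted_keys == 0 || !enable_incr_sort))
		return TOY_SKIP;
	if (presorted_keys == 0 || !enable_incr_sort)
		return TOY_FULL_SORT;
	return TOY_INCREMENTAL_SORT;
}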
5519 :
5520 : /*
5521 : * make_group_input_target
5522 : * Generate appropriate PathTarget for initial input to grouping nodes.
5523 : *
5524 : * If there is grouping or aggregation, the scan/join subplan cannot emit
5525 : * the query's final targetlist; for example, it certainly can't emit any
5526 : * aggregate function calls. This routine generates the correct target
5527 : * for the scan/join subplan.
5528 : *
5529 : * The query target list passed from the parser already contains entries
5530 : * for all ORDER BY and GROUP BY expressions, but it will not have entries
5531 : * for variables used only in HAVING clauses; so we need to add those
5532 : * variables to the subplan target list. Also, we flatten all expressions
5533 : * except GROUP BY items into their component variables; other expressions
5534 : * will be computed by the upper plan nodes rather than by the subplan.
5535 : * For example, given a query like
5536 : * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5537 : * we want to pass this targetlist to the subplan:
5538 : * a+b,c,d
5539 : * where the a+b target will be used by the Sort/Group steps, and the
5540 : * other targets will be used for computing the final results.
5541 : *
5542 : * 'final_target' is the query's final target list (in PathTarget form)
5543 : *
5544 : * The result is the PathTarget to be computed by the Paths returned from
5545 : * query_planner().
5546 : */
5547 : static PathTarget *
5548 45398 : make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5549 : {
5550 45398 : Query *parse = root->parse;
5551 : PathTarget *input_target;
5552 : List *non_group_cols;
5553 : List *non_group_vars;
5554 : int i;
5555 : ListCell *lc;
5556 :
5557 : /*
5558 : * We must build a target containing all grouping columns, plus any other
5559 : * Vars mentioned in the query's targetlist and HAVING qual.
5560 : */
5561 45398 : input_target = create_empty_pathtarget();
5562 45398 : non_group_cols = NIL;
5563 :
5564 45398 : i = 0;
5565 111782 : foreach(lc, final_target->exprs)
5566 : {
5567 66384 : Expr *expr = (Expr *) lfirst(lc);
5568 66384 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
5569 :
5570 75904 : if (sgref && root->processed_groupClause &&
5571 9520 : get_sortgroupref_clause_noerr(sgref,
5572 : root->processed_groupClause) != NULL)
5573 : {
5574 : /*
5575 : * It's a grouping column, so add it to the input target as-is.
5576 : *
5577 : * Note that the target is logically below the grouping step. So
5578 : * with grouping sets we need to remove the RT index of the
5579 : * grouping step if there is any from the target expression.
5580 : */
5581 7650 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5582 : {
5583 : Assert(root->group_rtindex > 0);
5584 : expr = (Expr *)
5585 1950 : remove_nulling_relids((Node *) expr,
5586 1950 : bms_make_singleton(root->group_rtindex),
5587 : NULL);
5588 : }
5589 7650 : add_column_to_pathtarget(input_target, expr, sgref);
5590 : }
5591 : else
5592 : {
5593 : /*
5594 : * Non-grouping column, so just remember the expression for later
5595 : * call to pull_var_clause.
5596 : */
5597 58734 : non_group_cols = lappend(non_group_cols, expr);
5598 : }
5599 :
5600 66384 : i++;
5601 : }
5602 :
5603 : /*
5604 : * If there's a HAVING clause, we'll need the Vars it uses, too.
5605 : */
5606 45398 : if (parse->havingQual)
5607 962 : non_group_cols = lappend(non_group_cols, parse->havingQual);
5608 :
5609 : /*
5610 : * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5611 : * add them to the input target if not already present. (A Var used
5612 : * directly as a GROUP BY item will be present already.) Note this
5613 : * includes Vars used in resjunk items, so we are covering the needs of
5614 : * ORDER BY and window specifications. Vars used within Aggrefs and
5615 : * WindowFuncs will be pulled out here, too.
5616 : *
5617 : * Note that the target is logically below the grouping step. So with
5618 : * grouping sets we need to remove the RT index of the grouping step if
5619 : * there is any from the non-group Vars.
5620 : */
5621 45398 : non_group_vars = pull_var_clause((Node *) non_group_cols,
5622 : PVC_RECURSE_AGGREGATES |
5623 : PVC_RECURSE_WINDOWFUNCS |
5624 : PVC_INCLUDE_PLACEHOLDERS);
5625 45398 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5626 : {
5627 : Assert(root->group_rtindex > 0);
5628 : non_group_vars = (List *)
5629 896 : remove_nulling_relids((Node *) non_group_vars,
5630 896 : bms_make_singleton(root->group_rtindex),
5631 : NULL);
5632 : }
5633 45398 : add_new_columns_to_pathtarget(input_target, non_group_vars);
5634 :
5635 : /* clean up cruft */
5636 45398 : list_free(non_group_vars);
5637 45398 : list_free(non_group_cols);
5638 :
5639 : /* XXX this causes some redundant cost calculation ... */
5640 45398 : return set_pathtarget_cost_width(root, input_target);
5641 : }
5642 :
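/*
 * [Editor's note -- illustrative, not from the source] Tracing the header
 * comment's example with a HAVING clause added:
 *     SELECT a+b, SUM(c+d) FROM tbl GROUP BY a+b HAVING MAX(e) > 0;
 * the loop above keeps "a+b" as-is (it is a grouping column), while
 * "SUM(c+d)" and the HAVING qual land in non_group_cols; pull_var_clause
 * then reduces those to the plain Vars c, d, and e, so the subplan's
 * target becomes "a+b, c, d, e".
 */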
5643 : /*
5644 : * make_partial_grouping_target
5645 : * Generate appropriate PathTarget for output of partial aggregate
5646 : * (or partial grouping, if there are no aggregates) nodes.
5647 : *
5648 : * A partial aggregation node needs to emit all the same aggregates that
5649 : * a regular aggregation node would, plus any aggregates used in HAVING;
5650 : * except that the Aggref nodes should be marked as partial aggregates.
5651 : *
5652 : * In addition, we'd better emit any Vars and PlaceHolderVars that are
5653 : * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5654 : * these would be Vars that are grouped by or used in grouping expressions.)
5655 : *
5656 : * grouping_target is the tlist to be emitted by the topmost aggregation step.
5657 : * havingQual represents the HAVING clause.
5658 : */
5659 : static PathTarget *
5660 3178 : make_partial_grouping_target(PlannerInfo *root,
5661 : PathTarget *grouping_target,
5662 : Node *havingQual)
5663 : {
5664 : PathTarget *partial_target;
5665 : List *non_group_cols;
5666 : List *non_group_exprs;
5667 : int i;
5668 : ListCell *lc;
5669 :
5670 3178 : partial_target = create_empty_pathtarget();
5671 3178 : non_group_cols = NIL;
5672 :
5673 3178 : i = 0;
5674 11392 : foreach(lc, grouping_target->exprs)
5675 : {
5676 8214 : Expr *expr = (Expr *) lfirst(lc);
5677 8214 : Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5678 :
5679 12978 : if (sgref && root->processed_groupClause &&
5680 4764 : get_sortgroupref_clause_noerr(sgref,
5681 : root->processed_groupClause) != NULL)
5682 : {
5683 : /*
5684 : * It's a grouping column, so add it to the partial_target as-is.
5685 : * (This allows the upper agg step to repeat the grouping calcs.)
5686 : */
5687 2822 : add_column_to_pathtarget(partial_target, expr, sgref);
5688 : }
5689 : else
5690 : {
5691 : /*
5692 : * Non-grouping column, so just remember the expression for later
5693 : * call to pull_var_clause.
5694 : */
5695 5392 : non_group_cols = lappend(non_group_cols, expr);
5696 : }
5697 :
5698 8214 : i++;
5699 : }
5700 :
5701 : /*
5702 : * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5703 : */
5704 3178 : if (havingQual)
5705 878 : non_group_cols = lappend(non_group_cols, havingQual);
5706 :
5707 : /*
5708 : * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5709 : * non-group cols (plus HAVING), and add them to the partial_target if not
5710 : * already present. (An expression used directly as a GROUP BY item will
5711 : * be present already.) Note this includes Vars used in resjunk items, so
5712 : * we are covering the needs of ORDER BY and window specifications.
5713 : */
5714 3178 : non_group_exprs = pull_var_clause((Node *) non_group_cols,
5715 : PVC_INCLUDE_AGGREGATES |
5716 : PVC_RECURSE_WINDOWFUNCS |
5717 : PVC_INCLUDE_PLACEHOLDERS);
5718 :
5719 3178 : add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5720 :
5721 : /*
5722 : * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5723 : * are at the top level of the target list, so we can just scan the list
5724 : * rather than recursing through the expression trees.
5725 : */
5726 11988 : foreach(lc, partial_target->exprs)
5727 : {
5728 8810 : Aggref *aggref = (Aggref *) lfirst(lc);
5729 :
5730 8810 : if (IsA(aggref, Aggref))
5731 : {
5732 : Aggref *newaggref;
5733 :
5734 : /*
5735 : * We shouldn't need to copy the substructure of the Aggref node,
5736 : * but flat-copy the node itself to avoid damaging other trees.
5737 : */
5738 5958 : newaggref = makeNode(Aggref);
5739 5958 : memcpy(newaggref, aggref, sizeof(Aggref));
5740 :
5741 : /* For now, assume serialization is required */
5742 5958 : mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5743 :
5744 5958 : lfirst(lc) = newaggref;
5745 : }
5746 : }
5747 :
5748 : /* clean up cruft */
5749 3178 : list_free(non_group_exprs);
5750 3178 : list_free(non_group_cols);
5751 :
5752 : /* XXX this causes some redundant cost calculation ... */
5753 3178 : return set_pathtarget_cost_width(root, partial_target);
5754 : }
5755 :
5756 : /*
5757 : * mark_partial_aggref
5758 : * Adjust an Aggref to make it represent a partial-aggregation step.
5759 : *
5760 : * The Aggref node is modified in-place; caller must do any copying required.
5761 : */
5762 : void
5763 17902 : mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5764 : {
5765 : /* aggtranstype should be computed by this point */
5766 : Assert(OidIsValid(agg->aggtranstype));
5767 : /* ... but aggsplit should still be as the parser left it */
5768 : Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5769 :
5770 : /* Mark the Aggref with the intended partial-aggregation mode */
5771 17902 : agg->aggsplit = aggsplit;
5772 :
5773 : /*
5774 : * Adjust result type if needed. Normally, a partial aggregate returns
5775 : * the aggregate's transition type; but if that's INTERNAL and we're
5776 : * serializing, it returns BYTEA instead.
5777 : */
5778 17902 : if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5779 : {
5780 15596 : if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5781 314 : agg->aggtype = BYTEAOID;
5782 : else
5783 15282 : agg->aggtype = agg->aggtranstype;
5784 : }
5785 17902 : }
5786 :
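/*
 * [Editor's sketch -- not part of planner.c] The result-type rule above as
 * a pure function.  Hypothetical helper; assumes the usual PostgreSQL
 * headers for the AggSplit macros and the type OID constants.
 */
static Oid
toy_partial_agg_result_type(Oid aggtype, Oid aggtranstype, AggSplit aggsplit)
{
	if (!DO_AGGSPLIT_SKIPFINAL(aggsplit))
		return aggtype;			/* finalized as usual */
	if (aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
		return BYTEAOID;		/* serialized transition state */
	return aggtranstype;		/* raw transition state */
}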
5787 : /*
5788 : * postprocess_setop_tlist
5789 : * Fix up targetlist returned by plan_set_operations().
5790 : *
5791 : * We need to transpose sort key info from the orig_tlist into new_tlist.
5792 : * NOTE: this would not be good enough if we supported resjunk sort keys
5793 : * for results of set operations --- then, we'd need to project a whole
5794 : * new tlist to evaluate the resjunk columns. For now, just ereport if we
5795 : * find any resjunk columns in orig_tlist.
5796 : */
5797 : static List *
5798 6206 : postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5799 : {
5800 : ListCell *l;
5801 6206 : ListCell *orig_tlist_item = list_head(orig_tlist);
5802 :
5803 23786 : foreach(l, new_tlist)
5804 : {
5805 17580 : TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5806 : TargetEntry *orig_tle;
5807 :
5808 : /* ignore resjunk columns in setop result */
5809 17580 : if (new_tle->resjunk)
5810 0 : continue;
5811 :
5812 : Assert(orig_tlist_item != NULL);
5813 17580 : orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5814 17580 : orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5815 17580 : if (orig_tle->resjunk) /* should not happen */
5816 0 : elog(ERROR, "resjunk output columns are not implemented");
5817 : Assert(new_tle->resno == orig_tle->resno);
5818 17580 : new_tle->ressortgroupref = orig_tle->ressortgroupref;
5819 : }
5820 6206 : if (orig_tlist_item != NULL)
5821 0 : elog(ERROR, "resjunk output columns are not implemented");
5822 6206 : return new_tlist;
5823 : }
5824 :
5825 : /*
5826 : * optimize_window_clauses
5827 : * Call each WindowFunc's prosupport function to see if we're able to
 5828             :  *      make any adjustments to any of the WindowClauses so that the executor
5829 : * can execute the window functions in a more optimal way.
5830 : *
5831 : * Currently we only allow adjustments to the WindowClause's frameOptions. We
5832 : * may allow more things to be done here in the future.
5833 : */
5834 : static void
5835 2576 : optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5836 : {
5837 2576 : List *windowClause = root->parse->windowClause;
5838 : ListCell *lc;
5839 :
5840 5398 : foreach(lc, windowClause)
5841 : {
5842 2822 : WindowClause *wc = lfirst_node(WindowClause, lc);
5843 : ListCell *lc2;
5844 2822 : int optimizedFrameOptions = 0;
5845 :
5846 : Assert(wc->winref <= wflists->maxWinRef);
5847 :
5848 : /* skip any WindowClauses that have no WindowFuncs */
5849 2822 : if (wflists->windowFuncs[wc->winref] == NIL)
5850 24 : continue;
5851 :
5852 3452 : foreach(lc2, wflists->windowFuncs[wc->winref])
5853 : {
5854 : SupportRequestOptimizeWindowClause req;
5855 : SupportRequestOptimizeWindowClause *res;
5856 2840 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5857 : Oid prosupport;
5858 :
5859 2840 : prosupport = get_func_support(wfunc->winfnoid);
5860 :
5861 : /* Check if there's a support function for 'wfunc' */
5862 2840 : if (!OidIsValid(prosupport))
5863 2186 : break; /* can't optimize this WindowClause */
5864 :
5865 880 : req.type = T_SupportRequestOptimizeWindowClause;
5866 880 : req.window_clause = wc;
5867 880 : req.window_func = wfunc;
5868 880 : req.frameOptions = wc->frameOptions;
5869 :
5870 : /* call the support function */
5871 : res = (SupportRequestOptimizeWindowClause *)
5872 880 : DatumGetPointer(OidFunctionCall1(prosupport,
5873 : PointerGetDatum(&req)));
5874 :
5875 : /*
5876 : * Skip to next WindowClause if the support function does not
5877 : * support this request type.
5878 : */
5879 880 : if (res == NULL)
5880 226 : break;
5881 :
5882 : /*
5883 : * Save these frameOptions for the first WindowFunc for this
5884 : * WindowClause.
5885 : */
5886 654 : if (foreach_current_index(lc2) == 0)
5887 630 : optimizedFrameOptions = res->frameOptions;
5888 :
5889 : /*
5890 : * On subsequent WindowFuncs, if the frameOptions are not the same
5891 : * then we're unable to optimize the frameOptions for this
5892 : * WindowClause.
5893 : */
5894 24 : else if (optimizedFrameOptions != res->frameOptions)
5895 0 : break; /* skip to the next WindowClause, if any */
5896 : }
5897 :
 5898             :         /* adjust the frameOptions if all WindowFuncs agree that it's ok */
5899 2798 : if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5900 : {
5901 : ListCell *lc3;
5902 :
5903 : /* apply the new frame options */
5904 612 : wc->frameOptions = optimizedFrameOptions;
5905 :
5906 : /*
5907 : * We now check to see if changing the frameOptions has caused
5908 : * this WindowClause to be a duplicate of some other WindowClause.
5909 : * This can only happen if we have multiple WindowClauses, so
5910 : * don't bother if there's only 1.
5911 : */
5912 612 : if (list_length(windowClause) == 1)
5913 522 : continue;
5914 :
5915 : /*
5916 : * Do the duplicate check and reuse the existing WindowClause if
5917 : * we find a duplicate.
5918 : */
5919 228 : foreach(lc3, windowClause)
5920 : {
5921 174 : WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5922 :
5923 : /* skip over the WindowClause we're currently editing */
5924 174 : if (existing_wc == wc)
5925 54 : continue;
5926 :
5927 : /*
5928 : * Perform the same duplicate check that is done in
5929 : * transformWindowFuncCall.
5930 : */
5931 240 : if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5932 120 : equal(wc->orderClause, existing_wc->orderClause) &&
5933 120 : wc->frameOptions == existing_wc->frameOptions &&
5934 72 : equal(wc->startOffset, existing_wc->startOffset) &&
5935 36 : equal(wc->endOffset, existing_wc->endOffset))
5936 : {
5937 : ListCell *lc4;
5938 :
5939 : /*
5940 : * Now move each WindowFunc in 'wc' into 'existing_wc'.
 5941             :                  * This requires adjusting each WindowFunc's winref and
5942 : * moving the WindowFuncs in 'wc' to the list of
5943 : * WindowFuncs in 'existing_wc'.
5944 : */
5945 78 : foreach(lc4, wflists->windowFuncs[wc->winref])
5946 : {
5947 42 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5948 :
5949 42 : wfunc->winref = existing_wc->winref;
5950 : }
5951 :
5952 : /* move list items */
5953 72 : wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5954 36 : wflists->windowFuncs[wc->winref]);
5955 36 : wflists->windowFuncs[wc->winref] = NIL;
5956 :
5957 : /*
5958 : * transformWindowFuncCall() should have made sure there
5959 : * are no other duplicates, so we needn't bother looking
5960 : * any further.
5961 : */
5962 36 : break;
5963 : }
5964 : }
5965 : }
5966 : }
5967 2576 : }
5968 :
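/*
 * [Editor's sketch -- not part of planner.c] The rough shape of a
 * prosupport handler answering the request issued above, modeled on the
 * window-function support handlers elsewhere in the tree.  The specific
 * frameOptions chosen here are an assumption for illustration, not a
 * prescription.
 */
Datum
toy_window_support(PG_FUNCTION_ARGS)
{
	Node	   *rawreq = (Node *) PG_GETARG_POINTER(0);

	if (IsA(rawreq, SupportRequestOptimizeWindowClause))
	{
		SupportRequestOptimizeWindowClause *req =
			(SupportRequestOptimizeWindowClause *) rawreq;

		/*
		 * Report a cheaper frame that is semantically equivalent for this
		 * window function (e.g. ROWS rather than the default RANGE).
		 */
		req->frameOptions = (FRAMEOPTION_NONDEFAULT |
							 FRAMEOPTION_ROWS |
							 FRAMEOPTION_START_UNBOUNDED_PRECEDING |
							 FRAMEOPTION_END_CURRENT_ROW);
		PG_RETURN_POINTER(req);
	}
	PG_RETURN_POINTER(NULL);	/* request type not handled */
}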
5969 : /*
5970 : * select_active_windows
5971 : * Create a list of the "active" window clauses (ie, those referenced
5972 : * by non-deleted WindowFuncs) in the order they are to be executed.
5973 : */
5974 : static List *
5975 2576 : select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
5976 : {
5977 2576 : List *windowClause = root->parse->windowClause;
5978 2576 : List *result = NIL;
5979 : ListCell *lc;
5980 2576 : int nActive = 0;
5981 2576 : WindowClauseSortData *actives = palloc_array(WindowClauseSortData,
5982 : list_length(windowClause));
5983 :
5984 : /* First, construct an array of the active windows */
5985 5398 : foreach(lc, windowClause)
5986 : {
5987 2822 : WindowClause *wc = lfirst_node(WindowClause, lc);
5988 :
5989 : /* It's only active if wflists shows some related WindowFuncs */
5990 : Assert(wc->winref <= wflists->maxWinRef);
5991 2822 : if (wflists->windowFuncs[wc->winref] == NIL)
5992 60 : continue;
5993 :
5994 2762 : actives[nActive].wc = wc; /* original clause */
5995 :
5996 : /*
5997 : * For sorting, we want the list of partition keys followed by the
5998 : * list of sort keys. But pathkeys construction will remove duplicates
 5999             :          * between the two, so we may as well do the same (even though we can't
 6000             :          * detect all the duplicates, since some may come from ECs - that might mean
6001 : * we miss optimization chances here). We must, however, ensure that
6002 : * the order of entries is preserved with respect to the ones we do
6003 : * keep.
6004 : *
6005 : * partitionClause and orderClause had their own duplicates removed in
6006 : * parse analysis, so we're only concerned here with removing
6007 : * orderClause entries that also appear in partitionClause.
6008 : */
6009 5524 : actives[nActive].uniqueOrder =
6010 2762 : list_concat_unique(list_copy(wc->partitionClause),
6011 2762 : wc->orderClause);
6012 2762 : nActive++;
6013 : }
6014 :
6015 : /*
6016 : * Sort active windows by their partitioning/ordering clauses, ignoring
6017 : * any framing clauses, so that the windows that need the same sorting are
6018 : * adjacent in the list. When we come to generate paths, this will avoid
6019 : * inserting additional Sort nodes.
6020 : *
6021 : * This is how we implement a specific requirement from the SQL standard,
6022 : * which says that when two or more windows are order-equivalent (i.e.
6023 : * have matching partition and order clauses, even if their names or
6024 : * framing clauses differ), then all peer rows must be presented in the
6025 : * same order in all of them. If we allowed multiple sort nodes for such
6026 : * cases, we'd risk having the peer rows end up in different orders in
6027 : * equivalent windows due to sort instability. (See General Rule 4 of
6028 : * <window clause> in SQL2008 - SQL2016.)
6029 : *
6030 : * Additionally, if the entire list of clauses of one window is a prefix
 6031             :      * of another, put the window with the stronger sorting requirements first.
 6032             :      * This way we sort first for the stronger window, and won't have to sort
 6033             :      * again for the weaker one.
6034 : */
6035 2576 : qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
6036 :
6037 : /* build ordered list of the original WindowClause nodes */
6038 5338 : for (int i = 0; i < nActive; i++)
6039 2762 : result = lappend(result, actives[i].wc);
6040 :
6041 2576 : pfree(actives);
6042 :
6043 2576 : return result;
6044 : }
6045 :
6046 : /*
6047 : * name_active_windows
6048 : * Ensure all active windows have unique names.
6049 : *
6050 : * The parser will have checked that user-assigned window names are unique
6051 : * within the Query. Here we assign made-up names to any unnamed
6052 : * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
6053 : * at parse time, because it'd mess up decompilation of views.)
6054 : *
6055 : * activeWindows: result of select_active_windows
6056 : */
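 : /*
 : * For example, if the user already named one window "w1", the first
 : * unnamed window tries "w1", finds the collision, and gets "w2"; the
 : * next unnamed one then gets "w3", since next_n keeps advancing.
 : */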
6057 : static void
6058 2576 : name_active_windows(List *activeWindows)
6059 : {
6060 2576 : int next_n = 1;
6061 : char newname[16];
6062 : ListCell *lc;
6063 :
6064 5338 : foreach(lc, activeWindows)
6065 : {
6066 2762 : WindowClause *wc = lfirst_node(WindowClause, lc);
6067 :
6068 : /* Nothing to do if it has a name already. */
6069 2762 : if (wc->name)
6070 576 : continue;
6071 :
6072 : /* Select a name not currently present in the list. */
6073 : for (;;)
6074 6 : {
6075 : ListCell *lc2;
6076 :
6077 2192 : snprintf(newname, sizeof(newname), "w%d", next_n++);
6078 4732 : foreach(lc2, activeWindows)
6079 : {
6080 2546 : WindowClause *wc2 = lfirst_node(WindowClause, lc2);
6081 :
6082 2546 : if (wc2->name && strcmp(wc2->name, newname) == 0)
6083 6 : break; /* matched */
6084 : }
6085 2192 : if (lc2 == NULL)
6086 2186 : break; /* reached the end with no match */
6087 : }
6088 2186 : wc->name = pstrdup(newname);
6089 : }
6090 2576 : }
6091 :
6092 : /*
6093 : * common_prefix_cmp
6094 : * QSort comparison function for WindowClauseSortData
6095 : *
6096 : * Sort the windows by the required sorting clauses. First, compare the sort
6097 : * clauses themselves. Second, if one window's clauses are a prefix of another
6098 : * one's clauses, put the window with more sort clauses first.
6099 : *
6100 : * We purposefully sort by the highest tleSortGroupRef first. Because
6101 : * tleSortGroupRefs are assigned to the query's DISTINCT and ORDER BY
6102 : * clauses first, and because we sort the lowest tleSortGroupRefs last, a
6103 : * WindowClause that shares a tleSortGroupRef with the query's DISTINCT or
6104 : * ORDER BY clause tends to come last, making it more likely that the
6105 : * final WindowAgg will provide presorted input for the query's DISTINCT
6106 : * or ORDER BY clause, thus reducing the total number of sorts required.
6107 : */
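 : /*
 : * A worked example: if wcsa's uniqueOrder has tleSortGroupRefs (5, 2)
 : * and wcsb's has just (5), the common prefix compares equal (assuming
 : * matching sortops and NULLS ordering), so the length comparison below
 : * returns -1 and the longer, more demanding window sorts first.
 : */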
6108 : static int
6109 204 : common_prefix_cmp(const void *a, const void *b)
6110 : {
6111 204 : const WindowClauseSortData *wcsa = a;
6112 204 : const WindowClauseSortData *wcsb = b;
6113 : ListCell *item_a;
6114 : ListCell *item_b;
6115 :
6116 366 : forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6117 : {
6118 264 : SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6119 264 : SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6120 :
6121 264 : if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6122 102 : return -1;
6123 252 : else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6124 66 : return 1;
6125 186 : else if (sca->sortop > scb->sortop)
6126 0 : return -1;
6127 186 : else if (sca->sortop < scb->sortop)
6128 24 : return 1;
6129 162 : else if (sca->nulls_first && !scb->nulls_first)
6130 0 : return -1;
6131 162 : else if (!sca->nulls_first && scb->nulls_first)
6132 0 : return 1;
6133 : /* no need to compare eqop, since it is fully determined by sortop */
6134 : }
6135 :
6136 102 : if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6137 6 : return -1;
6138 96 : else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6139 30 : return 1;
6140 :
6141 66 : return 0;
6142 : }
6143 :
6144 : /*
6145 : * make_window_input_target
6146 : * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6147 : *
6148 : * When the query has window functions, this function computes the desired
6149 : * target to be computed by the node just below the first WindowAgg.
6150 : * This tlist must contain all values needed to evaluate the window functions,
6151 : * compute the final target list, and perform any required final sort step.
6152 : * If multiple WindowAggs are needed, each intermediate one adds its window
6153 : * function results onto this base tlist; only the topmost WindowAgg computes
6154 : * the actual desired target list.
6155 : *
6156 : * This function is much like make_group_input_target, though not quite enough
6157 : * like it to share code. As in that function, we flatten most expressions
6158 : * into their component variables. But we do not want to flatten window
6159 : * PARTITION BY/ORDER BY clauses, since that might result in multiple
6160 : * evaluations of them, which would be bad (possibly even resulting in
6161 : * inconsistent answers, if they contain volatile functions).
6162 : * Also, we must not flatten GROUP BY clauses that were left unflattened by
6163 : * make_group_input_target, because we may no longer have access to the
6164 : * individual Vars in them.
6165 : *
6166 : * Another key difference from make_group_input_target is that we don't
6167 : * flatten Aggref expressions, since those are to be computed below the
6168 : * window functions and just referenced like Vars above that.
6169 : *
6170 : * 'final_target' is the query's final target list (in PathTarget form)
6171 : * 'activeWindows' is the list of active windows previously identified by
6172 : * select_active_windows.
6173 : *
6174 : * The result is the PathTarget to be computed by the plan node immediately
6175 : * below the first WindowAgg node.
6176 : */
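 : /*
 : * A schematic example (hypothetical query): for
 : * SELECT a, upper(b), sum(c) OVER (PARTITION BY a ORDER BY d) FROM ...
 : * the sortgroupref'd window columns a and d are kept as-is, upper(b) is
 : * flattened to the Var b, and we recurse into the WindowFunc to pull out
 : * c; the WindowAgg node itself computes sum(c) above this target.
 : */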
6177 : static PathTarget *
6178 2576 : make_window_input_target(PlannerInfo *root,
6179 : PathTarget *final_target,
6180 : List *activeWindows)
6181 : {
6182 : PathTarget *input_target;
6183 : Bitmapset *sgrefs;
6184 : List *flattenable_cols;
6185 : List *flattenable_vars;
6186 : int i;
6187 : ListCell *lc;
6188 :
6189 : Assert(root->parse->hasWindowFuncs);
6190 :
6191 : /*
6192 : * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6193 : * into a bitmapset for convenient reference below.
6194 : */
6195 2576 : sgrefs = NULL;
6196 5338 : foreach(lc, activeWindows)
6197 : {
6198 2762 : WindowClause *wc = lfirst_node(WindowClause, lc);
6199 : ListCell *lc2;
6200 :
6201 3518 : foreach(lc2, wc->partitionClause)
6202 : {
6203 756 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6204 :
6205 756 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6206 : }
6207 5034 : foreach(lc2, wc->orderClause)
6208 : {
6209 2272 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6210 :
6211 2272 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6212 : }
6213 : }
6214 :
6215 : /* Add in sortgroupref numbers of GROUP BY clauses, too */
6216 2768 : foreach(lc, root->processed_groupClause)
6217 : {
6218 192 : SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6219 :
6220 192 : sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6221 : }
6222 :
6223 : /*
6224 : * Construct a target containing all the non-flattenable targetlist items,
6225 : * and save aside the others for a moment.
6226 : */
6227 2576 : input_target = create_empty_pathtarget();
6228 2576 : flattenable_cols = NIL;
6229 :
6230 2576 : i = 0;
6231 10928 : foreach(lc, final_target->exprs)
6232 : {
6233 8352 : Expr *expr = (Expr *) lfirst(lc);
6234 8352 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
6235 :
6236 : /*
6237 : * Don't want to deconstruct window clauses or GROUP BY items. (Note
6238 : * that such items can't contain window functions, so it's okay to
6239 : * compute them below the WindowAgg nodes.)
6240 : */
6241 8352 : if (sgref != 0 && bms_is_member(sgref, sgrefs))
6242 : {
6243 : /*
6244 : * Don't want to deconstruct this value, so add it to the input
6245 : * target as-is.
6246 : */
6247 2864 : add_column_to_pathtarget(input_target, expr, sgref);
6248 : }
6249 : else
6250 : {
6251 : /*
6252 : * Column is to be flattened, so just remember the expression for
6253 : * later call to pull_var_clause.
6254 : */
6255 5488 : flattenable_cols = lappend(flattenable_cols, expr);
6256 : }
6257 :
6258 8352 : i++;
6259 : }
6260 :
6261 : /*
6262 : * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6263 : * add them to the input target if not already present. (Some might be
6264 : * there already because they're used directly as window/group clauses.)
6265 : *
6266 : * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6267 : * Aggrefs are placed in the Agg node's tlist and not left to be computed
6268 : * at higher levels. On the other hand, we should recurse into
6269 : * WindowFuncs to make sure their input expressions are available.
6270 : */
6271 2576 : flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6272 : PVC_INCLUDE_AGGREGATES |
6273 : PVC_RECURSE_WINDOWFUNCS |
6274 : PVC_INCLUDE_PLACEHOLDERS);
6275 2576 : add_new_columns_to_pathtarget(input_target, flattenable_vars);
6276 :
6277 : /* clean up cruft */
6278 2576 : list_free(flattenable_vars);
6279 2576 : list_free(flattenable_cols);
6280 :
6281 : /* XXX this causes some redundant cost calculation ... */
6282 2576 : return set_pathtarget_cost_width(root, input_target);
6283 : }
6284 :
6285 : /*
6286 : * make_pathkeys_for_window
6287 : * Create a pathkeys list describing the required input ordering
6288 : * for the given WindowClause.
6289 : *
6290 : * Modifies wc's partitionClause to remove any clauses which are deemed
6291 : * redundant by the pathkey logic.
6292 : *
6293 : * The required ordering is first the PARTITION keys, then the ORDER keys.
6294 : * In the future we might try to implement windowing using hashing, in which
6295 : * case the ordering could be relaxed, but for now we always sort.
6296 : */
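 : /*
 : * For instance (hypothetically), OVER (PARTITION BY a ORDER BY b DESC)
 : * produces pathkeys equivalent to sorting by a (in its default ordering)
 : * and then by b DESC: partition keys first, ordering keys after.
 : */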
6297 : static List *
6298 5552 : make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6299 : List *tlist)
6300 : {
6301 5552 : List *window_pathkeys = NIL;
6302 :
6303 : /* Throw error if can't sort */
6304 5552 : if (!grouping_is_sortable(wc->partitionClause))
6305 0 : ereport(ERROR,
6306 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6307 : errmsg("could not implement window PARTITION BY"),
6308 : errdetail("Window partitioning columns must be of sortable datatypes.")));
6309 5552 : if (!grouping_is_sortable(wc->orderClause))
6310 0 : ereport(ERROR,
6311 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6312 : errmsg("could not implement window ORDER BY"),
6313 : errdetail("Window ordering columns must be of sortable datatypes.")));
6314 :
6315 : /*
6316 : * First fetch the pathkeys for the PARTITION BY clause. We can safely
6317 : * remove any clauses from the wc->partitionClause for redundant pathkeys.
6318 : */
6319 5552 : if (wc->partitionClause != NIL)
6320 : {
6321 : bool sortable;
6322 :
6323 1320 : window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6324 : &wc->partitionClause,
6325 : tlist,
6326 : true,
6327 : false,
6328 : &sortable,
6329 : false);
6330 :
6331 : Assert(sortable);
6332 : }
6333 :
6334 : /*
6335 : * In principle, we could also consider removing redundant ORDER BY items
6336 : * too as doing so does not alter the result of peer row checks done by
6337 : * the executor. However, we must *not* remove the ordering column for
6338 : * RANGE OFFSET cases, as the executor needs that for in_range tests even
6339 : * if it's known to be equal to some partitioning column.
6340 : */
6341 5552 : if (wc->orderClause != NIL)
6342 : {
6343 : List *orderby_pathkeys;
6344 :
6345 4450 : orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6346 : wc->orderClause,
6347 : tlist);
6348 :
6349 : /* Okay, make the combined pathkeys */
6350 4450 : if (window_pathkeys != NIL)
6351 946 : window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6352 : else
6353 3504 : window_pathkeys = orderby_pathkeys;
6354 : }
6355 :
6356 5552 : return window_pathkeys;
6357 : }
6358 :
6359 : /*
6360 : * make_sort_input_target
6361 : * Generate appropriate PathTarget for initial input to Sort step.
6362 : *
6363 : * If the query has ORDER BY, this function chooses the target to be computed
6364 : * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6365 : * project) steps. This might or might not be identical to the query's final
6366 : * output target.
6367 : *
6368 : * The main argument for keeping the sort-input tlist the same as the
6369 : * final tlist is that we avoid a separate projection node (which will be
6370 : * needed if they're different, because Sort can't project). However,
6371 : * there are also advantages to postponing tlist evaluation till after the
6372 : * Sort: it ensures a consistent order of evaluation for any volatile
6373 : * functions in the tlist, and if there's also a LIMIT, we can stop the
6374 : * query without ever computing tlist functions for later rows, which is
6375 : * beneficial for both volatile and expensive functions.
6376 : *
6377 : * Our current policy is to postpone volatile expressions till after the sort
6378 : * unconditionally (assuming that that's possible, ie they are in plain tlist
6379 : * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6380 : * postpone set-returning expressions, because running them beforehand would
6381 : * bloat the sort dataset, and because it might cause unexpected output order
6382 : * if the sort isn't stable. However there's a constraint on that: all SRFs
6383 : * in the tlist should be evaluated at the same plan step, so that they can
6384 : * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6385 : * mustn't postpone any SRFs. (Note that in principle that policy should
6386 : * probably get applied to the group/window input targetlists too, but we
6387 : * have not done that historically.) Lastly, expensive expressions are
6388 : * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6389 : * partial evaluation of the query is possible (if neither is true, we expect
6390 : * to have to evaluate the expressions for every row anyway), or if there are
6391 : * any volatile or set-returning expressions (since once we've put in a
6392 : * projection at all, it won't cost any more to postpone more stuff).
6393 : *
6394 : * Another issue that could potentially be considered here is that
6395 : * evaluating tlist expressions could result in data that's either wider
6396 : * or narrower than the input Vars, thus changing the volume of data that
6397 : * has to go through the Sort. However, we usually have only a very bad
6398 : * idea of the output width of any expression more complex than a Var,
6399 : * so for now it seems too risky to try to optimize on that basis.
6400 : *
6401 : * Note that if we do produce a modified sort-input target, and then the
6402 : * query ends up not using an explicit Sort, no particular harm is done:
6403 : * we'll initially use the modified target for the preceding path nodes,
6404 : * but then change them to the final target with apply_projection_to_path.
6405 : * Moreover, in such a case the guarantees about evaluation order of
6406 : * volatile functions still hold, since the rows are sorted already.
6407 : *
6408 : * This function has some things in common with make_group_input_target and
6409 : * make_window_input_target, though the detailed rules for what to do are
6410 : * different. We never flatten/postpone any grouping or ordering columns;
6411 : * those are needed before the sort. If we do flatten a particular
6412 : * expression, we leave Aggref and WindowFunc nodes alone, since those were
6413 : * computed earlier.
6414 : *
6415 : * 'final_target' is the query's final target list (in PathTarget form)
6416 : * 'have_postponed_srfs' is an output argument, see below
6417 : *
6418 : * The result is the PathTarget to be computed by the plan node immediately
6419 : * below the Sort step (and the Distinct step, if any). This will be
6420 : * exactly final_target if we decide a projection step wouldn't be helpful.
6421 : *
6422 : * In addition, *have_postponed_srfs is set to true if we choose to postpone
6423 : * any set-returning functions to after the Sort.
6424 : */
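 : /*
 : * A hypothetical example: in
 : * SELECT a, slow_fn(b) FROM tab ORDER BY a LIMIT 10
 : * where slow_fn is expensive (or volatile), slow_fn(b) is not a sort
 : * column and so is postponed: the sort-input target is just {a, b}, and
 : * slow_fn runs only for the ten rows surviving the LIMIT instead of for
 : * every row fed to the Sort.
 : */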
6425 : static PathTarget *
6426 71652 : make_sort_input_target(PlannerInfo *root,
6427 : PathTarget *final_target,
6428 : bool *have_postponed_srfs)
6429 : {
6430 71652 : Query *parse = root->parse;
6431 : PathTarget *input_target;
6432 : int ncols;
6433 : bool *col_is_srf;
6434 : bool *postpone_col;
6435 : bool have_srf;
6436 : bool have_volatile;
6437 : bool have_expensive;
6438 : bool have_srf_sortcols;
6439 : bool postpone_srfs;
6440 : List *postponable_cols;
6441 : List *postponable_vars;
6442 : int i;
6443 : ListCell *lc;
6444 :
6445 : /* Shouldn't get here unless query has ORDER BY */
6446 : Assert(parse->sortClause);
6447 :
6448 71652 : *have_postponed_srfs = false; /* default result */
6449 :
6450 : /* Inspect tlist and collect per-column information */
6451 71652 : ncols = list_length(final_target->exprs);
6452 71652 : col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6453 71652 : postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6454 71652 : have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6455 :
6456 71652 : i = 0;
6457 432866 : foreach(lc, final_target->exprs)
6458 : {
6459 361214 : Expr *expr = (Expr *) lfirst(lc);
6460 :
6461 : /*
6462 : * If the column has a sortgroupref, assume it has to be evaluated
6463 : * before sorting. Generally such columns would be ORDER BY, GROUP
6464 : * BY, etc targets. One exception is columns that were removed from
6465 : * GROUP BY by remove_useless_groupby_columns() ... but those would
6466 : * only be Vars anyway. There don't seem to be any cases where it
6467 : * would be worth the trouble to double-check.
6468 : */
6469 361214 : if (get_pathtarget_sortgroupref(final_target, i) == 0)
6470 : {
6471 : /*
6472 : * Check for SRF or volatile functions. Check the SRF case first
6473 : * because we must know whether we have any postponed SRFs.
6474 : */
6475 260298 : if (parse->hasTargetSRFs &&
6476 216 : expression_returns_set((Node *) expr))
6477 : {
6478 : /* We'll decide below whether these are postponable */
6479 96 : col_is_srf[i] = true;
6480 96 : have_srf = true;
6481 : }
6482 259986 : else if (contain_volatile_functions((Node *) expr))
6483 : {
6484 : /* Unconditionally postpone */
6485 148 : postpone_col[i] = true;
6486 148 : have_volatile = true;
6487 : }
6488 : else
6489 : {
6490 : /*
6491 : * Else check the cost. XXX it's annoying to have to do this
6492 : * when set_pathtarget_cost_width() just did it. Refactor to
6493 : * allow sharing the work?
6494 : */
6495 : QualCost cost;
6496 :
6497 259838 : cost_qual_eval_node(&cost, (Node *) expr, root);
6498 :
6499 : /*
6500 : * We arbitrarily define "expensive" as "more than 10X
6501 : * cpu_operator_cost". Note this will take in any PL function
6502 : * with default cost.
6503 : */
6504 259838 : if (cost.per_tuple > 10 * cpu_operator_cost)
6505 : {
6506 16884 : postpone_col[i] = true;
6507 16884 : have_expensive = true;
6508 : }
6509 : }
6510 : }
6511 : else
6512 : {
6513 : /* For sortgroupref cols, just check if any contain SRFs */
6514 101132 : if (!have_srf_sortcols &&
6515 101442 : parse->hasTargetSRFs &&
6516 334 : expression_returns_set((Node *) expr))
6517 148 : have_srf_sortcols = true;
6518 : }
6519 :
6520 361214 : i++;
6521 : }
6522 :
6523 : /*
6524 : * We can postpone SRFs if we have some but none are in sortgroupref cols.
6525 : */
6526 71652 : postpone_srfs = (have_srf && !have_srf_sortcols);
6527 :
6528 : /*
6529 : * If we don't need a post-sort projection, just return final_target.
6530 : */
6531 71652 : if (!(postpone_srfs || have_volatile ||
6532 71448 : (have_expensive &&
6533 9918 : (parse->limitCount || root->tuple_fraction > 0))))
6534 71412 : return final_target;
6535 :
6536 : /*
6537 : * Report whether the post-sort projection will contain set-returning
6538 : * functions. This is important because it affects whether the Sort can
6539 : * rely on the query's LIMIT (if any) to bound the number of rows it needs
6540 : * to return.
6541 : */
6542 240 : *have_postponed_srfs = postpone_srfs;
6543 :
6544 : /*
6545 : * Construct the sort-input target, taking all non-postponable columns and
6546 : * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6547 : * the postponable ones.
6548 : */
6549 240 : input_target = create_empty_pathtarget();
6550 240 : postponable_cols = NIL;
6551 :
6552 240 : i = 0;
6553 1990 : foreach(lc, final_target->exprs)
6554 : {
6555 1750 : Expr *expr = (Expr *) lfirst(lc);
6556 :
6557 1750 : if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6558 298 : postponable_cols = lappend(postponable_cols, expr);
6559 : else
6560 1452 : add_column_to_pathtarget(input_target, expr,
6561 1452 : get_pathtarget_sortgroupref(final_target, i));
6562 :
6563 1750 : i++;
6564 : }
6565 :
6566 : /*
6567 : * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6568 : * postponable columns, and add them to the sort-input target if not
6569 : * already present. (Some might be there already.) We mustn't
6570 : * deconstruct Aggrefs or WindowFuncs here, since the projection node
6571 : * would be unable to recompute them.
6572 : */
6573 240 : postponable_vars = pull_var_clause((Node *) postponable_cols,
6574 : PVC_INCLUDE_AGGREGATES |
6575 : PVC_INCLUDE_WINDOWFUNCS |
6576 : PVC_INCLUDE_PLACEHOLDERS);
6577 240 : add_new_columns_to_pathtarget(input_target, postponable_vars);
6578 :
6579 : /* clean up cruft */
6580 240 : list_free(postponable_vars);
6581 240 : list_free(postponable_cols);
6582 :
6583 : /* XXX this represents even more redundant cost calculation ... */
6584 240 : return set_pathtarget_cost_width(root, input_target);
6585 : }
6586 :
6587 : /*
6588 : * get_cheapest_fractional_path
6589 : * Find the cheapest path for retrieving a specified fraction of all
6590 : * the tuples expected to be returned by the given relation.
6591 : *
6592 : * Do not consider parameterized paths. If the caller needs a path for an
6593 : * upper rel, it can't use parameterized paths at all. And if the caller
6594 : * needs an append subpath, a parameterized path would constrain all the
6595 : * other subpaths to use a similar parameterization, which is too limiting.
6596 : *
6597 : * We interpret tuple_fraction the same way as grouping_planner.
6598 : *
6599 : * We assume set_cheapest() has been run on the given rel.
6600 : */
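 : /*
 : * Example: with tuple_fraction = 100 and a cheapest-total path expected
 : * to return 1000 rows, the fraction becomes 0.1, and a path with higher
 : * total cost but much lower startup cost may then win the comparison.
 : */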
6601 : Path *
6602 497754 : get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6603 : {
6604 497754 : Path *best_path = rel->cheapest_total_path;
6605 : ListCell *l;
6606 :
6607 : /* If all tuples will be retrieved, just return the cheapest-total path */
6608 497754 : if (tuple_fraction <= 0.0)
6609 488212 : return best_path;
6610 :
6611 : /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6612 9542 : if (tuple_fraction >= 1.0 && best_path->rows > 0)
6613 3948 : tuple_fraction /= best_path->rows;
6614 :
6615 24878 : foreach(l, rel->pathlist)
6616 : {
6617 15336 : Path *path = (Path *) lfirst(l);
6618 :
6619 15336 : if (path->param_info)
6620 200 : continue;
6621 :
6622 20730 : if (path == rel->cheapest_total_path ||
6623 5594 : compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6624 14620 : continue;
6625 :
6626 516 : best_path = path;
6627 : }
6628 :
6629 9542 : return best_path;
6630 : }
6631 :
6632 : /*
6633 : * adjust_paths_for_srfs
6634 : * Fix up the Paths of the given upperrel to handle tSRFs properly.
6635 : *
6636 : * The executor can only handle set-returning functions that appear at the
6637 : * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6638 : * that are not at top level, we need to split up the evaluation into multiple
6639 : * plan levels in which each level satisfies this constraint. This function
6640 : * modifies each Path of an upperrel that (might) compute any SRFs in its
6641 : * output tlist to insert appropriate projection steps.
6642 : *
6643 : * The given targets and targets_contain_srfs lists are from
6644 : * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6645 : * target in targets.
6646 : */
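 : /*
 : * For example, a tlist entry like abs(generate_series(1, x)) gets split
 : * so that a ProjectSet node evaluates generate_series(1, x) at top level
 : * and a later projection step applies abs() to its result.
 : */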
6647 : static void
6648 12726 : adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
6649 : List *targets, List *targets_contain_srfs)
6650 : {
6651 : ListCell *lc;
6652 :
6653 : Assert(list_length(targets) == list_length(targets_contain_srfs));
6654 : Assert(!linitial_int(targets_contain_srfs));
6655 :
6656 : /* If no SRFs appear at this plan level, nothing to do */
6657 12726 : if (list_length(targets) == 1)
6658 696 : return;
6659 :
6660 : /*
6661 : * Stack SRF-evaluation nodes atop each path for the rel.
6662 : *
6663 : * In principle we should re-run set_cheapest() here to identify the
6664 : * cheapest path, but it seems unlikely that adding the same tlist eval
6665 : * costs to all the paths would change that, so we don't bother. Instead,
6666 : * just assume that the cheapest-startup and cheapest-total paths remain
6667 : * so. (There should be no parameterized paths anymore, so we needn't
6668 : * worry about updating cheapest_parameterized_paths.)
6669 : */
6670 24096 : foreach(lc, rel->pathlist)
6671 : {
6672 12066 : Path *subpath = (Path *) lfirst(lc);
6673 12066 : Path *newpath = subpath;
6674 : ListCell *lc1,
6675 : *lc2;
6676 :
6677 : Assert(subpath->param_info == NULL);
6678 37394 : forboth(lc1, targets, lc2, targets_contain_srfs)
6679 : {
6680 25328 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6681 25328 : bool contains_srfs = (bool) lfirst_int(lc2);
6682 :
6683 : /* If this level doesn't contain SRFs, do regular projection */
6684 25328 : if (contains_srfs)
6685 12126 : newpath = (Path *) create_set_projection_path(root,
6686 : rel,
6687 : newpath,
6688 : thistarget);
6689 : else
6690 13202 : newpath = (Path *) apply_projection_to_path(root,
6691 : rel,
6692 : newpath,
6693 : thistarget);
6694 : }
6695 12066 : lfirst(lc) = newpath;
6696 12066 : if (subpath == rel->cheapest_startup_path)
6697 404 : rel->cheapest_startup_path = newpath;
6698 12066 : if (subpath == rel->cheapest_total_path)
6699 404 : rel->cheapest_total_path = newpath;
6700 : }
6701 :
6702 : /* Likewise for partial paths, if any */
6703 12036 : foreach(lc, rel->partial_pathlist)
6704 : {
6705 6 : Path *subpath = (Path *) lfirst(lc);
6706 6 : Path *newpath = subpath;
6707 : ListCell *lc1,
6708 : *lc2;
6709 :
6710 : Assert(subpath->param_info == NULL);
6711 24 : forboth(lc1, targets, lc2, targets_contain_srfs)
6712 : {
6713 18 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6714 18 : bool contains_srfs = (bool) lfirst_int(lc2);
6715 :
6716 : /* If this level doesn't contain SRFs, do regular projection */
6717 18 : if (contains_srfs)
6718 6 : newpath = (Path *) create_set_projection_path(root,
6719 : rel,
6720 : newpath,
6721 : thistarget);
6722 : else
6723 : {
6724 : /* avoid apply_projection_to_path, in case of multiple refs */
6725 12 : newpath = (Path *) create_projection_path(root,
6726 : rel,
6727 : newpath,
6728 : thistarget);
6729 : }
6730 : }
6731 6 : lfirst(lc) = newpath;
6732 : }
6733 : }
6734 :
6735 : /*
6736 : * expression_planner
6737 : * Perform planner's transformations on a standalone expression.
6738 : *
6739 : * Various utility commands need to evaluate expressions that are not part
6740 : * of a plannable query. They can do so using the executor's regular
6741 : * expression-execution machinery, but first the expression has to be fed
6742 : * through here to transform it from parser output to something executable.
6743 : *
6744 : * Currently, we disallow sublinks in standalone expressions, so there's no
6745 : * real "planning" involved here. (That might not always be true though.)
6746 : * What we must do is run eval_const_expressions to ensure that any function
6747 : * calls are converted to positional notation and function default arguments
6748 : * get inserted. The fact that constant subexpressions get simplified is a
6749 : * side-effect that is useful when the expression will get evaluated more than
6750 : * once. Also, we must fix operator function IDs.
6751 : *
6752 : * This does not return any information about dependencies of the expression.
6753 : * Hence callers should use the results only for the duration of the current
6754 : * query. Callers that would like to cache the results for longer should use
6755 : * expression_planner_with_deps, probably via the plancache.
6756 : *
6757 : * Note: this must not make any damaging changes to the passed-in expression
6758 : * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6759 : * we first do an expression_tree_mutator-based walk, what is returned will
6760 : * be a new node tree.) The result is constructed in the current memory
6761 : * context; beware that this can leak a lot of additional stuff there, too.
6762 : */
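 : /*
 : * A minimal usage sketch (hypothetical caller evaluating a stored
 : * expression, e.g. a column default):
 : *
 : * expr = expression_planner(expr);
 : * estate = CreateExecutorState();
 : * exprstate = ExecPrepareExpr(expr, estate);
 : *
 : * after which the expression can be evaluated with ExecEvalExpr and the
 : * executor state freed with FreeExecutorState.
 : */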
6763 : Expr *
6764 243506 : expression_planner(Expr *expr)
6765 : {
6766 : Node *result;
6767 :
6768 : /*
6769 : * Convert named-argument function calls, insert default arguments and
6770 : * simplify constant subexprs
6771 : */
6772 243506 : result = eval_const_expressions(NULL, (Node *) expr);
6773 :
6774 : /* Fill in opfuncid values if missing */
6775 243488 : fix_opfuncids(result);
6776 :
6777 243488 : return (Expr *) result;
6778 : }
6779 :
6780 : /*
6781 : * expression_planner_with_deps
6782 : * Perform planner's transformations on a standalone expression,
6783 : * returning expression dependency information along with the result.
6784 : *
6785 : * This is identical to expression_planner() except that it also returns
6786 : * information about possible dependencies of the expression, ie identities of
6787 : * objects whose definitions affect the result. As in a PlannedStmt, these
6788 : * are expressed as a list of relation Oids and a list of PlanInvalItems.
6789 : */
6790 : Expr *
6791 364 : expression_planner_with_deps(Expr *expr,
6792 : List **relationOids,
6793 : List **invalItems)
6794 : {
6795 : Node *result;
6796 : PlannerGlobal glob;
6797 : PlannerInfo root;
6798 :
6799 : /* Make up dummy planner state so we can use setrefs machinery */
6800 9464 : MemSet(&glob, 0, sizeof(glob));
6801 364 : glob.type = T_PlannerGlobal;
6802 364 : glob.relationOids = NIL;
6803 364 : glob.invalItems = NIL;
6804 :
6805 33852 : MemSet(&root, 0, sizeof(root));
6806 364 : root.type = T_PlannerInfo;
6807 364 : root.glob = &glob;
6808 :
6809 : /*
6810 : * Convert named-argument function calls, insert default arguments and
6811 : * simplify constant subexprs. Collect identities of inlined functions
6812 : * and elided domains, too.
6813 : */
6814 364 : result = eval_const_expressions(&root, (Node *) expr);
6815 :
6816 : /* Fill in opfuncid values if missing */
6817 364 : fix_opfuncids(result);
6818 :
6819 : /*
6820 : * Now walk the finished expression to find anything else we ought to
6821 : * record as an expression dependency.
6822 : */
6823 364 : (void) extract_query_dependencies_walker(result, &root);
6824 :
6825 364 : *relationOids = glob.relationOids;
6826 364 : *invalItems = glob.invalItems;
6827 :
6828 364 : return (Expr *) result;
6829 : }
6830 :
6831 :
6832 : /*
6833 : * plan_cluster_use_sort
6834 : * Use the planner to decide how CLUSTER should implement sorting
6835 : *
6836 : * tableOid is the OID of a table to be clustered on its index indexOid
6837 : * (which is already known to be a btree index). Decide whether it's
6838 : * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6839 : * Return true to use sorting, false to use an indexscan.
6840 : *
6841 : * Note: caller had better already hold some type of lock on the table.
6842 : */
6843 : bool
6844 194 : plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6845 : {
6846 : PlannerInfo *root;
6847 : Query *query;
6848 : PlannerGlobal *glob;
6849 : RangeTblEntry *rte;
6850 : RelOptInfo *rel;
6851 : IndexOptInfo *indexInfo;
6852 : QualCost indexExprCost;
6853 : Cost comparisonCost;
6854 : Path *seqScanPath;
6855 : Path seqScanAndSortPath;
6856 : IndexPath *indexScanPath;
6857 : ListCell *lc;
6858 :
6859 : /* We can short-circuit the cost comparison if indexscans are disabled */
6860 194 : if (!enable_indexscan)
6861 30 : return true; /* use sort */
6862 :
6863 : /* Set up mostly-dummy planner state */
6864 164 : query = makeNode(Query);
6865 164 : query->commandType = CMD_SELECT;
6866 :
6867 164 : glob = makeNode(PlannerGlobal);
6868 :
6869 164 : root = makeNode(PlannerInfo);
6870 164 : root->parse = query;
6871 164 : root->glob = glob;
6872 164 : root->query_level = 1;
6873 164 : root->planner_cxt = CurrentMemoryContext;
6874 164 : root->wt_param_id = -1;
6875 164 : root->join_domains = list_make1(makeNode(JoinDomain));
6876 :
6877 : /* Build a minimal RTE for the rel */
6878 164 : rte = makeNode(RangeTblEntry);
6879 164 : rte->rtekind = RTE_RELATION;
6880 164 : rte->relid = tableOid;
6881 164 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6882 164 : rte->rellockmode = AccessShareLock;
6883 164 : rte->lateral = false;
6884 164 : rte->inh = false;
6885 164 : rte->inFromCl = true;
6886 164 : query->rtable = list_make1(rte);
6887 164 : addRTEPermissionInfo(&query->rteperminfos, rte);
6888 :
6889 : /* Set up RTE/RelOptInfo arrays */
6890 164 : setup_simple_rel_arrays(root);
6891 :
6892 : /* Build RelOptInfo */
6893 164 : rel = build_simple_rel(root, 1, NULL);
6894 :
6895 : /* Locate IndexOptInfo for the target index */
6896 164 : indexInfo = NULL;
6897 202 : foreach(lc, rel->indexlist)
6898 : {
6899 202 : indexInfo = lfirst_node(IndexOptInfo, lc);
6900 202 : if (indexInfo->indexoid == indexOid)
6901 164 : break;
6902 : }
6903 :
6904 : /*
6905 : * It's possible that get_relation_info did not generate an IndexOptInfo
6906 : * for the desired index; this could happen if it has not yet reached its
6907 : * indcheckxmin usability horizon, or if it's a system index and we're
6908 : * ignoring system indexes. In such cases we should tell CLUSTER to not
6909 : * trust the index contents but use seqscan-and-sort.
6910 : */
6911 164 : if (lc == NULL) /* not in the list? */
6912 0 : return true; /* use sort */
6913 :
6914 : /*
6915 : * Rather than doing all the pushups that would be needed to use
6916 : * set_baserel_size_estimates, just do a quick hack for rows and width.
6917 : */
6918 164 : rel->rows = rel->tuples;
6919 164 : rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6920 :
6921 164 : root->total_table_pages = rel->pages;
6922 :
6923 : /*
6924 : * Determine eval cost of the index expressions, if any. We need to
6925 : * charge twice that amount for each tuple comparison that happens during
6926 : * the sort, since tuplesort.c will have to re-evaluate the index
6927 : * expressions each time. (XXX that's pretty inefficient...)
6928 : */
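 : /*
 : * E.g., for an index on (lower(name)), each tuplesort comparison must
 : * evaluate lower() for both tuples being compared, hence the factor of 2.
 : */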
6929 164 : cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6930 164 : comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6931 :
6932 : /* Estimate the cost of seq scan + sort */
6933 164 : seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6934 164 : cost_sort(&seqScanAndSortPath, root, NIL,
6935 : seqScanPath->disabled_nodes,
6936 164 : seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6937 : comparisonCost, maintenance_work_mem, -1.0);
6938 :
6939 : /* Estimate the cost of index scan */
6940 164 : indexScanPath = create_index_path(root, indexInfo,
6941 : NIL, NIL, NIL, NIL,
6942 : ForwardScanDirection, false,
6943 : NULL, 1.0, false);
6944 :
6945 164 : return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6946 : }
6947 :
6948 : /*
6949 : * plan_create_index_workers
6950 : * Use the planner to decide how many parallel worker processes
6951 : * CREATE INDEX should request for use
6952 : *
6953 : * tableOid is the table on which the index is to be built. indexOid is the
6954 : * OID of an index to be created or reindexed (which must be an index with
6955 : * support for parallel builds - currently btree, GIN, or BRIN).
6956 : *
6957 : * Return value is the number of parallel worker processes to request. It
6958 : * may be unsafe to proceed if this is 0. Note that this does not include the
6959 : * leader participating as a worker (value is always a number of parallel
6960 : * worker processes).
6961 : *
6962 : * Note: caller had better already hold some type of lock on the table and
6963 : * index.
6964 : */
6965 : int
6966 36434 : plan_create_index_workers(Oid tableOid, Oid indexOid)
6967 : {
6968 : PlannerInfo *root;
6969 : Query *query;
6970 : PlannerGlobal *glob;
6971 : RangeTblEntry *rte;
6972 : Relation heap;
6973 : Relation index;
6974 : RelOptInfo *rel;
6975 : int parallel_workers;
6976 : BlockNumber heap_blocks;
6977 : double reltuples;
6978 : double allvisfrac;
6979 :
6980 : /*
6981 : * We don't allow performing parallel operation in standalone backend or
6982 : * when parallelism is disabled.
6983 : */
6984 36434 : if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
6985 514 : return 0;
6986 :
6987 : /* Set up largely-dummy planner state */
6988 35920 : query = makeNode(Query);
6989 35920 : query->commandType = CMD_SELECT;
6990 :
6991 35920 : glob = makeNode(PlannerGlobal);
6992 :
6993 35920 : root = makeNode(PlannerInfo);
6994 35920 : root->parse = query;
6995 35920 : root->glob = glob;
6996 35920 : root->query_level = 1;
6997 35920 : root->planner_cxt = CurrentMemoryContext;
6998 35920 : root->wt_param_id = -1;
6999 35920 : root->join_domains = list_make1(makeNode(JoinDomain));
7000 :
7001 : /*
7002 : * Build a minimal RTE.
7003 : *
7004 : * Mark the RTE with inh = true. This is a kludge to prevent
7005 : * get_relation_info() from fetching index info; we must prevent that
7006 : * because it does not expect any IndexOptInfo to be currently
7007 : * undergoing REINDEX.
7008 : */
7009 35920 : rte = makeNode(RangeTblEntry);
7010 35920 : rte->rtekind = RTE_RELATION;
7011 35920 : rte->relid = tableOid;
7012 35920 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
7013 35920 : rte->rellockmode = AccessShareLock;
7014 35920 : rte->lateral = false;
7015 35920 : rte->inh = true;
7016 35920 : rte->inFromCl = true;
7017 35920 : query->rtable = list_make1(rte);
7018 35920 : addRTEPermissionInfo(&query->rteperminfos, rte);
7019 :
7020 : /* Set up RTE/RelOptInfo arrays */
7021 35920 : setup_simple_rel_arrays(root);
7022 :
7023 : /* Build RelOptInfo */
7024 35920 : rel = build_simple_rel(root, 1, NULL);
7025 :
7026 : /* Rels are assumed already locked by the caller */
7027 35920 : heap = table_open(tableOid, NoLock);
7028 35920 : index = index_open(indexOid, NoLock);
7029 :
7030 : /*
7031 : * Determine if it's safe to proceed.
7032 : *
7033 : * Currently, parallel workers can't access the leader's temporary tables.
7034 : * Furthermore, any index predicate or index expressions must be parallel
7035 : * safe.
7036 : */
7037 35920 : if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
7038 33842 : !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
7039 33702 : !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
7040 : {
7041 2218 : parallel_workers = 0;
7042 2218 : goto done;
7043 : }
7044 :
7045 : /*
7046 : * If parallel_workers storage parameter is set for the table, accept that
7047 : * as the number of parallel worker processes to launch (though still cap
7048 : * at max_parallel_maintenance_workers). Note that we deliberately do not
7049 : * consider any other factor (e.g., memory use by workers) when
7050 : * parallel_workers is set.
7051 : */
7052 33702 : if (rel->rel_parallel_workers != -1)
7053 : {
7054 20 : parallel_workers = Min(rel->rel_parallel_workers,
7055 : max_parallel_maintenance_workers);
7056 20 : goto done;
7057 : }
7058 :
7059 : /*
7060 : * Estimate heap relation size ourselves, since rel->pages cannot be
7061 : * trusted (heap RTE was marked as inheritance parent)
7062 : */
7063 33682 : estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
7064 :
7065 : /*
7066 : * Determine number of workers to scan the heap relation using generic
7067 : * model
7068 : */
7069 33682 : parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7070 : max_parallel_maintenance_workers);
7071 :
7072 : /*
7073 : * Cap workers based on available maintenance_work_mem as needed.
7074 : *
7075 : * Note that each tuplesort participant receives an even share of the
7076 : * total maintenance_work_mem budget. Aim to leave participants
7077 : * (including the leader as a participant) with no less than 32MB of
7078 : * memory. This makes a maintenance_work_mem setting of 64MB the
7079 : * minimum that is capable of launching a single parallel worker
7080 : * to sort.
7081 : */
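 : /*
 : * Worked example: with maintenance_work_mem = 64MB and parallel_workers
 : * initially 2, each of the 3 participants would get about 21MB, below
 : * the 32MB floor, so we back off to 1 worker; 64MB across 2 participants
 : * is exactly 32MB apiece, which passes.
 : */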
7082 33844 : while (parallel_workers > 0 &&
7083 326 : maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7084 162 : parallel_workers--;
7085 :
7086 33682 : done:
7087 35920 : index_close(index, NoLock);
7088 35920 : table_close(heap, NoLock);
7089 :
7090 35920 : return parallel_workers;
7091 : }
7092 :
7093 : /*
7094 : * add_paths_to_grouping_rel
7095 : *
7096 : * Add non-partial paths to grouping relation.
7097 : */
7098 : static void
7099 46664 : add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
7100 : RelOptInfo *grouped_rel,
7101 : RelOptInfo *partially_grouped_rel,
7102 : const AggClauseCosts *agg_costs,
7103 : grouping_sets_data *gd,
7104 : GroupPathExtraData *extra)
7105 : {
7106 46664 : Query *parse = root->parse;
7107 46664 : Path *cheapest_path = input_rel->cheapest_total_path;
7108 46664 : Path *cheapest_partially_grouped_path = NULL;
7109 : ListCell *lc;
7110 46664 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7111 46664 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7112 46664 : List *havingQual = (List *) extra->havingQual;
7113 46664 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7114 46664 : double dNumGroups = 0;
7115 46664 : double dNumFinalGroups = 0;
7116 :
7117 : /*
7118 : * Estimate number of groups for non-split aggregation.
7119 : */
7120 46664 : dNumGroups = get_number_of_groups(root,
7121 : cheapest_path->rows,
7122 : gd,
7123 : extra->targetList);
7124 :
7125 46664 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7126 : {
7127 2320 : cheapest_partially_grouped_path =
7128 : partially_grouped_rel->cheapest_total_path;
7129 :
7130 : /*
7131 : * Estimate number of groups for final phase of partial aggregation.
7132 : */
7133 : dNumFinalGroups =
7134 2320 : get_number_of_groups(root,
7135 : cheapest_partially_grouped_path->rows,
7136 : gd,
7137 : extra->targetList);
7138 : }
7139 :
7140 46664 : if (can_sort)
7141 : {
7142 : /*
7143 : * Use any available suitably-sorted path as input, and also consider
7144 : * sorting the cheapest-total path and incremental sort on any paths
7145 : * with presorted keys.
7146 : */
7147 96720 : foreach(lc, input_rel->pathlist)
7148 : {
7149 : ListCell *lc2;
7150 50062 : Path *path = (Path *) lfirst(lc);
7151 50062 : Path *path_save = path;
7152 50062 : List *pathkey_orderings = NIL;
7153 :
7154 : /* generate alternative group orderings that might be useful */
7155 50062 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7156 :
7157 : Assert(list_length(pathkey_orderings) > 0);
7158 :
7159 100268 : foreach(lc2, pathkey_orderings)
7160 : {
7161 50206 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7162 :
7163 : /* restore the path (we replace it in the loop) */
7164 50206 : path = path_save;
7165 :
7166 50206 : path = make_ordered_path(root,
7167 : grouped_rel,
7168 : path,
7169 : cheapest_path,
7170 : info->pathkeys,
7171 : -1.0);
7172 50206 : if (path == NULL)
7173 380 : continue;
7174 :
7175 : /* Now decide what to stick atop it */
7176 49826 : if (parse->groupingSets)
7177 : {
7178 1022 : consider_groupingsets_paths(root, grouped_rel,
7179 : path, true, can_hash,
7180 : gd, agg_costs, dNumGroups);
7181 : }
7182 48804 : else if (parse->hasAggs)
7183 : {
7184 : /*
7185 : * We have aggregation, possibly with plain GROUP BY. Make
7186 : * an AggPath.
7187 : */
7188 48020 : add_path(grouped_rel, (Path *)
7189 48020 : create_agg_path(root,
7190 : grouped_rel,
7191 : path,
7192 48020 : grouped_rel->reltarget,
7193 48020 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7194 : AGGSPLIT_SIMPLE,
7195 : info->clauses,
7196 : havingQual,
7197 : agg_costs,
7198 : dNumGroups));
7199 : }
7200 784 : else if (parse->groupClause)
7201 : {
7202 : /*
7203 : * We have GROUP BY without aggregation or grouping sets.
7204 : * Make a GroupPath.
7205 : */
7206 784 : add_path(grouped_rel, (Path *)
7207 784 : create_group_path(root,
7208 : grouped_rel,
7209 : path,
7210 : info->clauses,
7211 : havingQual,
7212 : dNumGroups));
7213 : }
7214 : else
7215 : {
7216 : /* Other cases should have been handled above */
7217 : Assert(false);
7218 : }
7219 : }
7220 : }
7221 :
7222 : /*
7223 : * Instead of operating directly on the input relation, we can
7224 : * consider finalizing a partially aggregated path.
7225 : */
7226 46658 : if (partially_grouped_rel != NULL)
7227 : {
7228 6322 : foreach(lc, partially_grouped_rel->pathlist)
7229 : {
7230 : ListCell *lc2;
7231 4002 : Path *path = (Path *) lfirst(lc);
7232 4002 : Path *path_save = path;
7233 4002 : List *pathkey_orderings = NIL;
7234 :
7235 : /* generate alternative group orderings that might be useful */
7236 4002 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7237 :
7238 : Assert(list_length(pathkey_orderings) > 0);
7239 :
7240 : /* process all potentially interesting grouping reorderings */
7241 8004 : foreach(lc2, pathkey_orderings)
7242 : {
7243 4002 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7244 :
7245 : /* restore the path (we replace it in the loop) */
7246 4002 : path = path_save;
7247 :
7248 4002 : path = make_ordered_path(root,
7249 : grouped_rel,
7250 : path,
7251 : cheapest_partially_grouped_path,
7252 : info->pathkeys,
7253 : -1.0);
7254 :
7255 4002 : if (path == NULL)
7256 204 : continue;
7257 :
7258 3798 : if (parse->hasAggs)
7259 3550 : add_path(grouped_rel, (Path *)
7260 3550 : create_agg_path(root,
7261 : grouped_rel,
7262 : path,
7263 3550 : grouped_rel->reltarget,
7264 3550 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7265 : AGGSPLIT_FINAL_DESERIAL,
7266 : info->clauses,
7267 : havingQual,
7268 : agg_final_costs,
7269 : dNumFinalGroups));
7270 : else
7271 248 : add_path(grouped_rel, (Path *)
7272 248 : create_group_path(root,
7273 : grouped_rel,
7274 : path,
7275 : info->clauses,
7276 : havingQual,
7277 : dNumFinalGroups));
7278 :
7279 : }
7280 : }
7281 : }
7282 : }
7283 :
7284 46664 : if (can_hash)
7285 : {
7286 5876 : if (parse->groupingSets)
7287 : {
7288 : /*
7289 : * Try for a hash-only groupingsets path over unsorted input.
7290 : */
7291 860 : consider_groupingsets_paths(root, grouped_rel,
7292 : cheapest_path, false, true,
7293 : gd, agg_costs, dNumGroups);
7294 : }
7295 : else
7296 : {
7297 : /*
7298 : * Generate a HashAgg Path. We just need an Agg over the
7299 : * cheapest-total input path, since input order won't matter.
7300 : */
7301 5016 : add_path(grouped_rel, (Path *)
7302 5016 : create_agg_path(root, grouped_rel,
7303 : cheapest_path,
7304 5016 : grouped_rel->reltarget,
7305 : AGG_HASHED,
7306 : AGGSPLIT_SIMPLE,
7307 : root->processed_groupClause,
7308 : havingQual,
7309 : agg_costs,
7310 : dNumGroups));
7311 : }
7312 :
7313 : /*
7314 : * Generate a Finalize HashAgg Path atop of the cheapest partially
7315 : * grouped path, assuming there is one
7316 : */
7317 5876 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7318 : {
7319 1442 : add_path(grouped_rel, (Path *)
7320 1442 : create_agg_path(root,
7321 : grouped_rel,
7322 : cheapest_partially_grouped_path,
7323 1442 : grouped_rel->reltarget,
7324 : AGG_HASHED,
7325 : AGGSPLIT_FINAL_DESERIAL,
7326 : root->processed_groupClause,
7327 : havingQual,
7328 : agg_final_costs,
7329 : dNumFinalGroups));
7330 : }
7331 : }
7332 :
7333 : /*
7334 : * When partitionwise aggregate is used, we might have fully aggregated
7335 : * paths in the partial pathlist, because add_paths_to_append_rel() will
7336 : * consider a path for grouped_rel consisting of a Parallel Append of
7337 : * non-partial paths from each child.
7338 : */
7339 46664 : if (grouped_rel->partial_pathlist != NIL)
7340 318 : gather_grouping_paths(root, grouped_rel);
7341 46664 : }
7342 :
7343 : /*
7344 : * create_partial_grouping_paths
7345 : *
7346 : * Create a new upper relation representing the result of partial aggregation
7347 : * and populate it with appropriate paths. Note that we don't finalize the
7348 : * lists of paths here, so the caller can add additional partial or non-partial
7349 : * paths and must afterward call gather_grouping_paths and set_cheapest on
7350 : * the returned upper relation.
7351 : *
7352 : * All paths for this new upper relation -- both partial and non-partial --
7353 : * have been partially aggregated but require a subsequent FinalizeAggregate
7354 : * step.
7355 : *
7356 : * NB: This function is allowed to return NULL if it determines that there is
7357 : * no real need to create a new RelOptInfo.
7358 : */
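 : /*
 : * Schematically, the plans built from these paths look like
 : *
 : * Finalize Aggregate
 : *   -> Gather
 : *        -> Partial Aggregate
 : *             -> Parallel Seq Scan
 : *
 : * where this function supplies the Partial Aggregate layer; the caller
 : * adds the Gather (via gather_grouping_paths) and Finalize steps.
 : */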
7359 : static RelOptInfo *
7360 42598 : create_partial_grouping_paths(PlannerInfo *root,
7361 : RelOptInfo *grouped_rel,
7362 : RelOptInfo *input_rel,
7363 : grouping_sets_data *gd,
7364 : GroupPathExtraData *extra,
7365 : bool force_rel_creation)
7366 : {
7367 42598 : Query *parse = root->parse;
7368 : RelOptInfo *partially_grouped_rel;
7369 42598 : RelOptInfo *eager_agg_rel = NULL;
7370 42598 : AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7371 42598 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7372 42598 : Path *cheapest_partial_path = NULL;
7373 42598 : Path *cheapest_total_path = NULL;
7374 42598 : double dNumPartialGroups = 0;
7375 42598 : double dNumPartialPartialGroups = 0;
7376 : ListCell *lc;
7377 42598 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7378 42598 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7379 :
7380 : /*
7381 : * Check whether any partially aggregated paths have been generated
7382 : * through eager aggregation.
7383 : */
7384 42598 : if (input_rel->grouped_rel &&
7385 958 : !IS_DUMMY_REL(input_rel->grouped_rel) &&
7386 958 : input_rel->grouped_rel->pathlist != NIL)
7387 898 : eager_agg_rel = input_rel->grouped_rel;
7388 :
7389 : /*
7390 : * Consider whether we should generate partially aggregated non-partial
7391 : * paths. We can only do this if we have a non-partial path, and only if
7392 : * the parent of the input rel is performing partial partitionwise
7393 : * aggregation. (Note that extra->patype is the type of partitionwise
7394 : * aggregation being used at the parent level, not this level.)
7395 : */
7396 42598 : if (input_rel->pathlist != NIL &&
7397 42598 : extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7398 858 : cheapest_total_path = input_rel->cheapest_total_path;
7399 :
7400 : /*
7401 : * If parallelism is possible for grouped_rel, then we should consider
7402 : * generating partially-grouped partial paths. However, if the input rel
7403 : * has no partial paths, then we can't.
7404 : */
7405 42598 : if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7406 2596 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
7407 :
7408 : /*
7409 : * If we can't partially aggregate partial paths, and we can't partially
7410 : * aggregate non-partial paths, and no partially aggregated paths were
7411 : * generated by eager aggregation, then don't bother creating the new
7412 : * RelOptInfo at all, unless the caller specified force_rel_creation.
7413 : */
7414 42598 : if (cheapest_total_path == NULL &&
7415 39648 : cheapest_partial_path == NULL &&
7416 39518 : eager_agg_rel == NULL &&
7417 39518 : !force_rel_creation)
7418 39420 : return NULL;
7419 :
7420 : /*
7421 : * Build a new upper relation to represent the result of partially
7422 : * aggregating the rows from the input relation.
7423 : */
7424 3178 : partially_grouped_rel = fetch_upper_rel(root,
7425 : UPPERREL_PARTIAL_GROUP_AGG,
7426 : grouped_rel->relids);
7427 3178 : partially_grouped_rel->consider_parallel =
7428 3178 : grouped_rel->consider_parallel;
7429 3178 : partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7430 3178 : partially_grouped_rel->serverid = grouped_rel->serverid;
7431 3178 : partially_grouped_rel->userid = grouped_rel->userid;
7432 3178 : partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7433 3178 : partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7434 :
7435 : /*
7436 : * Build target list for partial aggregate paths. These paths cannot just
7437 : * emit the same tlist as regular aggregate paths, because (1) we must
7438 : * include Vars and Aggrefs needed in HAVING, which might not appear in
7439 : * the result tlist, and (2) the Aggrefs must be set in partial mode.
7440 : */
7441 3178 : partially_grouped_rel->reltarget =
7442 3178 : make_partial_grouping_target(root, grouped_rel->reltarget,
7443 : extra->havingQual);
7444 :
7445 3178 : if (!extra->partial_costs_set)
7446 : {
7447 : /*
7448 : * Collect statistics about aggregates for estimating costs of
7449 : * performing aggregation in parallel.
7450 : */
7451 9816 : MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7452 9816 : MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7453 1636 : if (parse->hasAggs)
7454 : {
7455 : /* partial phase */
7456 1502 : get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7457 : agg_partial_costs);
7458 :
7459 : /* final phase */
7460 1502 : get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7461 : agg_final_costs);
7462 : }
7463 :
7464 1636 : extra->partial_costs_set = true;
7465 : }
7466 :
7467 : /* Estimate number of partial groups. */
7468 3178 : if (cheapest_total_path != NULL)
7469 : dNumPartialGroups =
7470 858 : get_number_of_groups(root,
7471 : cheapest_total_path->rows,
7472 : gd,
7473 : extra->targetList);
7474 3178 : if (cheapest_partial_path != NULL)
7475 : dNumPartialPartialGroups =
7476 2596 : get_number_of_groups(root,
7477 : cheapest_partial_path->rows,
7478 : gd,
7479 : extra->targetList);
7480 :
7481 3178 : if (can_sort && cheapest_total_path != NULL)
7482 : {
7483 : /* This should have been checked previously */
7484 : Assert(parse->hasAggs || parse->groupClause);
7485 :
7486 : /*
7487 : * Use any available suitably-sorted path as input, and also consider
7488 : * sorting the cheapest partial path.
7489 : */
7490 1716 : foreach(lc, input_rel->pathlist)
7491 : {
7492 : ListCell *lc2;
7493 858 : Path *path = (Path *) lfirst(lc);
7494 858 : Path *path_save = path;
7495 858 : List *pathkey_orderings = NIL;
7496 :
7497 : /* generate alternative group orderings that might be useful */
7498 858 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7499 :
7500 : Assert(list_length(pathkey_orderings) > 0);
7501 :
7502 : /* process all potentially interesting grouping reorderings */
7503 1716 : foreach(lc2, pathkey_orderings)
7504 : {
7505 858 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7506 :
7507 : /* restore the path (we replace it in the loop) */
7508 858 : path = path_save;
7509 :
7510 858 : path = make_ordered_path(root,
7511 : partially_grouped_rel,
7512 : path,
7513 : cheapest_total_path,
7514 : info->pathkeys,
7515 : -1.0);
7516 :
7517 858 : if (path == NULL)
7518 0 : continue;
7519 :
7520 858 : if (parse->hasAggs)
7521 786 : add_path(partially_grouped_rel, (Path *)
7522 786 : create_agg_path(root,
7523 : partially_grouped_rel,
7524 : path,
7525 786 : partially_grouped_rel->reltarget,
7526 786 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7527 : AGGSPLIT_INITIAL_SERIAL,
7528 : info->clauses,
7529 : NIL,
7530 : agg_partial_costs,
7531 : dNumPartialGroups));
7532 : else
7533 72 : add_path(partially_grouped_rel, (Path *)
7534 72 : create_group_path(root,
7535 : partially_grouped_rel,
7536 : path,
7537 : info->clauses,
7538 : NIL,
7539 : dNumPartialGroups));
7540 : }
7541 : }
7542 : }
7543 :
7544 3178 : if (can_sort && cheapest_partial_path != NULL)
7545 : {
7546 : /* Similar to above logic, but for partial paths. */
7547 5714 : foreach(lc, input_rel->partial_pathlist)
7548 : {
7549 : ListCell *lc2;
7550 3118 : Path *path = (Path *) lfirst(lc);
7551 3118 : Path *path_save = path;
7552 3118 : List *pathkey_orderings = NIL;
7553 :
7554 : /* generate alternative group orderings that might be useful */
7555 3118 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7556 :
7557 : Assert(list_length(pathkey_orderings) > 0);
7558 :
7559 : /* process all potentially interesting grouping reorderings */
7560 6236 : foreach(lc2, pathkey_orderings)
7561 : {
7562 3118 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7563 :
7564 :
7565 : /* restore the path (we replace it in the loop) */
7566 3118 : path = path_save;
7567 :
7568 3118 : path = make_ordered_path(root,
7569 : partially_grouped_rel,
7570 : path,
7571 : cheapest_partial_path,
7572 : info->pathkeys,
7573 : -1.0);
7574 :
7575 3118 : if (path == NULL)
7576 6 : continue;
7577 :
7578 3112 : if (parse->hasAggs)
7579 2990 : add_partial_path(partially_grouped_rel, (Path *)
7580 2990 : create_agg_path(root,
7581 : partially_grouped_rel,
7582 : path,
7583 2990 : partially_grouped_rel->reltarget,
7584 2990 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7585 : AGGSPLIT_INITIAL_SERIAL,
7586 : info->clauses,
7587 : NIL,
7588 : agg_partial_costs,
7589 : dNumPartialPartialGroups));
7590 : else
7591 122 : add_partial_path(partially_grouped_rel, (Path *)
7592 122 : create_group_path(root,
7593 : partially_grouped_rel,
7594 : path,
7595 : info->clauses,
7596 : NIL,
7597 : dNumPartialPartialGroups));
7598 : }
7599 : }
7600 : }
7601 :
7602 : /*
7603 : * Add a partially-grouped HashAgg Path where possible
7604 : */
7605 3178 : if (can_hash && cheapest_total_path != NULL)
7606 : {
7607 : /* Checked above */
7608 : Assert(parse->hasAggs || parse->groupClause);
7609 :
7610 858 : add_path(partially_grouped_rel, (Path *)
7611 858 : create_agg_path(root,
7612 : partially_grouped_rel,
7613 : cheapest_total_path,
7614 858 : partially_grouped_rel->reltarget,
7615 : AGG_HASHED,
7616 : AGGSPLIT_INITIAL_SERIAL,
7617 : root->processed_groupClause,
7618 : NIL,
7619 : agg_partial_costs,
7620 : dNumPartialGroups));
7621 : }
7622 :
7623 : /*
7624 : * Now add a partially-grouped HashAgg partial Path where possible
7625 : */
7626 3178 : if (can_hash && cheapest_partial_path != NULL)
7627 : {
7628 1718 : add_partial_path(partially_grouped_rel, (Path *)
7629 1718 : create_agg_path(root,
7630 : partially_grouped_rel,
7631 : cheapest_partial_path,
7632 1718 : partially_grouped_rel->reltarget,
7633 : AGG_HASHED,
7634 : AGGSPLIT_INITIAL_SERIAL,
7635 : root->processed_groupClause,
7636 : NIL,
7637 : agg_partial_costs,
7638 : dNumPartialPartialGroups));
7639 : }
7640 :
7641 : /*
7642 : * Add any partially aggregated paths generated by eager aggregation to
7643 : * the new upper relation after applying projection steps as needed.
7644 : */
7645 3178 : if (eager_agg_rel)
7646 : {
7647 : /* Add the paths */
7648 2348 : foreach(lc, eager_agg_rel->pathlist)
7649 : {
7650 1450 : Path *path = (Path *) lfirst(lc);
7651 :
7652 : /* Shouldn't have any parameterized paths anymore */
7653 : Assert(path->param_info == NULL);
7654 :
7655 1450 : path = (Path *) create_projection_path(root,
7656 : partially_grouped_rel,
7657 : path,
7658 1450 : partially_grouped_rel->reltarget);
7659 :
7660 1450 : add_path(partially_grouped_rel, path);
7661 : }
7662 :
7663 : /*
7664 : * Likewise add the partial paths, but only if parallelism is possible
7665 : * for partially_grouped_rel.
7666 : */
7667 898 : if (partially_grouped_rel->consider_parallel)
7668 : {
7669 2028 : foreach(lc, eager_agg_rel->partial_pathlist)
7670 : {
7671 1212 : Path *path = (Path *) lfirst(lc);
7672 :
7673 : /* Shouldn't have any parameterized paths anymore */
7674 : Assert(path->param_info == NULL);
7675 :
7676 1212 : path = (Path *) create_projection_path(root,
7677 : partially_grouped_rel,
7678 : path,
7679 1212 : partially_grouped_rel->reltarget);
7680 :
7681 1212 : add_partial_path(partially_grouped_rel, path);
7682 : }
7683 : }
7684 : }
7685 :
7686 : /*
7687 : * If there is an FDW that's responsible for all baserels of the query,
7688 : * let it consider adding partially grouped ForeignPaths.
7689 : */
7690 3178 : if (partially_grouped_rel->fdwroutine &&
7691 6 : partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7692 : {
7693 6 : FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7694 :
7695 6 : fdwroutine->GetForeignUpperPaths(root,
7696 : UPPERREL_PARTIAL_GROUP_AGG,
7697 : input_rel, partially_grouped_rel,
7698 : extra);
7699 : }
7700 :
7701 3178 : return partially_grouped_rel;
7702 : }
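Editor's note: the partial grouping paths built above all aggregate with AGGSPLIT_INITIAL_SERIAL, leaving the combine and finalize work to a later Finalize Aggregate step. A minimal standalone sketch of that two-phase split, using AVG as the worked example (AvgState and the helper names are illustrative only, not backend types):

    #include <stdio.h>

    /* Illustrative stand-in for an aggregate transition state (AVG = sum/count) */
    typedef struct AvgState { double sum; long count; } AvgState;

    /* "Partial" phase: each worker folds only its own rows into a state */
    static void avg_accum(AvgState *st, double x) { st->sum += x; st->count++; }

    /* "Final" phase: the leader combines the workers' states ... */
    static void avg_combine(AvgState *dst, const AvgState *src)
    {
        dst->sum += src->sum;
        dst->count += src->count;
    }

    /* ... and only then applies the final function */
    static double avg_final(const AvgState *st)
    {
        return st->count ? st->sum / st->count : 0.0;
    }

    int main(void)
    {
        double rows[] = {1, 2, 3, 4, 5, 6};
        AvgState w1 = {0, 0}, w2 = {0, 0}, leader = {0, 0};

        for (int i = 0; i < 3; i++) avg_accum(&w1, rows[i]); /* worker 1's rows */
        for (int i = 3; i < 6; i++) avg_accum(&w2, rows[i]); /* worker 2's rows */

        avg_combine(&leader, &w1);
        avg_combine(&leader, &w2);
        printf("avg = %g\n", avg_final(&leader)); /* prints 3.5 */
        return 0;
    }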
7703 :
7704 : /*
7705 : * make_ordered_path
7706 : * Return a path ordered by 'pathkeys' based on the given 'path'. May
7707 : * return NULL if it doesn't make sense to generate an ordered path in
7708 : * this case.
7709 : */
7710 : static Path *
7711 64428 : make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
7712 : Path *cheapest_path, List *pathkeys, double limit_tuples)
7713 : {
7714 : bool is_sorted;
7715 : int presorted_keys;
7716 :
7717 64428 : is_sorted = pathkeys_count_contained_in(pathkeys,
7718 : path->pathkeys,
7719 : &presorted_keys);
7720 :
7721 64428 : if (!is_sorted)
7722 : {
7723 : /*
7724 : * Try at least sorting the cheapest path and also try incrementally
7725 : * sorting any path which is partially sorted already (no need to deal
7726 : * with paths which have presorted keys when incremental sort is
7727 : * disabled unless it's the cheapest input path).
7728 : */
7729 17010 : if (path != cheapest_path &&
7730 3308 : (presorted_keys == 0 || !enable_incremental_sort))
7731 1480 : return NULL;
7732 :
7733 : /*
7734 : * We've no need to consider both a sort and incremental sort. We'll
7735 : * just do a sort if there are no presorted keys and an incremental
7736 : * sort when there are presorted keys.
7737 : */
7738 15530 : if (presorted_keys == 0 || !enable_incremental_sort)
7739 13528 : path = (Path *) create_sort_path(root,
7740 : rel,
7741 : path,
7742 : pathkeys,
7743 : limit_tuples);
7744 : else
7745 2002 : path = (Path *) create_incremental_sort_path(root,
7746 : rel,
7747 : path,
7748 : pathkeys,
7749 : presorted_keys,
7750 : limit_tuples);
7751 : }
7752 :
7753 62948 : return path;
7754 : }
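Editor's note: make_ordered_path's branching reduces to a small decision table over (is_sorted, presorted_keys, path == cheapest_path, enable_incremental_sort). A standalone sketch of just that decision; the enum and function names are illustrative, not backend code:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum
    {
        ORDERED_AS_IS,     /* path already delivers the pathkeys */
        ORDERED_FULL_SORT, /* wrap in create_sort_path() */
        ORDERED_INC_SORT,  /* wrap in create_incremental_sort_path() */
        ORDERED_SKIP       /* make_ordered_path() returns NULL */
    } OrderedChoice;

    static OrderedChoice
    choose_ordering(bool is_sorted, int presorted_keys,
                    bool is_cheapest, bool enable_incremental_sort)
    {
        if (is_sorted)
            return ORDERED_AS_IS;
        /* non-cheapest paths are only worth sorting incrementally */
        if (!is_cheapest && (presorted_keys == 0 || !enable_incremental_sort))
            return ORDERED_SKIP;
        if (presorted_keys == 0 || !enable_incremental_sort)
            return ORDERED_FULL_SORT;
        return ORDERED_INC_SORT;
    }

    int main(void)
    {
        /* cheapest path, nothing presorted: full sort */
        printf("%d\n", choose_ordering(false, 0, true, true));  /* 1 */
        /* non-cheapest path with a presorted prefix: incremental sort */
        printf("%d\n", choose_ordering(false, 1, false, true)); /* 2 */
        /* non-cheapest path, nothing presorted: not worth considering */
        printf("%d\n", choose_ordering(false, 0, false, true)); /* 3 */
        return 0;
    }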
7755 :
7756 : /*
7757 : * Generate Gather and Gather Merge paths for a grouping relation or partial
7758 : * grouping relation.
7759 : *
7760 : * generate_useful_gather_paths does most of the work, but we also consider a
7761 : * special case: we could try sorting the data by the group_pathkeys and then
7762 : * applying Gather Merge.
7763 : *
7764 : * NB: This function shouldn't be used for anything other than a grouped or
7765             : * partially grouped relation, not only because it explicitly references
7766             : * group_pathkeys but also because we pass "true" as the third argument
7767             : * to generate_useful_gather_paths().
7768 : */
7769 : static void
7770 2410 : gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7771 : {
7772 : ListCell *lc;
7773 : Path *cheapest_partial_path;
7774 : List *groupby_pathkeys;
7775 :
7776 : /*
7777 : * This occurs after any partial aggregation has taken place, so trim off
7778 : * any pathkeys added for ORDER BY / DISTINCT aggregates.
7779 : */
7780 2410 : if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7781 18 : groupby_pathkeys = list_copy_head(root->group_pathkeys,
7782 : root->num_groupby_pathkeys);
7783 : else
7784 2392 : groupby_pathkeys = root->group_pathkeys;
7785 :
7786 : /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7787 2410 : generate_useful_gather_paths(root, rel, true);
7788 :
7789 2410 : cheapest_partial_path = linitial(rel->partial_pathlist);
7790 :
7791 : /* XXX Shouldn't this also consider the group-key-reordering? */
7792 5942 : foreach(lc, rel->partial_pathlist)
7793 : {
7794 3532 : Path *path = (Path *) lfirst(lc);
7795 : bool is_sorted;
7796 : int presorted_keys;
7797 : double total_groups;
7798 :
7799 3532 : is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7800 : path->pathkeys,
7801 : &presorted_keys);
7802 :
7803 3532 : if (is_sorted)
7804 2080 : continue;
7805 :
7806 : /*
7807 : * Try at least sorting the cheapest path and also try incrementally
7808 : * sorting any path which is partially sorted already (no need to deal
7809 : * with paths which have presorted keys when incremental sort is
7810 : * disabled unless it's the cheapest input path).
7811 : */
7812 1452 : if (path != cheapest_partial_path &&
7813 0 : (presorted_keys == 0 || !enable_incremental_sort))
7814 0 : continue;
7815 :
7816 : /*
7817 : * We've no need to consider both a sort and incremental sort. We'll
7818 : * just do a sort if there are no presorted keys and an incremental
7819 : * sort when there are presorted keys.
7820 : */
7821 1452 : if (presorted_keys == 0 || !enable_incremental_sort)
7822 1452 : path = (Path *) create_sort_path(root, rel, path,
7823 : groupby_pathkeys,
7824 : -1.0);
7825 : else
7826 0 : path = (Path *) create_incremental_sort_path(root,
7827 : rel,
7828 : path,
7829 : groupby_pathkeys,
7830 : presorted_keys,
7831 : -1.0);
7832 1452 : total_groups = compute_gather_rows(path);
7833 : path = (Path *)
7834 1452 : create_gather_merge_path(root,
7835 : rel,
7836 : path,
7837 1452 : rel->reltarget,
7838 : groupby_pathkeys,
7839 : NULL,
7840 : &total_groups);
7841 :
7842 1452 : add_path(rel, path);
7843 : }
7844 2410 : }
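Editor's note: the Gather Merge paths considered above rely on each worker's stream already being sorted; the leader then performs an order-preserving merge rather than an arbitrary interleave. A minimal sketch of that merge for two pre-sorted worker outputs, with plain arrays standing in for tuple streams:

    #include <stdio.h>

    /* Merge two sorted "worker streams" into one sorted output, the way a
     * Gather Merge node preserves sort order while combining workers. */
    static void
    merge_streams(const int *a, int na, const int *b, int nb, int *out)
    {
        int i = 0, j = 0, k = 0;

        while (i < na && j < nb)
            out[k++] = (a[i] <= b[j]) ? a[i++] : b[j++];
        while (i < na)
            out[k++] = a[i++];
        while (j < nb)
            out[k++] = b[j++];
    }

    int main(void)
    {
        int w1[] = {1, 4, 7};   /* worker 1, sorted on the group pathkeys */
        int w2[] = {2, 3, 9};   /* worker 2, likewise sorted */
        int out[6];

        merge_streams(w1, 3, w2, 3, out);
        for (int k = 0; k < 6; k++)
            printf("%d ", out[k]);  /* 1 2 3 4 7 9 */
        printf("\n");
        return 0;
    }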
7845 :
7846 : /*
7847 : * can_partial_agg
7848 : *
7849             : * Determines whether partial grouping and/or aggregation is possible.
7850 : * Returns true when possible, false otherwise.
7851 : */
7852 : static bool
7853 45356 : can_partial_agg(PlannerInfo *root)
7854 : {
7855 45356 : Query *parse = root->parse;
7856 :
7857 45356 : if (!parse->hasAggs && parse->groupClause == NIL)
7858 : {
7859 : /*
7860 : * We don't know how to do parallel aggregation unless we have either
7861 : * some aggregates or a grouping clause.
7862 : */
7863 0 : return false;
7864 : }
7865 45356 : else if (parse->groupingSets)
7866 : {
7867 : /* We don't know how to do grouping sets in parallel. */
7868 956 : return false;
7869 : }
7870 44400 : else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7871 : {
7872 : /* Insufficient support for partial mode. */
7873 3896 : return false;
7874 : }
7875 :
7876 : /* Everything looks good. */
7877 40504 : return true;
7878 : }
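Editor's note: the three disqualifying checks above can be read as a single pure predicate. A standalone restatement with plain booleans in place of the PlannerInfo and Query fields (names are illustrative):

    #include <stdbool.h>

    static bool
    can_partial_agg_sketch(bool hasAggs, bool hasGroupClause,
                           bool hasGroupingSets,
                           bool hasNonPartialAggs, bool hasNonSerialAggs)
    {
        if (!hasAggs && !hasGroupClause)
            return false;       /* nothing to partially aggregate */
        if (hasGroupingSets)
            return false;       /* grouping sets aren't parallelizable */
        if (hasNonPartialAggs || hasNonSerialAggs)
            return false;       /* some aggregate lacks partial/serial support */
        return true;
    }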
7879 :
7880 : /*
7881 : * apply_scanjoin_target_to_paths
7882 : *
7883 : * Adjust the final scan/join relation, and recursively all of its children,
7884 : * to generate the final scan/join target. It would be more correct to model
7885 : * this as a separate planning step with a new RelOptInfo at the toplevel and
7886 : * for each child relation, but doing it this way is noticeably cheaper.
7887 : * Maybe that problem can be solved at some point, but for now we do this.
7888 : *
7889 : * If tlist_same_exprs is true, then the scan/join target to be applied has
7890 : * the same expressions as the existing reltarget, so we need only insert the
7891 : * appropriate sortgroupref information. By avoiding the creation of
7892 : * projection paths we save effort both immediately and at plan creation time.
7893 : */
7894 : static void
7895 558450 : apply_scanjoin_target_to_paths(PlannerInfo *root,
7896 : RelOptInfo *rel,
7897 : List *scanjoin_targets,
7898 : List *scanjoin_targets_contain_srfs,
7899 : bool scanjoin_target_parallel_safe,
7900 : bool tlist_same_exprs)
7901 : {
7902 558450 : bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7903 : PathTarget *scanjoin_target;
7904 : ListCell *lc;
7905 :
7906 : /* This recurses, so be paranoid. */
7907 558450 : check_stack_depth();
7908 :
7909 : /*
7910 : * If the rel only has Append and MergeAppend paths, we want to drop its
7911 : * existing paths and generate new ones. This function would still be
7912 : * correct if we kept the existing paths: we'd modify them to generate the
7913 : * correct target above the partitioning Append, and then they'd compete
7914 : * on cost with paths generating the target below the Append. However, in
7915 : * our current cost model the latter way is always the same or cheaper
7916             : * our current cost model the latter way always costs the same or less,
7917             : * so modifying the existing paths would just be useless work.
7918 : * sometimes allow an existing path to be picked, resulting in undesirable
7919 : * cross-platform plan variations. So we drop old paths and thereby force
7920 : * the work to be done below the Append.
7921 : *
7922 : * However, there are several cases when this optimization is not safe. If
7923 : * the rel isn't partitioned, then none of the paths will be Append or
7924 : * MergeAppend paths, so we should definitely not do this. If it is
7925 : * parititoned but is a joinrel, it may have Append and MergeAppend paths,
7926             : * partitioned but is a joinrel, it may have Append and MergeAppend paths,
7927 : *
7928 : * Some care is needed, because we have to allow
7929 : * generate_useful_gather_paths to see the old partial paths in the next
7930 : * stanza. Hence, zap the main pathlist here, then allow
7931 : * generate_useful_gather_paths to add path(s) to the main list, and
7932 : * finally zap the partial pathlist.
7933 : */
7934 558450 : if (rel_is_partitioned && IS_SIMPLE_REL(rel))
7935 11700 : rel->pathlist = NIL;
7936 :
7937 : /*
7938 : * If the scan/join target is not parallel-safe, partial paths cannot
7939 : * generate it.
7940 : */
7941 558450 : if (!scanjoin_target_parallel_safe)
7942 : {
7943 : /*
7944 : * Since we can't generate the final scan/join target in parallel
7945 : * workers, this is our last opportunity to use any partial paths that
7946 : * exist; so build Gather path(s) that use them and emit whatever the
7947 : * current reltarget is. We don't do this in the case where the
7948 : * target is parallel-safe, since we will be able to generate superior
7949 : * paths by doing it after the final scan/join target has been
7950 : * applied.
7951 : */
7952 80602 : generate_useful_gather_paths(root, rel, false);
7953 :
7954 : /* Can't use parallel query above this level. */
7955 80602 : rel->partial_pathlist = NIL;
7956 80602 : rel->consider_parallel = false;
7957 : }
7958 :
7959 : /* Finish dropping old paths for a partitioned rel, per comment above */
7960 558450 : if (rel_is_partitioned && IS_SIMPLE_REL(rel))
7961 11700 : rel->partial_pathlist = NIL;
7962 :
7963 : /* Extract SRF-free scan/join target. */
7964 558450 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7965 :
7966 : /*
7967 : * Apply the SRF-free scan/join target to each existing path.
7968 : *
7969 : * If the tlist exprs are the same, we can just inject the sortgroupref
7970 : * information into the existing pathtargets. Otherwise, replace each
7971 : * path with a projection path that generates the SRF-free scan/join
7972 : * target. This can't change the ordering of paths within rel->pathlist,
7973 : * so we just modify the list in place.
7974 : */
7975 1161038 : foreach(lc, rel->pathlist)
7976 : {
7977 602588 : Path *subpath = (Path *) lfirst(lc);
7978 :
7979 : /* Shouldn't have any parameterized paths anymore */
7980 : Assert(subpath->param_info == NULL);
7981 :
7982 602588 : if (tlist_same_exprs)
7983 215340 : subpath->pathtarget->sortgrouprefs =
7984 215340 : scanjoin_target->sortgrouprefs;
7985 : else
7986 : {
7987 : Path *newpath;
7988 :
7989 387248 : newpath = (Path *) create_projection_path(root, rel, subpath,
7990 : scanjoin_target);
7991 387248 : lfirst(lc) = newpath;
7992 : }
7993 : }
7994 :
7995 : /* Likewise adjust the targets for any partial paths. */
7996 582882 : foreach(lc, rel->partial_pathlist)
7997 : {
7998 24432 : Path *subpath = (Path *) lfirst(lc);
7999 :
8000 : /* Shouldn't have any parameterized paths anymore */
8001 : Assert(subpath->param_info == NULL);
8002 :
8003 24432 : if (tlist_same_exprs)
8004 19440 : subpath->pathtarget->sortgrouprefs =
8005 19440 : scanjoin_target->sortgrouprefs;
8006 : else
8007 : {
8008 : Path *newpath;
8009 :
8010 4992 : newpath = (Path *) create_projection_path(root, rel, subpath,
8011 : scanjoin_target);
8012 4992 : lfirst(lc) = newpath;
8013 : }
8014 : }
8015 :
8016 : /*
8017 : * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
8018 : * atop each existing path. (Note that this function doesn't look at the
8019 : * cheapest-path fields, which is a good thing because they're bogus right
8020 : * now.)
8021 : */
8022 558450 : if (root->parse->hasTargetSRFs)
8023 12030 : adjust_paths_for_srfs(root, rel,
8024 : scanjoin_targets,
8025 : scanjoin_targets_contain_srfs);
8026 :
8027 : /*
8028 : * Update the rel's target to be the final (with SRFs) scan/join target.
8029 : * This now matches the actual output of all the paths, and we might get
8030 : * confused in createplan.c if they don't agree. We must do this now so
8031 : * that any append paths made in the next part will use the correct
8032 : * pathtarget (cf. create_append_path).
8033 : *
8034 : * Note that this is also necessary if GetForeignUpperPaths() gets called
8035 : * on the final scan/join relation or on any of its children, since the
8036 : * FDW might look at the rel's target to create ForeignPaths.
8037 : */
8038 558450 : rel->reltarget = llast_node(PathTarget, scanjoin_targets);
8039 :
8040 : /*
8041 : * If the relation is partitioned, recursively apply the scan/join target
8042 : * to all partitions, and generate brand-new Append paths in which the
8043 : * scan/join target is computed below the Append rather than above it.
8044 : * Since Append is not projection-capable, that might save a separate
8045 : * Result node, and it also is important for partitionwise aggregate.
8046 : */
8047 558450 : if (rel_is_partitioned)
8048 : {
8049 13266 : List *live_children = NIL;
8050 : int i;
8051 :
8052 : /* Adjust each partition. */
8053 13266 : i = -1;
8054 38012 : while ((i = bms_next_member(rel->live_parts, i)) >= 0)
8055 : {
8056 24746 : RelOptInfo *child_rel = rel->part_rels[i];
8057 : AppendRelInfo **appinfos;
8058 : int nappinfos;
8059 24746 : List *child_scanjoin_targets = NIL;
8060 :
8061 : Assert(child_rel != NULL);
8062 :
8063 : /* Dummy children can be ignored. */
8064 24746 : if (IS_DUMMY_REL(child_rel))
8065 42 : continue;
8066 :
8067 : /* Translate scan/join targets for this child. */
8068 24704 : appinfos = find_appinfos_by_relids(root, child_rel->relids,
8069 : &nappinfos);
8070 49408 : foreach(lc, scanjoin_targets)
8071 : {
8072 24704 : PathTarget *target = lfirst_node(PathTarget, lc);
8073 :
8074 24704 : target = copy_pathtarget(target);
8075 24704 : target->exprs = (List *)
8076 24704 : adjust_appendrel_attrs(root,
8077 24704 : (Node *) target->exprs,
8078 : nappinfos, appinfos);
8079 24704 : child_scanjoin_targets = lappend(child_scanjoin_targets,
8080 : target);
8081 : }
8082 24704 : pfree(appinfos);
8083 :
8084 : /* Recursion does the real work. */
8085 24704 : apply_scanjoin_target_to_paths(root, child_rel,
8086 : child_scanjoin_targets,
8087 : scanjoin_targets_contain_srfs,
8088 : scanjoin_target_parallel_safe,
8089 : tlist_same_exprs);
8090 :
8091 : /* Save non-dummy children for Append paths. */
8092 24704 : if (!IS_DUMMY_REL(child_rel))
8093 24704 : live_children = lappend(live_children, child_rel);
8094 : }
8095 :
8096 : /* Build new paths for this relation by appending child paths. */
8097 13266 : add_paths_to_append_rel(root, rel, live_children);
8098 : }
8099 :
8100 : /*
8101 : * Consider generating Gather or Gather Merge paths. We must only do this
8102 : * if the relation is parallel safe, and we don't do it for child rels to
8103 : * avoid creating multiple Gather nodes within the same plan. We must do
8104 : * this after all paths have been generated and before set_cheapest, since
8105 : * one of the generated paths may turn out to be the cheapest one.
8106 : */
8107 558450 : if (rel->consider_parallel && !IS_OTHER_REL(rel))
8108 179654 : generate_useful_gather_paths(root, rel, false);
8109 :
8110 : /*
8111 : * Reassess which paths are the cheapest, now that we've potentially added
8112 : * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
8113 : * this relation.
8114 : */
8115 558450 : set_cheapest(rel);
8116 558450 : }
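Editor's note: the tlist_same_exprs fast path above mutates the existing pathtargets in place rather than allocating projection paths. A standalone sketch of the idea, with simplified structs that merely stand in for the backend's PathTarget and Path:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Target { int *sortgrouprefs; } Target;
    typedef struct Path { Target *target; bool projected; } Path;

    /* Analogue of the loops above: label each path's target in place when
     * the expressions are shared, otherwise mark the path as needing a
     * projection wrapper (the stand-in for create_projection_path). */
    static void
    apply_target(Path *paths, int npaths, Target *new_target, bool same_exprs)
    {
        for (int i = 0; i < npaths; i++)
        {
            if (same_exprs)
                paths[i].target->sortgrouprefs = new_target->sortgrouprefs;
            else
                paths[i].projected = true;
        }
    }

    int main(void)
    {
        int refs[] = {1, 2};
        Target shared = {NULL}, wanted = {refs};
        Path paths[2] = {{&shared, false}, {&shared, false}};

        apply_target(paths, 2, &wanted, true);
        printf("sortgroupref[0] = %d, projected = %d\n",
               paths[0].target->sortgrouprefs[0], paths[0].projected);
        return 0;
    }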
8117 :
8118 : /*
8119 : * create_partitionwise_grouping_paths
8120 : *
8121             : * If the partition keys of the input relation are part of the GROUP BY clause, all
8122 : * the rows belonging to a given group come from a single partition. This
8123 : * allows aggregation/grouping over a partitioned relation to be broken down
8124 : * into aggregation/grouping on each partition. This should be no worse, and
8125 : * often better, than the normal approach.
8126 : *
8127 : * However, if the GROUP BY clause does not contain all the partition keys,
8128 : * rows from a given group may be spread across multiple partitions. In that
8129 : * case, we perform partial aggregation for each group, append the results,
8130 : * and then finalize aggregation. This is less certain to win than the
8131 : * previous case. It may win if the PartialAggregate stage greatly reduces
8132 : * the number of groups, because fewer rows will pass through the Append node.
8133 : * It may lose if we have lots of small groups.
8134 : */
8135 : static void
8136 826 : create_partitionwise_grouping_paths(PlannerInfo *root,
8137 : RelOptInfo *input_rel,
8138 : RelOptInfo *grouped_rel,
8139 : RelOptInfo *partially_grouped_rel,
8140 : const AggClauseCosts *agg_costs,
8141 : grouping_sets_data *gd,
8142 : PartitionwiseAggregateType patype,
8143 : GroupPathExtraData *extra)
8144 : {
8145 826 : List *grouped_live_children = NIL;
8146 826 : List *partially_grouped_live_children = NIL;
8147 826 : PathTarget *target = grouped_rel->reltarget;
8148 826 : bool partial_grouping_valid = true;
8149 : int i;
8150 :
8151 : Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
8152 : Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
8153 : partially_grouped_rel != NULL);
8154 :
8155 : /* Add paths for partitionwise aggregation/grouping. */
8156 826 : i = -1;
8157 2992 : while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8158 : {
8159 2166 : RelOptInfo *child_input_rel = input_rel->part_rels[i];
8160 : PathTarget *child_target;
8161 : AppendRelInfo **appinfos;
8162 : int nappinfos;
8163 : GroupPathExtraData child_extra;
8164 : RelOptInfo *child_grouped_rel;
8165 : RelOptInfo *child_partially_grouped_rel;
8166 :
8167 : Assert(child_input_rel != NULL);
8168 :
8169 : /* Dummy children can be ignored. */
8170 2166 : if (IS_DUMMY_REL(child_input_rel))
8171 0 : continue;
8172 :
8173 2166 : child_target = copy_pathtarget(target);
8174 :
8175 : /*
8176 : * Copy the given "extra" structure as is and then override the
8177 : * members specific to this child.
8178 : */
8179 2166 : memcpy(&child_extra, extra, sizeof(child_extra));
8180 :
8181 2166 : appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8182 : &nappinfos);
8183 :
8184 2166 : child_target->exprs = (List *)
8185 2166 : adjust_appendrel_attrs(root,
8186 2166 : (Node *) target->exprs,
8187 : nappinfos, appinfos);
8188 :
8189 : /* Translate havingQual and targetList. */
8190 2166 : child_extra.havingQual = (Node *)
8191 : adjust_appendrel_attrs(root,
8192 : extra->havingQual,
8193 : nappinfos, appinfos);
8194 2166 : child_extra.targetList = (List *)
8195 2166 : adjust_appendrel_attrs(root,
8196 2166 : (Node *) extra->targetList,
8197 : nappinfos, appinfos);
8198 :
8199 : /*
8200 : * extra->patype was the value computed for our parent rel; patype is
8201 : * the value for this relation. For the child, our value is its
8202 : * parent rel's value.
8203 : */
8204 2166 : child_extra.patype = patype;
8205 :
8206 : /*
8207 : * Create grouping relation to hold fully aggregated grouping and/or
8208 : * aggregation paths for the child.
8209 : */
8210 2166 : child_grouped_rel = make_grouping_rel(root, child_input_rel,
8211 : child_target,
8212 2166 : extra->target_parallel_safe,
8213 : child_extra.havingQual);
8214 :
8215 : /* Create grouping paths for this child relation. */
8216 2166 : create_ordinary_grouping_paths(root, child_input_rel,
8217 : child_grouped_rel,
8218 : agg_costs, gd, &child_extra,
8219 : &child_partially_grouped_rel);
8220 :
8221 2166 : if (child_partially_grouped_rel)
8222 : {
8223 : partially_grouped_live_children =
8224 1542 : lappend(partially_grouped_live_children,
8225 : child_partially_grouped_rel);
8226 : }
8227 : else
8228 624 : partial_grouping_valid = false;
8229 :
8230 2166 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8231 : {
8232 1308 : set_cheapest(child_grouped_rel);
8233 1308 : grouped_live_children = lappend(grouped_live_children,
8234 : child_grouped_rel);
8235 : }
8236 :
8237 2166 : pfree(appinfos);
8238 : }
8239 :
8240 : /*
8241 : * Try to create append paths for partially grouped children. For full
8242 : * partitionwise aggregation, we might have paths in the partial_pathlist
8243 : * if parallel aggregation is possible. For partial partitionwise
8244 : * aggregation, we may have paths in both pathlist and partial_pathlist.
8245 : *
8246 : * NB: We must have a partially grouped path for every child in order to
8247 : * generate a partially grouped path for this relation.
8248 : */
8249 826 : if (partially_grouped_rel && partial_grouping_valid)
8250 : {
8251 : Assert(partially_grouped_live_children != NIL);
8252 :
8253 602 : add_paths_to_append_rel(root, partially_grouped_rel,
8254 : partially_grouped_live_children);
8255 : }
8256 :
8257 : /* If possible, create append paths for fully grouped children. */
8258 826 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8259 : {
8260 : Assert(grouped_live_children != NIL);
8261 :
8262 488 : add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8263 : }
8264 826 : }
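Editor's note: for the partial-partitionwise case described in the header comment, each partition computes per-group partial states, the results are appended, and a finalize step combines groups that span partitions. A standalone numeric sketch using COUNT (all names illustrative):

    #include <stdio.h>

    #define NGROUPS 3

    int main(void)
    {
        /* Per-group partial counts computed independently in two partitions.
         * The grouping column is NOT the partition key here, so the same
         * group can appear in both partitions. */
        long part1[NGROUPS] = {2, 0, 5};
        long part2[NGROUPS] = {1, 4, 3};
        long final[NGROUPS] = {0};

        /* "Append" the partial results, then finalize by combining the
         * counts for groups that occur in more than one partition. */
        for (int g = 0; g < NGROUPS; g++)
            final[g] = part1[g] + part2[g];

        for (int g = 0; g < NGROUPS; g++)
            printf("group %d: count = %ld\n", g, final[g]); /* 3, 4, 8 */
        return 0;
    }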
8265 :
8266 : /*
8267 : * group_by_has_partkey
8268 : *
8269 : * Returns true if all the partition keys of the given relation are part of
8270 : * the GROUP BY clauses, including having matching collation, false otherwise.
8271 : */
8272 : static bool
8273 772 : group_by_has_partkey(RelOptInfo *input_rel,
8274 : List *targetList,
8275 : List *groupClause)
8276 : {
8277 772 : List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8278 772 : int cnt = 0;
8279 : int partnatts;
8280 :
8281 : /* Input relation should be partitioned. */
8282 : Assert(input_rel->part_scheme);
8283 :
8284             : /* Rule out early if there are no partition keys present. */
8285 772 : if (!input_rel->partexprs)
8286 0 : return false;
8287 :
8288 772 : partnatts = input_rel->part_scheme->partnatts;
8289 :
8290 1296 : for (cnt = 0; cnt < partnatts; cnt++)
8291 : {
8292 808 : List *partexprs = input_rel->partexprs[cnt];
8293 : ListCell *lc;
8294 808 : bool found = false;
8295 :
8296 1206 : foreach(lc, partexprs)
8297 : {
8298 : ListCell *lg;
8299 934 : Expr *partexpr = lfirst(lc);
8300 934 : Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8301 :
8302 1452 : foreach(lg, groupexprs)
8303 : {
8304 1054 : Expr *groupexpr = lfirst(lg);
8305 1054 : Oid groupcoll = exprCollation((Node *) groupexpr);
8306 :
8307 : /*
8308 : * Note: we can assume there is at most one RelabelType node;
8309 : * eval_const_expressions() will have simplified if more than
8310 : * one.
8311 : */
8312 1054 : if (IsA(groupexpr, RelabelType))
8313 24 : groupexpr = ((RelabelType *) groupexpr)->arg;
8314 :
8315 1054 : if (equal(groupexpr, partexpr))
8316 : {
8317 : /*
8318 : * Reject a match if the grouping collation does not match
8319 : * the partitioning collation.
8320 : */
8321 536 : if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8322 : partcoll != groupcoll)
8323 12 : return false;
8324 :
8325 524 : found = true;
8326 524 : break;
8327 : }
8328 : }
8329 :
8330 922 : if (found)
8331 524 : break;
8332 : }
8333 :
8334 : /*
8335             : * If none of the partition key expressions match any of the
8336             : * GROUP BY expressions, return false.
8337 : */
8338 796 : if (!found)
8339 272 : return false;
8340 : }
8341 :
8342 488 : return true;
8343 : }
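Editor's note: a standalone restatement of the containment test above: every partition key must find an equal GROUP BY expression with a compatible collation. Small integers stand in for expression trees and collation OIDs (0 meaning "no collation"); all names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Expr { int id; unsigned coll; } Expr;   /* toy expression */

    static bool
    group_by_has_partkey_sketch(const Expr *partkeys, int npartkeys,
                                const Expr *groupexprs, int ngroupexprs)
    {
        for (int k = 0; k < npartkeys; k++)
        {
            bool found = false;

            for (int g = 0; g < ngroupexprs; g++)
            {
                if (groupexprs[g].id != partkeys[k].id)
                    continue;
                /* reject on a collation mismatch, as the real code does */
                if (partkeys[k].coll != 0 && groupexprs[g].coll != 0 &&
                    partkeys[k].coll != groupexprs[g].coll)
                    return false;
                found = true;
                break;
            }
            if (!found)
                return false;   /* some partition key is not grouped on */
        }
        return true;
    }

    int main(void)
    {
        Expr partkeys[] = {{1, 100}};
        Expr group_ok[] = {{1, 100}, {2, 0}};
        Expr group_miss[] = {{2, 0}};

        printf("%d %d\n",
               group_by_has_partkey_sketch(partkeys, 1, group_ok, 2),    /* 1 */
               group_by_has_partkey_sketch(partkeys, 1, group_miss, 1)); /* 0 */
        return 0;
    }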
8344 :
8345 : /*
8346 : * generate_setop_child_grouplist
8347 : * Build a SortGroupClause list defining the sort/grouping properties
8348 : * of the child of a set operation.
8349 : *
8350 : * This is similar to generate_setop_grouplist() but differs as the setop
8351 : * child query's targetlist entries may already have a tleSortGroupRef
8352 : * assigned for other purposes, such as GROUP BYs. Here we keep the
8353 : * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8354 : * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8355             : * any of the columns in the targetlist don't match the setop's colTypes,
8356 : * then we return an empty list. This may leave some TLEs with unreferenced
8357 : * ressortgroupref markings, but that's harmless.
8358 : */
8359 : static List *
8360 12762 : generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
8361 : {
8362 12762 : List *grouplist = copyObject(op->groupClauses);
8363 : ListCell *lg;
8364 : ListCell *lt;
8365 : ListCell *ct;
8366 :
8367 12762 : lg = list_head(grouplist);
8368 12762 : ct = list_head(op->colTypes);
8369 49146 : foreach(lt, targetlist)
8370 : {
8371 36806 : TargetEntry *tle = (TargetEntry *) lfirst(lt);
8372 : SortGroupClause *sgc;
8373 : Oid coltype;
8374 :
8375 : /* resjunk columns could have sortgrouprefs. Leave these alone */
8376 36806 : if (tle->resjunk)
8377 0 : continue;
8378 :
8379 : /*
8380 : * We expect every non-resjunk target to have a SortGroupClause and
8381 : * colTypes.
8382 : */
8383 : Assert(lg != NULL);
8384 : Assert(ct != NULL);
8385 36806 : sgc = (SortGroupClause *) lfirst(lg);
8386 36806 : coltype = lfirst_oid(ct);
8387 :
8388 : /* reject if target type isn't the same as the setop target type */
8389 36806 : if (coltype != exprType((Node *) tle->expr))
8390 422 : return NIL;
8391 :
8392 36384 : lg = lnext(grouplist, lg);
8393 36384 : ct = lnext(op->colTypes, ct);
8394 :
8395 : /* assign a tleSortGroupRef, or reuse the existing one */
8396 36384 : sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8397 : }
8398 :
8399 : Assert(lg == NULL);
8400 : Assert(ct == NULL);
8401 :
8402 12340 : return grouplist;
8403 : }
8404 :
8405 : /*
8406 : * create_unique_paths
8407 : * Build a new RelOptInfo containing Paths that represent elimination of
8408 : * distinct rows from the input data. Distinct-ness is defined according to
8409 : * the needs of the semijoin represented by sjinfo. If it is not possible
8410 : * to identify how to make the data unique, NULL is returned.
8411 : *
8412 : * If used at all, this is likely to be called repeatedly on the same rel,
8413 : * so we cache the result.
8414 : */
8415 : RelOptInfo *
8416 8926 : create_unique_paths(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo)
8417 : {
8418 : RelOptInfo *unique_rel;
8419 8926 : List *sortPathkeys = NIL;
8420 8926 : List *groupClause = NIL;
8421 : MemoryContext oldcontext;
8422 :
8423 : /* Caller made a mistake if SpecialJoinInfo is the wrong one */
8424 : Assert(sjinfo->jointype == JOIN_SEMI);
8425 : Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
8426 :
8427 : /* If result already cached, return it */
8428 8926 : if (rel->unique_rel)
8429 1836 : return rel->unique_rel;
8430 :
8431 : /* If it's not possible to unique-ify, return NULL */
8432 7090 : if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
8433 132 : return NULL;
8434 :
8435 : /*
8436 : * Punt if this is a child relation and we failed to build a unique-ified
8437 : * relation for its parent. This can happen if all the RHS columns were
8438 : * found to be equated to constants when unique-ifying the parent table,
8439 : * leaving no columns to unique-ify.
8440 : */
8441 6958 : if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL)
8442 12 : return NULL;
8443 :
8444 : /*
8445 : * When called during GEQO join planning, we are in a short-lived memory
8446 : * context. We must make sure that the unique rel and any subsidiary data
8447 : * structures created for a baserel survive the GEQO cycle, else the
8448 : * baserel is trashed for future GEQO cycles. On the other hand, when we
8449 : * are creating those for a joinrel during GEQO, we don't want them to
8450 : * clutter the main planning context. Upshot is that the best solution is
8451 : * to explicitly allocate memory in the same context the given RelOptInfo
8452 : * is in.
8453 : */
8454 6946 : oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
8455 :
8456 6946 : unique_rel = makeNode(RelOptInfo);
8457 6946 : memcpy(unique_rel, rel, sizeof(RelOptInfo));
8458 :
8459 : /*
8460 : * clear path info
8461 : */
8462 6946 : unique_rel->pathlist = NIL;
8463 6946 : unique_rel->ppilist = NIL;
8464 6946 : unique_rel->partial_pathlist = NIL;
8465 6946 : unique_rel->cheapest_startup_path = NULL;
8466 6946 : unique_rel->cheapest_total_path = NULL;
8467 6946 : unique_rel->cheapest_parameterized_paths = NIL;
8468 :
8469 : /*
8470 : * Build the target list for the unique rel. We also build the pathkeys
8471 : * that represent the ordering requirements for the sort-based
8472 : * implementation, and the list of SortGroupClause nodes that represent
8473 : * the columns to be grouped on for the hash-based implementation.
8474 : *
8475 : * For a child rel, we can construct these fields from those of its
8476 : * parent.
8477 : */
8478 6946 : if (IS_OTHER_REL(rel))
8479 432 : {
8480 : PathTarget *child_unique_target;
8481 : PathTarget *parent_unique_target;
8482 :
8483 432 : parent_unique_target = rel->top_parent->unique_rel->reltarget;
8484 :
8485 432 : child_unique_target = copy_pathtarget(parent_unique_target);
8486 :
8487 : /* Translate the target expressions */
8488 432 : child_unique_target->exprs = (List *)
8489 432 : adjust_appendrel_attrs_multilevel(root,
8490 432 : (Node *) parent_unique_target->exprs,
8491 : rel,
8492 432 : rel->top_parent);
8493 :
8494 432 : unique_rel->reltarget = child_unique_target;
8495 :
8496 432 : sortPathkeys = rel->top_parent->unique_pathkeys;
8497 432 : groupClause = rel->top_parent->unique_groupclause;
8498 : }
8499 : else
8500 : {
8501 : List *newtlist;
8502 : int nextresno;
8503 6514 : List *sortList = NIL;
8504 : ListCell *lc1;
8505 : ListCell *lc2;
8506 :
8507 : /*
8508 : * The values we are supposed to unique-ify may be expressions in the
8509 : * variables of the input rel's targetlist. We have to add any such
8510 : * expressions to the unique rel's targetlist.
8511 : *
8512 : * To complicate matters, some of the values to be unique-ified may be
8513 : * known redundant by the EquivalenceClass machinery (e.g., because
8514 : * they have been equated to constants). There is no need to compare
8515 : * such values during unique-ification, and indeed we had better not
8516 : * try because the Vars involved may not have propagated as high as
8517 : * the semijoin's level. We use make_pathkeys_for_sortclauses to
8518 : * detect such cases, which is a tad inefficient but it doesn't seem
8519 : * worth building specialized infrastructure for this.
8520 : */
8521 6514 : newtlist = make_tlist_from_pathtarget(rel->reltarget);
8522 6514 : nextresno = list_length(newtlist) + 1;
8523 :
8524 13262 : forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators)
8525 : {
8526 6748 : Expr *uniqexpr = lfirst(lc1);
8527 6748 : Oid in_oper = lfirst_oid(lc2);
8528 : Oid sortop;
8529 : TargetEntry *tle;
8530 6748 : bool made_tle = false;
8531 :
8532 6748 : tle = tlist_member(uniqexpr, newtlist);
8533 6748 : if (!tle)
8534 : {
8535 3268 : tle = makeTargetEntry(uniqexpr,
8536 : nextresno,
8537 : NULL,
8538 : false);
8539 3268 : newtlist = lappend(newtlist, tle);
8540 3268 : nextresno++;
8541 3268 : made_tle = true;
8542 : }
8543 :
8544 : /*
8545 : * Try to build an ORDER BY list to sort the input compatibly. We
8546 : * do this for each sortable clause even when the clauses are not
8547 : * all sortable, so that we can detect clauses that are redundant
8548 : * according to the pathkey machinery.
8549 : */
8550 6748 : sortop = get_ordering_op_for_equality_op(in_oper, false);
8551 6748 : if (OidIsValid(sortop))
8552 : {
8553 : Oid eqop;
8554 : SortGroupClause *sortcl;
8555 :
8556 : /*
8557 : * The Unique node will need equality operators. Normally
8558 : * these are the same as the IN clause operators, but if those
8559 : * are cross-type operators then the equality operators are
8560 : * the ones for the IN clause operators' RHS datatype.
8561 : */
8562 6748 : eqop = get_equality_op_for_ordering_op(sortop, NULL);
8563 6748 : if (!OidIsValid(eqop)) /* shouldn't happen */
8564 0 : elog(ERROR, "could not find equality operator for ordering operator %u",
8565 : sortop);
8566 :
8567 6748 : sortcl = makeNode(SortGroupClause);
8568 6748 : sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8569 6748 : sortcl->eqop = eqop;
8570 6748 : sortcl->sortop = sortop;
8571 6748 : sortcl->reverse_sort = false;
8572 6748 : sortcl->nulls_first = false;
8573 6748 : sortcl->hashable = false; /* no need to make this accurate */
8574 6748 : sortList = lappend(sortList, sortcl);
8575 :
8576 : /*
8577 : * At each step, convert the SortGroupClause list to pathkey
8578 : * form. If the just-added SortGroupClause is redundant, the
8579 : * result will be shorter than the SortGroupClause list.
8580 : */
8581 6748 : sortPathkeys = make_pathkeys_for_sortclauses(root, sortList,
8582 : newtlist);
8583 6748 : if (list_length(sortPathkeys) != list_length(sortList))
8584 : {
8585 : /* Drop the redundant SortGroupClause */
8586 2052 : sortList = list_delete_last(sortList);
8587 : Assert(list_length(sortPathkeys) == list_length(sortList));
8588 : /* Undo tlist addition, if we made one */
8589 2052 : if (made_tle)
8590 : {
8591 12 : newtlist = list_delete_last(newtlist);
8592 12 : nextresno--;
8593 : }
8594 : /* We need not consider this clause for hashing, either */
8595 2052 : continue;
8596 : }
8597 : }
8598 0 : else if (sjinfo->semi_can_btree) /* shouldn't happen */
8599 0 : elog(ERROR, "could not find ordering operator for equality operator %u",
8600 : in_oper);
8601 :
8602 4696 : if (sjinfo->semi_can_hash)
8603 : {
8604 : /* Create a GROUP BY list for the Agg node to use */
8605 : Oid eq_oper;
8606 : SortGroupClause *groupcl;
8607 :
8608 : /*
8609 : * Get the hashable equality operators for the Agg node to
8610 : * use. Normally these are the same as the IN clause
8611 : * operators, but if those are cross-type operators then the
8612 : * equality operators are the ones for the IN clause
8613 : * operators' RHS datatype.
8614 : */
8615 4696 : if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
8616 0 : elog(ERROR, "could not find compatible hash operator for operator %u",
8617 : in_oper);
8618 :
8619 4696 : groupcl = makeNode(SortGroupClause);
8620 4696 : groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8621 4696 : groupcl->eqop = eq_oper;
8622 4696 : groupcl->sortop = sortop;
8623 4696 : groupcl->reverse_sort = false;
8624 4696 : groupcl->nulls_first = false;
8625 4696 : groupcl->hashable = true;
8626 4696 : groupClause = lappend(groupClause, groupcl);
8627 : }
8628 : }
8629 :
8630 : /*
8631 : * Done building the sortPathkeys and groupClause. But the
8632 : * sortPathkeys are bogus if not all the clauses were sortable.
8633 : */
8634 6514 : if (!sjinfo->semi_can_btree)
8635 0 : sortPathkeys = NIL;
8636 :
8637 : /*
8638 : * It can happen that all the RHS columns are equated to constants.
8639 : * We'd have to do something special to unique-ify in that case, and
8640 : * it's such an unlikely-in-the-real-world case that it's not worth
8641 : * the effort. So just punt if we found no columns to unique-ify.
8642 : */
8643 6514 : if (sortPathkeys == NIL && groupClause == NIL)
8644 : {
8645 1950 : MemoryContextSwitchTo(oldcontext);
8646 1950 : return NULL;
8647 : }
8648 :
8649 : /* Convert the required targetlist back to PathTarget form */
8650 4564 : unique_rel->reltarget = create_pathtarget(root, newtlist);
8651 : }
8652 :
8653 : /* build unique paths based on input rel's pathlist */
8654 4996 : create_final_unique_paths(root, rel, sortPathkeys, groupClause,
8655 : sjinfo, unique_rel);
8656 :
8657 : /* build unique paths based on input rel's partial_pathlist */
8658 4996 : create_partial_unique_paths(root, rel, sortPathkeys, groupClause,
8659 : sjinfo, unique_rel);
8660 :
8661 : /* Now choose the best path(s) */
8662 4996 : set_cheapest(unique_rel);
8663 :
8664 : /*
8665 : * There shouldn't be any partial paths for the unique relation;
8666 : * otherwise, we won't be able to properly guarantee uniqueness.
8667 : */
8668 : Assert(unique_rel->partial_pathlist == NIL);
8669 :
8670 : /* Cache the result */
8671 4996 : rel->unique_rel = unique_rel;
8672 4996 : rel->unique_pathkeys = sortPathkeys;
8673 4996 : rel->unique_groupclause = groupClause;
8674 :
8675 4996 : MemoryContextSwitchTo(oldcontext);
8676 :
8677 4996 : return unique_rel;
8678 : }
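Editor's note: the sort-based unique strategy that create_unique_paths sets up amounts to ordering the rows on the semijoin columns and then collapsing adjacent duplicates. A standalone sketch of that contract on plain integers:

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_int(const void *a, const void *b)
    {
        int x = *(const int *) a, y = *(const int *) b;

        return (x > y) - (x < y);
    }

    /* Sort, then keep only the first row of each run of equal keys; this
     * is the net effect of the Sort + Unique path pair built below. */
    static int
    sort_unique(int *vals, int n)
    {
        int out = 0;

        qsort(vals, n, sizeof(int), cmp_int);
        for (int i = 0; i < n; i++)
            if (out == 0 || vals[i] != vals[out - 1])
                vals[out++] = vals[i];
        return out;             /* number of distinct values */
    }

    int main(void)
    {
        int rhs[] = {4, 1, 4, 2, 1};
        int n = sort_unique(rhs, 5);

        for (int i = 0; i < n; i++)
            printf("%d ", rhs[i]);  /* 1 2 4 */
        printf("\n");
        return 0;
    }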
8679 :
8680 : /*
8681 : * create_final_unique_paths
8682 : * Create unique paths in 'unique_rel' based on 'input_rel' pathlist
8683 : */
8684 : static void
8685 8744 : create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8686 : List *sortPathkeys, List *groupClause,
8687 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8688 : {
8689 8744 : Path *cheapest_input_path = input_rel->cheapest_total_path;
8690 :
8691 : /* Estimate number of output rows */
8692 8744 : unique_rel->rows = estimate_num_groups(root,
8693 : sjinfo->semi_rhs_exprs,
8694 : cheapest_input_path->rows,
8695 : NULL,
8696 : NULL);
8697 :
8698 : /* Consider sort-based implementations, if possible. */
8699 8744 : if (sjinfo->semi_can_btree)
8700 : {
8701 : ListCell *lc;
8702 :
8703 : /*
8704 : * Use any available suitably-sorted path as input, and also consider
8705 : * sorting the cheapest-total path and incremental sort on any paths
8706 : * with presorted keys.
8707 : *
8708 : * To save planning time, we ignore parameterized input paths unless
8709 : * they are the cheapest-total path.
8710 : */
8711 19056 : foreach(lc, input_rel->pathlist)
8712 : {
8713 10312 : Path *input_path = (Path *) lfirst(lc);
8714 : Path *path;
8715 : bool is_sorted;
8716 : int presorted_keys;
8717 :
8718 : /*
8719 : * Ignore parameterized paths that are not the cheapest-total
8720 : * path.
8721 : */
8722 10312 : if (input_path->param_info &&
8723 : input_path != cheapest_input_path)
8724 922 : continue;
8725 :
8726 9440 : is_sorted = pathkeys_count_contained_in(sortPathkeys,
8727 : input_path->pathkeys,
8728 : &presorted_keys);
8729 :
8730 : /*
8731 : * Ignore paths that are not suitably or partially sorted, unless
8732 : * they are the cheapest total path (no need to deal with paths
8733 : * which have presorted keys when incremental sort is disabled).
8734 : */
8735 9440 : if (!is_sorted && input_path != cheapest_input_path &&
8736 98 : (presorted_keys == 0 || !enable_incremental_sort))
8737 50 : continue;
8738 :
8739 : /*
8740 : * Make a separate ProjectionPath in case we need a Result node.
8741 : */
8742 9390 : path = (Path *) create_projection_path(root,
8743 : unique_rel,
8744 : input_path,
8745 9390 : unique_rel->reltarget);
8746 :
8747 9390 : if (!is_sorted)
8748 : {
8749 : /*
8750 : * We've no need to consider both a sort and incremental sort.
8751 : * We'll just do a sort if there are no presorted keys and an
8752 : * incremental sort when there are presorted keys.
8753 : */
8754 4982 : if (presorted_keys == 0 || !enable_incremental_sort)
8755 4934 : path = (Path *) create_sort_path(root,
8756 : unique_rel,
8757 : path,
8758 : sortPathkeys,
8759 : -1.0);
8760 : else
8761 48 : path = (Path *) create_incremental_sort_path(root,
8762 : unique_rel,
8763 : path,
8764 : sortPathkeys,
8765 : presorted_keys,
8766 : -1.0);
8767 : }
8768 :
8769 9390 : path = (Path *) create_unique_path(root, unique_rel, path,
8770 : list_length(sortPathkeys),
8771 : unique_rel->rows);
8772 :
8773 9390 : add_path(unique_rel, path);
8774 : }
8775 : }
8776 :
8777 : /* Consider hash-based implementation, if possible. */
8778 8744 : if (sjinfo->semi_can_hash)
8779 : {
8780 : Path *path;
8781 :
8782 : /*
8783 : * Make a separate ProjectionPath in case we need a Result node.
8784 : */
8785 8744 : path = (Path *) create_projection_path(root,
8786 : unique_rel,
8787 : cheapest_input_path,
8788 8744 : unique_rel->reltarget);
8789 :
8790 8744 : path = (Path *) create_agg_path(root,
8791 : unique_rel,
8792 : path,
8793 : cheapest_input_path->pathtarget,
8794 : AGG_HASHED,
8795 : AGGSPLIT_SIMPLE,
8796 : groupClause,
8797 : NIL,
8798 : NULL,
8799 : unique_rel->rows);
8800 :
8801 8744 : add_path(unique_rel, path);
8802 : }
8803 8744 : }
8804 :
8805 : /*
8806 : * create_partial_unique_paths
8807 : * Create unique paths in 'unique_rel' based on 'input_rel' partial_pathlist
8808 : */
8809 : static void
8810 4996 : create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8811 : List *sortPathkeys, List *groupClause,
8812 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8813 : {
8814 : RelOptInfo *partial_unique_rel;
8815 : Path *cheapest_partial_path;
8816 :
8817 : /* nothing to do when there are no partial paths in the input rel */
8818 4996 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
8819 1248 : return;
8820 :
8821 : /*
8822 : * nothing to do if there's anything in the targetlist that's
8823 : * parallel-restricted.
8824 : */
8825 3748 : if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs))
8826 0 : return;
8827 :
8828 3748 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
8829 :
8830 3748 : partial_unique_rel = makeNode(RelOptInfo);
8831 3748 : memcpy(partial_unique_rel, input_rel, sizeof(RelOptInfo));
8832 :
8833 : /*
8834 : * clear path info
8835 : */
8836 3748 : partial_unique_rel->pathlist = NIL;
8837 3748 : partial_unique_rel->ppilist = NIL;
8838 3748 : partial_unique_rel->partial_pathlist = NIL;
8839 3748 : partial_unique_rel->cheapest_startup_path = NULL;
8840 3748 : partial_unique_rel->cheapest_total_path = NULL;
8841 3748 : partial_unique_rel->cheapest_parameterized_paths = NIL;
8842 :
8843 : /* Estimate number of output rows */
8844 3748 : partial_unique_rel->rows = estimate_num_groups(root,
8845 : sjinfo->semi_rhs_exprs,
8846 : cheapest_partial_path->rows,
8847 : NULL,
8848 : NULL);
8849 3748 : partial_unique_rel->reltarget = unique_rel->reltarget;
8850 :
8851 : /* Consider sort-based implementations, if possible. */
8852 3748 : if (sjinfo->semi_can_btree)
8853 : {
8854 : ListCell *lc;
8855 :
8856 : /*
8857 : * Use any available suitably-sorted path as input, and also consider
8858 : * sorting the cheapest partial path and incremental sort on any paths
8859 : * with presorted keys.
8860 : */
8861 7808 : foreach(lc, input_rel->partial_pathlist)
8862 : {
8863 4060 : Path *input_path = (Path *) lfirst(lc);
8864 : Path *path;
8865 : bool is_sorted;
8866 : int presorted_keys;
8867 :
8868 4060 : is_sorted = pathkeys_count_contained_in(sortPathkeys,
8869 : input_path->pathkeys,
8870 : &presorted_keys);
8871 :
8872 : /*
8873 : * Ignore paths that are not suitably or partially sorted, unless
8874 : * they are the cheapest partial path (no need to deal with paths
8875 : * which have presorted keys when incremental sort is disabled).
8876 : */
8877 4060 : if (!is_sorted && input_path != cheapest_partial_path &&
8878 0 : (presorted_keys == 0 || !enable_incremental_sort))
8879 0 : continue;
8880 :
8881 : /*
8882 : * Make a separate ProjectionPath in case we need a Result node.
8883 : */
8884 4060 : path = (Path *) create_projection_path(root,
8885 : partial_unique_rel,
8886 : input_path,
8887 4060 : partial_unique_rel->reltarget);
8888 :
8889 4060 : if (!is_sorted)
8890 : {
8891 : /*
8892 : * We've no need to consider both a sort and incremental sort.
8893 : * We'll just do a sort if there are no presorted keys and an
8894 : * incremental sort when there are presorted keys.
8895 : */
8896 3700 : if (presorted_keys == 0 || !enable_incremental_sort)
8897 3700 : path = (Path *) create_sort_path(root,
8898 : partial_unique_rel,
8899 : path,
8900 : sortPathkeys,
8901 : -1.0);
8902 : else
8903 0 : path = (Path *) create_incremental_sort_path(root,
8904 : partial_unique_rel,
8905 : path,
8906 : sortPathkeys,
8907 : presorted_keys,
8908 : -1.0);
8909 : }
8910 :
8911 4060 : path = (Path *) create_unique_path(root, partial_unique_rel, path,
8912 : list_length(sortPathkeys),
8913 : partial_unique_rel->rows);
8914 :
8915 4060 : add_partial_path(partial_unique_rel, path);
8916 : }
8917 : }
8918 :
8919 : /* Consider hash-based implementation, if possible. */
8920 3748 : if (sjinfo->semi_can_hash)
8921 : {
8922 : Path *path;
8923 :
8924 : /*
8925 : * Make a separate ProjectionPath in case we need a Result node.
8926 : */
8927 3748 : path = (Path *) create_projection_path(root,
8928 : partial_unique_rel,
8929 : cheapest_partial_path,
8930 3748 : partial_unique_rel->reltarget);
8931 :
8932 3748 : path = (Path *) create_agg_path(root,
8933 : partial_unique_rel,
8934 : path,
8935 : cheapest_partial_path->pathtarget,
8936 : AGG_HASHED,
8937 : AGGSPLIT_SIMPLE,
8938 : groupClause,
8939 : NIL,
8940 : NULL,
8941 : partial_unique_rel->rows);
8942 :
8943 3748 : add_partial_path(partial_unique_rel, path);
8944 : }
8945 :
8946 3748 : if (partial_unique_rel->partial_pathlist != NIL)
8947 : {
8948 3748 : generate_useful_gather_paths(root, partial_unique_rel, true);
8949 3748 : set_cheapest(partial_unique_rel);
8950 :
8951 : /*
8952 : * Finally, create paths to unique-ify the final result. This step is
8953 : * needed to remove any duplicates due to combining rows from parallel
8954 : * workers.
8955 : */
8956 3748 : create_final_unique_paths(root, partial_unique_rel,
8957 : sortPathkeys, groupClause,
8958 : sjinfo, unique_rel);
8959 : }
8960 : }
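Editor's note: the trailing create_final_unique_paths call above is needed because per-worker unique-ification alone cannot guarantee global uniqueness: two workers may each legitimately emit the same key once. A standalone illustration of the second pass over the gathered stream:

    #include <stdio.h>

    int main(void)
    {
        /* each worker has already unique-ified its own share of the rows */
        int worker1[] = {1, 3, 5};
        int worker2[] = {3, 4, 5};
        int gathered[6], final[6];
        int n = 0, m = 0;

        /* Gather: concatenation, which can reintroduce duplicates */
        for (int i = 0; i < 3; i++) gathered[n++] = worker1[i];
        for (int i = 0; i < 3; i++) gathered[n++] = worker2[i];

        /* second unique pass over the combined stream */
        for (int i = 0; i < n; i++)
        {
            int seen = 0;

            for (int j = 0; j < m; j++)
                if (final[j] == gathered[i])
                    seen = 1;
            if (!seen)
                final[m++] = gathered[i];
        }

        for (int j = 0; j < m; j++)
            printf("%d ", final[j]);    /* 1 3 5 4 */
        printf("\n");
        return 0;
    }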
8961 :
8962 : /*
8963 : * Choose a unique name for some subroot.
8964 : *
8965 : * Modifies glob->subplanNames to track names already used.
8966 : */
8967 : char *
8968 84576 : choose_plan_name(PlannerGlobal *glob, const char *name, bool always_number)
8969 : {
8970 : unsigned n;
8971 :
8972 : /*
8973 : * If a numeric suffix is not required, then search the list of
8974 : * previously-assigned names for a match. If none is found, then we can
8975 : * use the provided name without modification.
8976 : */
8977 84576 : if (!always_number)
8978 : {
8979 25846 : bool found = false;
8980 :
8981 61846 : foreach_ptr(char, subplan_name, glob->subplanNames)
8982 : {
8983 15878 : if (strcmp(subplan_name, name) == 0)
8984 : {
8985 5724 : found = true;
8986 5724 : break;
8987 : }
8988 : }
8989 :
8990 25846 : if (!found)
8991 : {
8992 : /* pstrdup here is just to avoid cast-away-const */
8993 20122 : char *chosen_name = pstrdup(name);
8994 :
8995 20122 : glob->subplanNames = lappend(glob->subplanNames, chosen_name);
8996 20122 : return chosen_name;
8997 : }
8998 : }
8999 :
9000 : /*
9001 : * If a numeric suffix is required or if the un-suffixed name is already
9002 : * in use, then loop until we find a positive integer that produces a
9003 : * novel name.
9004 : */
9005 64454 : for (n = 1; true; ++n)
9006 55572 : {
9007 120026 : char *proposed_name = psprintf("%s_%u", name, n);
9008 120026 : bool found = false;
9009 :
9010 458222 : foreach_ptr(char, subplan_name, glob->subplanNames)
9011 : {
9012 273742 : if (strcmp(subplan_name, proposed_name) == 0)
9013 : {
9014 55572 : found = true;
9015 55572 : break;
9016 : }
9017 : }
9018 :
9019 120026 : if (!found)
9020 : {
9021 64454 : glob->subplanNames = lappend(glob->subplanNames, proposed_name);
9022 64454 : return proposed_name;
9023 : }
9024 :
9025 55572 : pfree(proposed_name);
9026 : }
9027 : }
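Editor's note: the suffix-numbering scheme above can be shown end to end in a standalone sketch: reuse the bare name when it is free, otherwise (or when always_number is set) append the first unused "_N" suffix. The fixed-size registry here stands in for glob->subplanNames:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_NAMES 32

    static char used[MAX_NAMES][64];
    static int nused = 0;

    static bool
    name_in_use(const char *name)
    {
        for (int i = 0; i < nused; i++)
            if (strcmp(used[i], name) == 0)
                return true;
        return false;
    }

    /* Mimics choose_plan_name(): bare name first, then name_1, name_2, ... */
    static const char *
    choose_name(const char *name, bool always_number)
    {
        char *slot = used[nused];

        if (!always_number && !name_in_use(name))
            snprintf(slot, sizeof(used[0]), "%s", name);
        else
        {
            for (unsigned n = 1;; n++)
            {
                snprintf(slot, sizeof(used[0]), "%s_%u", name, n);
                if (!name_in_use(slot))
                    break;
            }
        }
        nused++;
        return slot;
    }

    int main(void)
    {
        printf("%s\n", choose_name("subplan", false)); /* subplan */
        printf("%s\n", choose_name("subplan", false)); /* subplan_1 */
        printf("%s\n", choose_name("subplan", true));  /* subplan_2 */
        return 0;
    }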
|