Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * planner.c
4 : * The query optimizer external interface.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/optimizer/plan/planner.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include <limits.h>
19 : #include <math.h>
20 :
21 : #include "access/genam.h"
22 : #include "access/parallel.h"
23 : #include "access/sysattr.h"
24 : #include "access/table.h"
25 : #include "catalog/pg_aggregate.h"
26 : #include "catalog/pg_inherits.h"
27 : #include "catalog/pg_proc.h"
28 : #include "catalog/pg_type.h"
29 : #include "executor/executor.h"
30 : #include "foreign/fdwapi.h"
31 : #include "jit/jit.h"
32 : #include "lib/bipartite_match.h"
33 : #include "lib/knapsack.h"
34 : #include "miscadmin.h"
35 : #include "nodes/makefuncs.h"
36 : #include "nodes/nodeFuncs.h"
37 : #ifdef OPTIMIZER_DEBUG
38 : #include "nodes/print.h"
39 : #endif
40 : #include "nodes/supportnodes.h"
41 : #include "optimizer/appendinfo.h"
42 : #include "optimizer/clauses.h"
43 : #include "optimizer/cost.h"
44 : #include "optimizer/optimizer.h"
45 : #include "optimizer/paramassign.h"
46 : #include "optimizer/pathnode.h"
47 : #include "optimizer/paths.h"
48 : #include "optimizer/plancat.h"
49 : #include "optimizer/planmain.h"
50 : #include "optimizer/planner.h"
51 : #include "optimizer/prep.h"
52 : #include "optimizer/subselect.h"
53 : #include "optimizer/tlist.h"
54 : #include "parser/analyze.h"
55 : #include "parser/parse_agg.h"
56 : #include "parser/parse_clause.h"
57 : #include "parser/parse_relation.h"
58 : #include "parser/parsetree.h"
59 : #include "partitioning/partdesc.h"
60 : #include "rewrite/rewriteManip.h"
61 : #include "utils/lsyscache.h"
62 : #include "utils/rel.h"
63 : #include "utils/selfuncs.h"
64 :
65 : /* GUC parameters */
66 : double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
67 : int debug_parallel_query = DEBUG_PARALLEL_OFF;
68 : bool parallel_leader_participation = true;
69 : bool enable_distinct_reordering = true;
70 :
71 : /* Hook for plugins to get control in planner() */
72 : planner_hook_type planner_hook = NULL;
73 :
74 : /* Hook for plugins to get control when grouping_planner() plans upper rels */
75 : create_upper_paths_hook_type create_upper_paths_hook = NULL;
76 :
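/*
 * Illustrative sketch (editor addition, not part of the measured planner.c
 * source): how an extension would typically install planner_hook.  The hook
 * signature matches planner() below; prev_planner_hook, my_planner, and the
 * PLANNER_HOOK_EXAMPLE guard are hypothetical names.  As the entry-point
 * comment below notes, a plugin normally chains to any previously installed
 * hook or falls back to standard_planner().
 */
#ifdef PLANNER_HOOK_EXAMPLE
static planner_hook_type prev_planner_hook = NULL;

static PlannedStmt *
my_planner(Query *parse, const char *query_string, int cursorOptions,
		   ParamListInfo boundParams)
{
	/* observe or adjust planning here, then delegate */
	if (prev_planner_hook)
		return prev_planner_hook(parse, query_string, cursorOptions,
								 boundParams);
	return standard_planner(parse, query_string, cursorOptions, boundParams);
}

void
_PG_init(void)
{
	prev_planner_hook = planner_hook;
	planner_hook = my_planner;
}
#endif							/* PLANNER_HOOK_EXAMPLE */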
77 :
78 : /* Expression kind codes for preprocess_expression */
79 : #define EXPRKIND_QUAL 0
80 : #define EXPRKIND_TARGET 1
81 : #define EXPRKIND_RTFUNC 2
82 : #define EXPRKIND_RTFUNC_LATERAL 3
83 : #define EXPRKIND_VALUES 4
84 : #define EXPRKIND_VALUES_LATERAL 5
85 : #define EXPRKIND_LIMIT 6
86 : #define EXPRKIND_APPINFO 7
87 : #define EXPRKIND_PHV 8
88 : #define EXPRKIND_TABLESAMPLE 9
89 : #define EXPRKIND_ARBITER_ELEM 10
90 : #define EXPRKIND_TABLEFUNC 11
91 : #define EXPRKIND_TABLEFUNC_LATERAL 12
92 : #define EXPRKIND_GROUPEXPR 13
93 :
94 : /*
95 : * Data specific to grouping sets
96 : */
97 : typedef struct
98 : {
99 : List *rollups;
100 : List *hash_sets_idx;
101 : double dNumHashGroups;
102 : bool any_hashable;
103 : Bitmapset *unsortable_refs;
104 : Bitmapset *unhashable_refs;
105 : List *unsortable_sets;
106 : int *tleref_to_colnum_map;
107 : } grouping_sets_data;
108 :
109 : /*
110 : * Temporary structure for use during WindowClause reordering in order to be
111 : * able to sort WindowClauses on partitioning/ordering prefix.
112 : */
113 : typedef struct
114 : {
115 : WindowClause *wc;
116 : List *uniqueOrder; /* A List of unique ordering/partitioning
117 : * clauses per Window */
118 : } WindowClauseSortData;
119 :
120 : /* Passthrough data for standard_qp_callback */
121 : typedef struct
122 : {
123 : List *activeWindows; /* active windows, if any */
124 : grouping_sets_data *gset_data; /* grouping sets data, if any */
125 : SetOperationStmt *setop; /* parent set operation or NULL if not a
126 : * subquery belonging to a set operation */
127 : } standard_qp_extra;
128 :
129 : /* Local functions */
130 : static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
131 : static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
132 : static void grouping_planner(PlannerInfo *root, double tuple_fraction,
133 : SetOperationStmt *setops);
134 : static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
135 : static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
136 : int *tleref_to_colnum_map);
137 : static void preprocess_rowmarks(PlannerInfo *root);
138 : static double preprocess_limit(PlannerInfo *root,
139 : double tuple_fraction,
140 : int64 *offset_est, int64 *count_est);
141 : static List *preprocess_groupclause(PlannerInfo *root, List *force);
142 : static List *extract_rollup_sets(List *groupingSets);
143 : static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
144 : static void standard_qp_callback(PlannerInfo *root, void *extra);
145 : static double get_number_of_groups(PlannerInfo *root,
146 : double path_rows,
147 : grouping_sets_data *gd,
148 : List *target_list);
149 : static RelOptInfo *create_grouping_paths(PlannerInfo *root,
150 : RelOptInfo *input_rel,
151 : PathTarget *target,
152 : bool target_parallel_safe,
153 : grouping_sets_data *gd);
154 : static bool is_degenerate_grouping(PlannerInfo *root);
155 : static void create_degenerate_grouping_paths(PlannerInfo *root,
156 : RelOptInfo *input_rel,
157 : RelOptInfo *grouped_rel);
158 : static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
159 : PathTarget *target, bool target_parallel_safe,
160 : Node *havingQual);
161 : static void create_ordinary_grouping_paths(PlannerInfo *root,
162 : RelOptInfo *input_rel,
163 : RelOptInfo *grouped_rel,
164 : const AggClauseCosts *agg_costs,
165 : grouping_sets_data *gd,
166 : GroupPathExtraData *extra,
167 : RelOptInfo **partially_grouped_rel_p);
168 : static void consider_groupingsets_paths(PlannerInfo *root,
169 : RelOptInfo *grouped_rel,
170 : Path *path,
171 : bool is_sorted,
172 : bool can_hash,
173 : grouping_sets_data *gd,
174 : const AggClauseCosts *agg_costs,
175 : double dNumGroups);
176 : static RelOptInfo *create_window_paths(PlannerInfo *root,
177 : RelOptInfo *input_rel,
178 : PathTarget *input_target,
179 : PathTarget *output_target,
180 : bool output_target_parallel_safe,
181 : WindowFuncLists *wflists,
182 : List *activeWindows);
183 : static void create_one_window_path(PlannerInfo *root,
184 : RelOptInfo *window_rel,
185 : Path *path,
186 : PathTarget *input_target,
187 : PathTarget *output_target,
188 : WindowFuncLists *wflists,
189 : List *activeWindows);
190 : static RelOptInfo *create_distinct_paths(PlannerInfo *root,
191 : RelOptInfo *input_rel,
192 : PathTarget *target);
193 : static void create_partial_distinct_paths(PlannerInfo *root,
194 : RelOptInfo *input_rel,
195 : RelOptInfo *final_distinct_rel,
196 : PathTarget *target);
197 : static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
198 : RelOptInfo *input_rel,
199 : RelOptInfo *distinct_rel);
200 : static List *get_useful_pathkeys_for_distinct(PlannerInfo *root,
201 : List *needed_pathkeys,
202 : List *path_pathkeys);
203 : static RelOptInfo *create_ordered_paths(PlannerInfo *root,
204 : RelOptInfo *input_rel,
205 : PathTarget *target,
206 : bool target_parallel_safe,
207 : double limit_tuples);
208 : static PathTarget *make_group_input_target(PlannerInfo *root,
209 : PathTarget *final_target);
210 : static PathTarget *make_partial_grouping_target(PlannerInfo *root,
211 : PathTarget *grouping_target,
212 : Node *havingQual);
213 : static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
214 : static void optimize_window_clauses(PlannerInfo *root,
215 : WindowFuncLists *wflists);
216 : static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
217 : static PathTarget *make_window_input_target(PlannerInfo *root,
218 : PathTarget *final_target,
219 : List *activeWindows);
220 : static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
221 : List *tlist);
222 : static PathTarget *make_sort_input_target(PlannerInfo *root,
223 : PathTarget *final_target,
224 : bool *have_postponed_srfs);
225 : static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
226 : List *targets, List *targets_contain_srfs);
227 : static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
228 : RelOptInfo *grouped_rel,
229 : RelOptInfo *partially_grouped_rel,
230 : const AggClauseCosts *agg_costs,
231 : grouping_sets_data *gd,
232 : double dNumGroups,
233 : GroupPathExtraData *extra);
234 : static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
235 : RelOptInfo *grouped_rel,
236 : RelOptInfo *input_rel,
237 : grouping_sets_data *gd,
238 : GroupPathExtraData *extra,
239 : bool force_rel_creation);
240 : static Path *make_ordered_path(PlannerInfo *root,
241 : RelOptInfo *rel,
242 : Path *path,
243 : Path *cheapest_path,
244 : List *pathkeys,
245 : double limit_tuples);
246 : static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
247 : static bool can_partial_agg(PlannerInfo *root);
248 : static void apply_scanjoin_target_to_paths(PlannerInfo *root,
249 : RelOptInfo *rel,
250 : List *scanjoin_targets,
251 : List *scanjoin_targets_contain_srfs,
252 : bool scanjoin_target_parallel_safe,
253 : bool tlist_same_exprs);
254 : static void create_partitionwise_grouping_paths(PlannerInfo *root,
255 : RelOptInfo *input_rel,
256 : RelOptInfo *grouped_rel,
257 : RelOptInfo *partially_grouped_rel,
258 : const AggClauseCosts *agg_costs,
259 : grouping_sets_data *gd,
260 : PartitionwiseAggregateType patype,
261 : GroupPathExtraData *extra);
262 : static bool group_by_has_partkey(RelOptInfo *input_rel,
263 : List *targetList,
264 : List *groupClause);
265 : static int common_prefix_cmp(const void *a, const void *b);
266 : static List *generate_setop_child_grouplist(SetOperationStmt *op,
267 : List *targetlist);
268 :
269 :
270 : /*****************************************************************************
271 : *
272 : * Query optimizer entry point
273 : *
274 : * To support loadable plugins that monitor or modify planner behavior,
275 : * we provide a hook variable that lets a plugin get control before and
276 : * after the standard planning process. The plugin would normally call
277 : * standard_planner().
278 : *
279 : * Note to plugin authors: standard_planner() scribbles on its Query input,
280 : * so you'd better copy that data structure if you want to plan more than once.
281 : *
282 : *****************************************************************************/
283 : PlannedStmt *
284 450498 : planner(Query *parse, const char *query_string, int cursorOptions,
285 : ParamListInfo boundParams)
286 : {
287 : PlannedStmt *result;
288 :
289 450498 : if (planner_hook)
290 91690 : result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
291 : else
292 358808 : result = standard_planner(parse, query_string, cursorOptions, boundParams);
293 446368 : return result;
294 : }
295 :
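/*
 * Illustrative sketch (editor addition, not part of the measured source):
 * planning the same Query twice.  Because standard_planner() scribbles on its
 * Query input, a caller that wants to re-plan must hand it a copy, as the
 * note for plugin authors above advises.  plan_query_twice and the
 * PLANNER_USAGE_EXAMPLE guard are hypothetical names.
 */
#ifdef PLANNER_USAGE_EXAMPLE
static PlannedStmt *
plan_query_twice(Query *parse, const char *query_string,
				 ParamListInfo boundParams)
{
	PlannedStmt *first;

	/* plan a throwaway copy first, keeping the original tree pristine */
	first = planner(copyObject(parse), query_string,
					CURSOR_OPT_PARALLEL_OK, boundParams);
	(void) first;

	/* the original can still be planned (and will be scribbled on) */
	return planner(parse, query_string, CURSOR_OPT_PARALLEL_OK, boundParams);
}
#endif							/* PLANNER_USAGE_EXAMPLE */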
296 : PlannedStmt *
297 450498 : standard_planner(Query *parse, const char *query_string, int cursorOptions,
298 : ParamListInfo boundParams)
299 : {
300 : PlannedStmt *result;
301 : PlannerGlobal *glob;
302 : double tuple_fraction;
303 : PlannerInfo *root;
304 : RelOptInfo *final_rel;
305 : Path *best_path;
306 : Plan *top_plan;
307 : ListCell *lp,
308 : *lr;
309 :
310 : /*
311 : * Set up global state for this planner invocation. This data is needed
312 : * across all levels of sub-Query that might exist in the given command,
313 : * so we keep it in a separate struct that's linked to by each per-Query
314 : * PlannerInfo.
315 : */
316 450498 : glob = makeNode(PlannerGlobal);
317 :
318 450498 : glob->boundParams = boundParams;
319 450498 : glob->subplans = NIL;
320 450498 : glob->subpaths = NIL;
321 450498 : glob->subroots = NIL;
322 450498 : glob->rewindPlanIDs = NULL;
323 450498 : glob->finalrtable = NIL;
324 450498 : glob->finalrteperminfos = NIL;
325 450498 : glob->finalrowmarks = NIL;
326 450498 : glob->resultRelations = NIL;
327 450498 : glob->appendRelations = NIL;
328 450498 : glob->relationOids = NIL;
329 450498 : glob->invalItems = NIL;
330 450498 : glob->paramExecTypes = NIL;
331 450498 : glob->lastPHId = 0;
332 450498 : glob->lastRowMarkId = 0;
333 450498 : glob->lastPlanNodeId = 0;
334 450498 : glob->transientPlan = false;
335 450498 : glob->dependsOnRole = false;
336 :
337 : /*
338 : * Assess whether it's feasible to use parallel mode for this query. We
339 : * can't do this in a standalone backend, or if the command will try to
340 : * modify any data, or if this is a cursor operation, or if GUCs are set
341 : * to values that don't permit parallelism, or if parallel-unsafe
342 : * functions are present in the query tree.
343 : *
344 : * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
345 : * MATERIALIZED VIEW to use parallel plans, but this is safe only because
346 : * the command is writing into a completely new table which workers won't
347 : * be able to see. If the workers could see the table, the fact that
348 : * group locking would cause them to ignore the leader's heavyweight GIN
349 : * page locks would make this unsafe. We'll have to fix that somehow if
350 : * we want to allow parallel inserts in general; updates and deletes have
351 : * additional problems especially around combo CIDs.)
352 : *
353 : * For now, we don't try to use parallel mode if we're running inside a
354 : * parallel worker. We might eventually be able to relax this
355 : * restriction, but for now it seems best not to have parallel workers
356 : * trying to create their own parallel workers.
357 : */
358 450498 : if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
359 424970 : IsUnderPostmaster &&
360 424970 : parse->commandType == CMD_SELECT &&
361 340082 : !parse->hasModifyingCTE &&
362 339942 : max_parallel_workers_per_gather > 0 &&
363 339360 : !IsParallelWorker())
364 : {
365 : /* all the cheap tests pass, so scan the query tree */
366 339284 : glob->maxParallelHazard = max_parallel_hazard(parse);
367 339284 : glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
368 : }
369 : else
370 : {
371 : /* skip the query tree scan, just assume it's unsafe */
372 111214 : glob->maxParallelHazard = PROPARALLEL_UNSAFE;
373 111214 : glob->parallelModeOK = false;
374 : }
375 :
376 : /*
377 : * glob->parallelModeNeeded is normally set to false here and changed to
378 : * true during plan creation if a Gather or Gather Merge plan is actually
379 : * created (cf. create_gather_plan, create_gather_merge_plan).
380 : *
381 : * However, if debug_parallel_query = on or debug_parallel_query =
382 : * regress, then we impose parallel mode whenever it's safe to do so, even
383 : * if the final plan doesn't use parallelism. It's not safe to do so if
384 : * the query contains anything parallel-unsafe; parallelModeOK will be
385 : * false in that case. Note that parallelModeOK can't change after this
386 : * point. Otherwise, everything in the query is either parallel-safe or
387 : * parallel-restricted, and in either case it should be OK to impose
388 : * parallel-mode restrictions. If that ends up breaking something, then
389 : * either some function the user included in the query is incorrectly
390 : * labeled as parallel-safe or parallel-restricted when in reality it's
391 : * parallel-unsafe, or else the query planner itself has a bug.
392 : */
393 733688 : glob->parallelModeNeeded = glob->parallelModeOK &&
394 283190 : (debug_parallel_query != DEBUG_PARALLEL_OFF);
395 :
396 : /* Determine what fraction of the plan is likely to be scanned */
397 450498 : if (cursorOptions & CURSOR_OPT_FAST_PLAN)
398 : {
399 : /*
400 : * We have no real idea how many tuples the user will ultimately FETCH
401 : * from a cursor, but it is often the case that he doesn't want 'em
402 : * all, or would prefer a fast-start plan anyway so that he can
403 : * process some of the tuples sooner. Use a GUC parameter to decide
404 : * what fraction to optimize for.
405 : */
406 2902 : tuple_fraction = cursor_tuple_fraction;
407 :
408 : /*
409 : * We document cursor_tuple_fraction as simply being a fraction, which
410 : * means the edge cases 0 and 1 have to be treated specially here. We
411 : * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
412 : */
413 2902 : if (tuple_fraction >= 1.0)
414 0 : tuple_fraction = 0.0;
415 2902 : else if (tuple_fraction <= 0.0)
416 0 : tuple_fraction = 1e-10;
417 : }
418 : else
419 : {
420 : /* Default assumption is we need all the tuples */
421 447596 : tuple_fraction = 0.0;
422 : }
423 :
424 : /* primary planning entry point (may recurse for subqueries) */
425 450498 : root = subquery_planner(glob, parse, NULL, false, tuple_fraction, NULL);
426 :
427 : /* Select best Path and turn it into a Plan */
428 446764 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
429 446764 : best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
430 :
431 446764 : top_plan = create_plan(root, best_path);
432 :
433 : /*
434 : * If creating a plan for a scrollable cursor, make sure it can run
435 : * backwards on demand. Add a Material node at the top at need.
436 : */
437 446368 : if (cursorOptions & CURSOR_OPT_SCROLL)
438 : {
439 264 : if (!ExecSupportsBackwardScan(top_plan))
440 30 : top_plan = materialize_finished_plan(top_plan);
441 : }
442 :
443 : /*
444 : * Optionally add a Gather node for testing purposes, provided this is
445 : * actually a safe thing to do.
446 : *
447 : * We can add Gather even when top_plan has parallel-safe initPlans, but
448 : * then we have to move the initPlans to the Gather node because of
449 : * SS_finalize_plan's limitations. That would cause cosmetic breakage of
450 : * regression tests when debug_parallel_query = regress, because initPlans
451 : * that would normally appear on the top_plan move to the Gather, causing
452 : * them to disappear from EXPLAIN output. That doesn't seem worth kluging
453 : * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
454 : */
455 446368 : if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
456 182 : top_plan->parallel_safe &&
457 84 : (top_plan->initPlan == NIL ||
458 0 : debug_parallel_query != DEBUG_PARALLEL_REGRESS))
459 : {
460 84 : Gather *gather = makeNode(Gather);
461 : Cost initplan_cost;
462 : bool unsafe_initplans;
463 :
464 84 : gather->plan.targetlist = top_plan->targetlist;
465 84 : gather->plan.qual = NIL;
466 84 : gather->plan.lefttree = top_plan;
467 84 : gather->plan.righttree = NULL;
468 84 : gather->num_workers = 1;
469 84 : gather->single_copy = true;
470 84 : gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
471 :
472 : /* Transfer any initPlans to the new top node */
473 84 : gather->plan.initPlan = top_plan->initPlan;
474 84 : top_plan->initPlan = NIL;
475 :
476 : /*
477 : * Since this Gather has no parallel-aware descendants to signal to,
478 : * we don't need a rescan Param.
479 : */
480 84 : gather->rescan_param = -1;
481 :
482 : /*
483 : * Ideally we'd use cost_gather here, but setting up dummy path data
484 : * to satisfy it doesn't seem much cleaner than knowing what it does.
485 : */
486 84 : gather->plan.startup_cost = top_plan->startup_cost +
487 : parallel_setup_cost;
488 84 : gather->plan.total_cost = top_plan->total_cost +
489 84 : parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
490 84 : gather->plan.plan_rows = top_plan->plan_rows;
491 84 : gather->plan.plan_width = top_plan->plan_width;
492 84 : gather->plan.parallel_aware = false;
493 84 : gather->plan.parallel_safe = false;
494 :
495 : /*
496 : * Delete the initplans' cost from top_plan. We needn't add it to the
497 : * Gather node, since the above coding already included it there.
498 : */
499 84 : SS_compute_initplan_cost(gather->plan.initPlan,
500 : &initplan_cost, &unsafe_initplans);
501 84 : top_plan->startup_cost -= initplan_cost;
502 84 : top_plan->total_cost -= initplan_cost;
503 :
504 : /* use parallel mode for parallel plans. */
505 84 : root->glob->parallelModeNeeded = true;
506 :
507 84 : top_plan = &gather->plan;
508 : }
509 :
510 : /*
511 : * If any Params were generated, run through the plan tree and compute
512 : * each plan node's extParam/allParam sets. Ideally we'd merge this into
513 : * set_plan_references' tree traversal, but for now it has to be separate
514 : * because we need to visit subplans before, not after, the main plan.
515 : */
516 446368 : if (glob->paramExecTypes != NIL)
517 : {
518 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
519 189666 : forboth(lp, glob->subplans, lr, glob->subroots)
520 : {
521 38880 : Plan *subplan = (Plan *) lfirst(lp);
522 38880 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
523 :
524 38880 : SS_finalize_plan(subroot, subplan);
525 : }
526 150786 : SS_finalize_plan(root, top_plan);
527 : }
528 :
529 : /* final cleanup of the plan */
530 : Assert(glob->finalrtable == NIL);
531 : Assert(glob->finalrteperminfos == NIL);
532 : Assert(glob->finalrowmarks == NIL);
533 : Assert(glob->resultRelations == NIL);
534 : Assert(glob->appendRelations == NIL);
535 446368 : top_plan = set_plan_references(root, top_plan);
536 : /* ... and the subplans (both regular subplans and initplans) */
537 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
538 485248 : forboth(lp, glob->subplans, lr, glob->subroots)
539 : {
540 38880 : Plan *subplan = (Plan *) lfirst(lp);
541 38880 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
542 :
543 38880 : lfirst(lp) = set_plan_references(subroot, subplan);
544 : }
545 :
546 : /* build the PlannedStmt result */
547 446368 : result = makeNode(PlannedStmt);
548 :
549 446368 : result->commandType = parse->commandType;
550 446368 : result->queryId = parse->queryId;
551 446368 : result->hasReturning = (parse->returningList != NIL);
552 446368 : result->hasModifyingCTE = parse->hasModifyingCTE;
553 446368 : result->canSetTag = parse->canSetTag;
554 446368 : result->transientPlan = glob->transientPlan;
555 446368 : result->dependsOnRole = glob->dependsOnRole;
556 446368 : result->parallelModeNeeded = glob->parallelModeNeeded;
557 446368 : result->planTree = top_plan;
558 446368 : result->rtable = glob->finalrtable;
559 446368 : result->permInfos = glob->finalrteperminfos;
560 446368 : result->resultRelations = glob->resultRelations;
561 446368 : result->appendRelations = glob->appendRelations;
562 446368 : result->subplans = glob->subplans;
563 446368 : result->rewindPlanIDs = glob->rewindPlanIDs;
564 446368 : result->rowMarks = glob->finalrowmarks;
565 446368 : result->relationOids = glob->relationOids;
566 446368 : result->invalItems = glob->invalItems;
567 446368 : result->paramExecTypes = glob->paramExecTypes;
568 : /* utilityStmt should be null, but we might as well copy it */
569 446368 : result->utilityStmt = parse->utilityStmt;
570 446368 : result->stmt_location = parse->stmt_location;
571 446368 : result->stmt_len = parse->stmt_len;
572 :
573 446368 : result->jitFlags = PGJIT_NONE;
574 446368 : if (jit_enabled && jit_above_cost >= 0 &&
575 445770 : top_plan->total_cost > jit_above_cost)
576 : {
577 934 : result->jitFlags |= PGJIT_PERFORM;
578 :
579 : /*
580 : * Decide how much effort should be put into generating better code.
581 : */
582 934 : if (jit_optimize_above_cost >= 0 &&
583 934 : top_plan->total_cost > jit_optimize_above_cost)
584 432 : result->jitFlags |= PGJIT_OPT3;
585 934 : if (jit_inline_above_cost >= 0 &&
586 934 : top_plan->total_cost > jit_inline_above_cost)
587 432 : result->jitFlags |= PGJIT_INLINE;
588 :
589 : /*
590 : * Decide which operations should be JITed.
591 : */
592 934 : if (jit_expressions)
593 934 : result->jitFlags |= PGJIT_EXPR;
594 934 : if (jit_tuple_deforming)
595 934 : result->jitFlags |= PGJIT_DEFORM;
596 : }
597 :
598 446368 : if (glob->partition_directory != NULL)
599 11332 : DestroyPartitionDirectory(glob->partition_directory);
600 :
601 446368 : return result;
602 : }
603 :
604 :
605 : /*--------------------
606 : * subquery_planner
607 : * Invokes the planner on a subquery. We recurse to here for each
608 : * sub-SELECT found in the query tree.
609 : *
610 : * glob is the global state for the current planner run.
611 : * parse is the querytree produced by the parser & rewriter.
612 : * parent_root is the immediate parent Query's info (NULL at the top level).
613 : * hasRecursion is true if this is a recursive WITH query.
614 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
615 : * tuple_fraction is interpreted as explained for grouping_planner, below.
616 : * setops is used for set operation subqueries to provide the subquery with
617 : * the context in which it's being used so that Paths correctly sorted for the
618 : * set operation can be generated. NULL when not planning a set operation
619 : * child, or when a child of a set op that isn't interested in sorted input.
620 : *
621 : * Basically, this routine does the stuff that should only be done once
622 : * per Query object. It then calls grouping_planner. At one time,
623 : * grouping_planner could be invoked recursively on the same Query object;
624 : * that's not currently true, but we keep the separation between the two
625 : * routines anyway, in case we need it again someday.
626 : *
627 : * subquery_planner will be called recursively to handle sub-Query nodes
628 : * found within the query's expressions and rangetable.
629 : *
630 : * Returns the PlannerInfo struct ("root") that contains all data generated
631 : * while planning the subquery. In particular, the Path(s) attached to
632 : * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
633 : * cheapest way(s) to implement the query. The top level will select the
634 : * best Path and pass it through createplan.c to produce a finished Plan.
635 : *--------------------
636 : */
637 : PlannerInfo *
638 511744 : subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root,
639 : bool hasRecursion, double tuple_fraction,
640 : SetOperationStmt *setops)
641 : {
642 : PlannerInfo *root;
643 : List *newWithCheckOptions;
644 : List *newHaving;
645 : bool hasOuterJoins;
646 : bool hasResultRTEs;
647 : RelOptInfo *final_rel;
648 : ListCell *l;
649 :
650 : /* Create a PlannerInfo data structure for this subquery */
651 511744 : root = makeNode(PlannerInfo);
652 511744 : root->parse = parse;
653 511744 : root->glob = glob;
654 511744 : root->query_level = parent_root ? parent_root->query_level + 1 : 1;
655 511744 : root->parent_root = parent_root;
656 511744 : root->plan_params = NIL;
657 511744 : root->outer_params = NULL;
658 511744 : root->planner_cxt = CurrentMemoryContext;
659 511744 : root->init_plans = NIL;
660 511744 : root->cte_plan_ids = NIL;
661 511744 : root->multiexpr_params = NIL;
662 511744 : root->join_domains = NIL;
663 511744 : root->eq_classes = NIL;
664 511744 : root->ec_merging_done = false;
665 511744 : root->last_rinfo_serial = 0;
666 511744 : root->all_result_relids =
667 511744 : parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
668 511744 : root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
669 511744 : root->append_rel_list = NIL;
670 511744 : root->row_identity_vars = NIL;
671 511744 : root->rowMarks = NIL;
672 511744 : memset(root->upper_rels, 0, sizeof(root->upper_rels));
673 511744 : memset(root->upper_targets, 0, sizeof(root->upper_targets));
674 511744 : root->processed_groupClause = NIL;
675 511744 : root->processed_distinctClause = NIL;
676 511744 : root->processed_tlist = NIL;
677 511744 : root->update_colnos = NIL;
678 511744 : root->grouping_map = NULL;
679 511744 : root->minmax_aggs = NIL;
680 511744 : root->qual_security_level = 0;
681 511744 : root->hasPseudoConstantQuals = false;
682 511744 : root->hasAlternativeSubPlans = false;
683 511744 : root->placeholdersFrozen = false;
684 511744 : root->hasRecursion = hasRecursion;
685 511744 : if (hasRecursion)
686 822 : root->wt_param_id = assign_special_exec_param(root);
687 : else
688 510922 : root->wt_param_id = -1;
689 511744 : root->non_recursive_path = NULL;
690 511744 : root->partColsUpdated = false;
691 :
692 : /*
693 : * Create the top-level join domain. This won't have valid contents until
694 : * deconstruct_jointree fills it in, but the node needs to exist before
695 : * that so we can build EquivalenceClasses referencing it.
696 : */
697 511744 : root->join_domains = list_make1(makeNode(JoinDomain));
698 :
699 : /*
700 : * If there is a WITH list, process each WITH query and either convert it
701 : * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
702 : */
703 511744 : if (parse->cteList)
704 2464 : SS_process_ctes(root);
705 :
706 : /*
707 : * If it's a MERGE command, transform the joinlist as appropriate.
708 : */
709 511738 : transform_MERGE_to_join(parse);
710 :
711 : /*
712 : * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
713 : * that we don't need so many special cases to deal with that situation.
714 : */
715 511738 : replace_empty_jointree(parse);
716 :
717 : /*
718 : * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
719 : * to transform them into joins. Note that this step does not descend
720 : * into subqueries; if we pull up any subqueries below, their SubLinks are
721 : * processed just before pulling them up.
722 : */
723 511738 : if (parse->hasSubLinks)
724 30402 : pull_up_sublinks(root);
725 :
726 : /*
727 : * Scan the rangetable for function RTEs, do const-simplification on them,
728 : * and then inline them if possible (producing subqueries that might get
729 : * pulled up next). Recursion issues here are handled in the same way as
730 : * for SubLinks.
731 : */
732 511738 : preprocess_function_rtes(root);
733 :
734 : /*
735 : * Check to see if any subqueries in the jointree can be merged into this
736 : * query.
737 : */
738 511732 : pull_up_subqueries(root);
739 :
740 : /*
741 : * If this is a simple UNION ALL query, flatten it into an appendrel. We
742 : * do this now because it requires applying pull_up_subqueries to the leaf
743 : * queries of the UNION ALL, which weren't touched above because they
744 : * weren't referenced by the jointree (they will be after we do this).
745 : */
746 511726 : if (parse->setOperations)
747 5850 : flatten_simple_union_all(root);
748 :
749 : /*
750 : * Survey the rangetable to see what kinds of entries are present. We can
751 : * skip some later processing if relevant SQL features are not used; for
752 : * example if there are no JOIN RTEs we can avoid the expense of doing
753 : * flatten_join_alias_vars(). This must be done after we have finished
754 : * adding rangetable entries, of course. (Note: actually, processing of
755 : * inherited or partitioned rels can cause RTEs for their child tables to
756 : * get added later; but those must all be RTE_RELATION entries, so they
757 : * don't invalidate the conclusions drawn here.)
758 : */
759 511726 : root->hasJoinRTEs = false;
760 511726 : root->hasLateralRTEs = false;
761 511726 : root->group_rtindex = 0;
762 511726 : hasOuterJoins = false;
763 511726 : hasResultRTEs = false;
764 1356514 : foreach(l, parse->rtable)
765 : {
766 844788 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
767 :
768 844788 : switch (rte->rtekind)
769 : {
770 434124 : case RTE_RELATION:
771 434124 : if (rte->inh)
772 : {
773 : /*
774 : * Check to see if the relation actually has any children;
775 : * if not, clear the inh flag so we can treat it as a
776 : * plain base relation.
777 : *
778 : * Note: this could give a false-positive result, if the
779 : * rel once had children but no longer does. We used to
780 : * be able to clear rte->inh later on when we discovered
781 : * that, but no more; we have to handle such cases as
782 : * full-fledged inheritance.
783 : */
784 348920 : rte->inh = has_subclass(rte->relid);
785 : }
786 434124 : break;
787 76998 : case RTE_JOIN:
788 76998 : root->hasJoinRTEs = true;
789 76998 : if (IS_OUTER_JOIN(rte->jointype))
790 43218 : hasOuterJoins = true;
791 76998 : break;
792 217510 : case RTE_RESULT:
793 217510 : hasResultRTEs = true;
794 217510 : break;
795 4406 : case RTE_GROUP:
796 : Assert(parse->hasGroupRTE);
797 4406 : root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
798 4406 : break;
799 111750 : default:
800 : /* No work here for other RTE types */
801 111750 : break;
802 : }
803 :
804 844788 : if (rte->lateral)
805 10078 : root->hasLateralRTEs = true;
806 :
807 : /*
808 : * We can also determine the maximum security level required for any
809 : * securityQuals now. Addition of inheritance-child RTEs won't affect
810 : * this, because child tables don't have their own securityQuals; see
811 : * expand_single_inheritance_child().
812 : */
813 844788 : if (rte->securityQuals)
814 2466 : root->qual_security_level = Max(root->qual_security_level,
815 : list_length(rte->securityQuals));
816 : }
817 :
818 : /*
819 : * If we have now verified that the query target relation is
820 : * non-inheriting, mark it as a leaf target.
821 : */
822 511726 : if (parse->resultRelation)
823 : {
824 91176 : RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
825 :
826 91176 : if (!rte->inh)
827 88432 : root->leaf_result_relids =
828 88432 : bms_make_singleton(parse->resultRelation);
829 : }
830 :
831 : /*
832 : * Preprocess RowMark information. We need to do this after subquery
833 : * pullup, so that all base relations are present.
834 : */
835 511726 : preprocess_rowmarks(root);
836 :
837 : /*
838 : * Set hasHavingQual to remember if HAVING clause is present. Needed
839 : * because preprocess_expression will reduce a constant-true condition to
840 : * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
841 : */
842 511726 : root->hasHavingQual = (parse->havingQual != NULL);
843 :
844 : /*
845 : * Do expression preprocessing on targetlist and quals, as well as other
846 : * random expressions in the querytree. Note that we do not need to
847 : * handle sort/group expressions explicitly, because they are actually
848 : * part of the targetlist.
849 : */
850 508070 : parse->targetList = (List *)
851 511726 : preprocess_expression(root, (Node *) parse->targetList,
852 : EXPRKIND_TARGET);
853 :
854 508070 : newWithCheckOptions = NIL;
855 510402 : foreach(l, parse->withCheckOptions)
856 : {
857 2332 : WithCheckOption *wco = lfirst_node(WithCheckOption, l);
858 :
859 2332 : wco->qual = preprocess_expression(root, wco->qual,
860 : EXPRKIND_QUAL);
861 2332 : if (wco->qual != NULL)
862 1932 : newWithCheckOptions = lappend(newWithCheckOptions, wco);
863 : }
864 508070 : parse->withCheckOptions = newWithCheckOptions;
865 :
866 508070 : parse->returningList = (List *)
867 508070 : preprocess_expression(root, (Node *) parse->returningList,
868 : EXPRKIND_TARGET);
869 :
870 508070 : preprocess_qual_conditions(root, (Node *) parse->jointree);
871 :
872 508070 : parse->havingQual = preprocess_expression(root, parse->havingQual,
873 : EXPRKIND_QUAL);
874 :
875 510628 : foreach(l, parse->windowClause)
876 : {
877 2558 : WindowClause *wc = lfirst_node(WindowClause, l);
878 :
879 : /* partitionClause/orderClause are sort/group expressions */
880 2558 : wc->startOffset = preprocess_expression(root, wc->startOffset,
881 : EXPRKIND_LIMIT);
882 2558 : wc->endOffset = preprocess_expression(root, wc->endOffset,
883 : EXPRKIND_LIMIT);
884 : }
885 :
886 508070 : parse->limitOffset = preprocess_expression(root, parse->limitOffset,
887 : EXPRKIND_LIMIT);
888 508070 : parse->limitCount = preprocess_expression(root, parse->limitCount,
889 : EXPRKIND_LIMIT);
890 :
891 508070 : if (parse->onConflict)
892 : {
893 3628 : parse->onConflict->arbiterElems = (List *)
894 1814 : preprocess_expression(root,
895 1814 : (Node *) parse->onConflict->arbiterElems,
896 : EXPRKIND_ARBITER_ELEM);
897 3628 : parse->onConflict->arbiterWhere =
898 1814 : preprocess_expression(root,
899 1814 : parse->onConflict->arbiterWhere,
900 : EXPRKIND_QUAL);
901 3628 : parse->onConflict->onConflictSet = (List *)
902 1814 : preprocess_expression(root,
903 1814 : (Node *) parse->onConflict->onConflictSet,
904 : EXPRKIND_TARGET);
905 1814 : parse->onConflict->onConflictWhere =
906 1814 : preprocess_expression(root,
907 1814 : parse->onConflict->onConflictWhere,
908 : EXPRKIND_QUAL);
909 : /* exclRelTlist contains only Vars, so no preprocessing needed */
910 : }
911 :
912 510764 : foreach(l, parse->mergeActionList)
913 : {
914 2694 : MergeAction *action = (MergeAction *) lfirst(l);
915 :
916 2694 : action->targetList = (List *)
917 2694 : preprocess_expression(root,
918 2694 : (Node *) action->targetList,
919 : EXPRKIND_TARGET);
920 2694 : action->qual =
921 2694 : preprocess_expression(root,
922 : (Node *) action->qual,
923 : EXPRKIND_QUAL);
924 : }
925 :
926 508070 : parse->mergeJoinCondition =
927 508070 : preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
928 :
929 508070 : root->append_rel_list = (List *)
930 508070 : preprocess_expression(root, (Node *) root->append_rel_list,
931 : EXPRKIND_APPINFO);
932 :
933 : /* Also need to preprocess expressions within RTEs */
934 1348924 : foreach(l, parse->rtable)
935 : {
936 840854 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
937 : int kind;
938 : ListCell *lcsq;
939 :
940 840854 : if (rte->rtekind == RTE_RELATION)
941 : {
942 433858 : if (rte->tablesample)
943 222 : rte->tablesample = (TableSampleClause *)
944 222 : preprocess_expression(root,
945 222 : (Node *) rte->tablesample,
946 : EXPRKIND_TABLESAMPLE);
947 : }
948 406996 : else if (rte->rtekind == RTE_SUBQUERY)
949 : {
950 : /*
951 : * We don't want to do all preprocessing yet on the subquery's
952 : * expressions, since that will happen when we plan it. But if it
953 : * contains any join aliases of our level, those have to get
954 : * expanded now, because planning of the subquery won't do it.
955 : * That's only possible if the subquery is LATERAL.
956 : */
957 54960 : if (rte->lateral && root->hasJoinRTEs)
958 998 : rte->subquery = (Query *)
959 998 : flatten_join_alias_vars(root, root->parse,
960 998 : (Node *) rte->subquery);
961 : }
962 352036 : else if (rte->rtekind == RTE_FUNCTION)
963 : {
964 : /* Preprocess the function expression(s) fully */
965 43704 : kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
966 43704 : rte->functions = (List *)
967 43704 : preprocess_expression(root, (Node *) rte->functions, kind);
968 : }
969 308332 : else if (rte->rtekind == RTE_TABLEFUNC)
970 : {
971 : /* Preprocess the function expression(s) fully */
972 626 : kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
973 626 : rte->tablefunc = (TableFunc *)
974 626 : preprocess_expression(root, (Node *) rte->tablefunc, kind);
975 : }
976 307706 : else if (rte->rtekind == RTE_VALUES)
977 : {
978 : /* Preprocess the values lists fully */
979 7988 : kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
980 7988 : rte->values_lists = (List *)
981 7988 : preprocess_expression(root, (Node *) rte->values_lists, kind);
982 : }
983 299718 : else if (rte->rtekind == RTE_GROUP)
984 : {
985 : /* Preprocess the groupexprs list fully */
986 4406 : rte->groupexprs = (List *)
987 4406 : preprocess_expression(root, (Node *) rte->groupexprs,
988 : EXPRKIND_GROUPEXPR);
989 : }
990 :
991 : /*
992 : * Process each element of the securityQuals list as if it were a
993 : * separate qual expression (as indeed it is). We need to do it this
994 : * way to get proper canonicalization of AND/OR structure. Note that
995 : * this converts each element into an implicit-AND sublist.
996 : */
997 843666 : foreach(lcsq, rte->securityQuals)
998 : {
999 2812 : lfirst(lcsq) = preprocess_expression(root,
1000 2812 : (Node *) lfirst(lcsq),
1001 : EXPRKIND_QUAL);
1002 : }
1003 : }
1004 :
1005 : /*
1006 : * Now that we are done preprocessing expressions, and in particular done
1007 : * flattening join alias variables, get rid of the joinaliasvars lists.
1008 : * They no longer match what expressions in the rest of the tree look
1009 : * like, because we have not preprocessed expressions in those lists (and
1010 : * do not want to; for example, expanding a SubLink there would result in
1011 : * a useless unreferenced subplan). Leaving them in place simply creates
1012 : * a hazard for later scans of the tree. We could try to prevent that by
1013 : * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1014 : * but that doesn't sound very reliable.
1015 : */
1016 508070 : if (root->hasJoinRTEs)
1017 : {
1018 267038 : foreach(l, parse->rtable)
1019 : {
1020 219912 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1021 :
1022 219912 : rte->joinaliasvars = NIL;
1023 : }
1024 : }
1025 :
1026 : /*
1027 : * Replace any Vars in the subquery's targetlist and havingQual that
1028 : * reference GROUP outputs with the underlying grouping expressions.
1029 : *
1030 : * Note that we need to perform this replacement after we've preprocessed
1031 : * the grouping expressions. This is to ensure that there is only one
1032 : * instance of SubPlan for each SubLink contained within the grouping
1033 : * expressions.
1034 : */
1035 508070 : if (parse->hasGroupRTE)
1036 : {
1037 4406 : parse->targetList = (List *)
1038 4406 : flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1039 4406 : parse->havingQual =
1040 4406 : flatten_group_exprs(root, root->parse, parse->havingQual);
1041 : }
1042 :
1043 : /* Constant-folding might have removed all set-returning functions */
1044 508070 : if (parse->hasTargetSRFs)
1045 8740 : parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1046 :
1047 : /*
1048 : * In some cases we may want to transfer a HAVING clause into WHERE. We
1049 : * cannot do so if the HAVING clause contains aggregates (obviously) or
1050 : * volatile functions (since a HAVING clause is supposed to be executed
1051 : * only once per group). We also can't do this if there are any nonempty
1052 : * grouping sets and the clause references any columns that are nullable
1053 : * by the grouping sets; moving such a clause into WHERE would potentially
1054 : * change the results. (If there are only empty grouping sets, then the
1055 : * HAVING clause must be degenerate as discussed below.)
1056 : *
1057 : * Also, it may be that the clause is so expensive to execute that we're
1058 : * better off doing it only once per group, despite the loss of
1059 : * selectivity. This is hard to estimate short of doing the entire
1060 : * planning process twice, so we use a heuristic: clauses containing
1061 : * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1062 : * clause into WHERE, in hopes of eliminating tuples before aggregation
1063 : * instead of after.
1064 : *
1065 : * If the query has explicit grouping then we can simply move such a
1066 : * clause into WHERE; any group that fails the clause will not be in the
1067 : * output because none of its tuples will reach the grouping or
1068 : * aggregation stage. Otherwise we must have a degenerate (variable-free)
1069 : * HAVING clause, which we put in WHERE so that query_planner() can use it
1070 : * in a gating Result node, but also keep in HAVING to ensure that we
1071 : * don't emit a bogus aggregated row. (This could be done better, but it
1072 : * seems not worth optimizing.)
1073 : *
1074 : * Note that a HAVING clause may contain expressions that are not fully
1075 : * preprocessed. This can happen if these expressions are part of
1076 : * grouping items. In such cases, they are replaced with GROUP Vars in
1077 : * the parser and then replaced back after we're done with expression
1078 : * preprocessing on havingQual. This is not an issue if the clause
1079 : * remains in HAVING, because these expressions will be matched to lower
1080 : * target items in setrefs.c. However, if the clause is moved or copied
1081 : * into WHERE, we need to ensure that these expressions are fully
1082 : * preprocessed.
1083 : *
1084 : * Note that both havingQual and parse->jointree->quals are in
1085 : * implicitly-ANDed-list form at this point, even though they are declared
1086 : * as Node *.
1087 : */
1088 508070 : newHaving = NIL;
1089 509516 : foreach(l, (List *) parse->havingQual)
1090 : {
1091 1446 : Node *havingclause = (Node *) lfirst(l);
1092 :
1093 1762 : if (contain_agg_clause(havingclause) ||
1094 632 : contain_volatile_functions(havingclause) ||
1095 316 : contain_subplans(havingclause) ||
1096 382 : (parse->groupClause && parse->groupingSets &&
1097 66 : bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1098 : {
1099 : /* keep it in HAVING */
1100 1184 : newHaving = lappend(newHaving, havingclause);
1101 : }
1102 262 : else if (parse->groupClause)
1103 : {
1104 : Node *whereclause;
1105 :
1106 : /* Preprocess the HAVING clause fully */
1107 244 : whereclause = preprocess_expression(root, havingclause,
1108 : EXPRKIND_QUAL);
1109 : /* ... and move it to WHERE */
1110 244 : parse->jointree->quals = (Node *)
1111 244 : list_concat((List *) parse->jointree->quals,
1112 : (List *) whereclause);
1113 : }
1114 : else
1115 : {
1116 : Node *whereclause;
1117 :
1118 : /* Preprocess the HAVING clause fully */
1119 18 : whereclause = preprocess_expression(root, copyObject(havingclause),
1120 : EXPRKIND_QUAL);
1121 : /* ... and put a copy in WHERE */
1122 36 : parse->jointree->quals = (Node *)
1123 18 : list_concat((List *) parse->jointree->quals,
1124 : (List *) whereclause);
1125 : /* ... and also keep it in HAVING */
1126 18 : newHaving = lappend(newHaving, havingclause);
1127 : }
1128 : }
1129 508070 : parse->havingQual = (Node *) newHaving;
1130 :
1131 : /*
1132 : * If we have any outer joins, try to reduce them to plain inner joins.
1133 : * This step is most easily done after we've done expression
1134 : * preprocessing.
1135 : */
1136 508070 : if (hasOuterJoins)
1137 30408 : reduce_outer_joins(root);
1138 :
1139 : /*
1140 : * If we have any RTE_RESULT relations, see if they can be deleted from
1141 : * the jointree. We also rely on this processing to flatten single-child
1142 : * FromExprs underneath outer joins. This step is most effectively done
1143 : * after we've done expression preprocessing and outer join reduction.
1144 : */
1145 508070 : if (hasResultRTEs || hasOuterJoins)
1146 243222 : remove_useless_result_rtes(root);
1147 :
1148 : /*
1149 : * Do the main planning.
1150 : */
1151 508070 : grouping_planner(root, tuple_fraction, setops);
1152 :
1153 : /*
1154 : * Capture the set of outer-level param IDs we have access to, for use in
1155 : * extParam/allParam calculations later.
1156 : */
1157 508004 : SS_identify_outer_params(root);
1158 :
1159 : /*
1160 : * If any initPlans were created in this query level, adjust the surviving
1161 : * Paths' costs and parallel-safety flags to account for them. The
1162 : * initPlans won't actually get attached to the plan tree till
1163 : * create_plan() runs, but we must include their effects now.
1164 : */
1165 508004 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1166 508004 : SS_charge_for_initplans(root, final_rel);
1167 :
1168 : /*
1169 : * Make sure we've identified the cheapest Path for the final rel. (By
1170 : * doing this here not in grouping_planner, we include initPlan costs in
1171 : * the decision, though it's unlikely that will change anything.)
1172 : */
1173 508004 : set_cheapest(final_rel);
1174 :
1175 508004 : return root;
1176 : }
1177 :
1178 : /*
1179 : * preprocess_expression
1180 : * Do subquery_planner's preprocessing work for an expression,
1181 : * which can be a targetlist, a WHERE clause (including JOIN/ON
1182 : * conditions), a HAVING clause, or a few other things.
1183 : */
1184 : static Node *
1185 4240952 : preprocess_expression(PlannerInfo *root, Node *expr, int kind)
1186 : {
1187 : /*
1188 : * Fall out quickly if expression is empty. This occurs often enough to
1189 : * be worth checking. Note that null->null is the correct conversion for
1190 : * implicit-AND result format, too.
1191 : */
1192 4240952 : if (expr == NULL)
1193 3382160 : return NULL;
1194 :
1195 : /*
1196 : * If the query has any join RTEs, replace join alias variables with
1197 : * base-relation variables. We must do this first, since any expressions
1198 : * we may extract from the joinaliasvars lists have not been preprocessed.
1199 : * For example, if we did this after sublink processing, sublinks expanded
1200 : * out from join aliases would not get processed. But we can skip this in
1201 : * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1202 : * they can't contain any Vars of the current query level.
1203 : */
1204 858792 : if (root->hasJoinRTEs &&
1205 339044 : !(kind == EXPRKIND_RTFUNC ||
1206 169324 : kind == EXPRKIND_VALUES ||
1207 : kind == EXPRKIND_TABLESAMPLE ||
1208 : kind == EXPRKIND_TABLEFUNC))
1209 169312 : expr = flatten_join_alias_vars(root, root->parse, expr);
1210 :
1211 : /*
1212 : * Simplify constant expressions. For function RTEs, this was already
1213 : * done by preprocess_function_rtes. (But note we must do it again for
1214 : * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1215 : * un-simplified subexpressions inserted by flattening of subqueries or
1216 : * join alias variables.)
1217 : *
1218 : * Note: an essential effect of this is to convert named-argument function
1219 : * calls to positional notation and insert the current actual values of
1220 : * any default arguments for functions. To ensure that happens, we *must*
1221 : * process all expressions here. Previous PG versions sometimes skipped
1222 : * const-simplification if it didn't seem worth the trouble, but we can't
1223 : * do that anymore.
1224 : *
1225 : * Note: this also flattens nested AND and OR expressions into N-argument
1226 : * form. All processing of a qual expression after this point must be
1227 : * careful to maintain AND/OR flatness --- that is, do not generate a tree
1228 : * with AND directly under AND, nor OR directly under OR.
1229 : */
1230 858792 : if (kind != EXPRKIND_RTFUNC)
1231 823304 : expr = eval_const_expressions(root, expr);
1232 :
1233 : /*
1234 : * If it's a qual or havingQual, canonicalize it.
1235 : */
1236 855136 : if (kind == EXPRKIND_QUAL)
1237 : {
1238 292536 : expr = (Node *) canonicalize_qual((Expr *) expr, false);
1239 :
1240 : #ifdef OPTIMIZER_DEBUG
1241 : printf("After canonicalize_qual()\n");
1242 : pprint(expr);
1243 : #endif
1244 : }
1245 :
1246 : /*
1247 : * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1248 : * hashfuncid of any that might execute more quickly by using hash lookups
1249 : * instead of a linear search.
1250 : */
1251 855136 : if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1252 : {
1253 788172 : convert_saop_to_hashed_saop(expr);
1254 : }
1255 :
1256 : /* Expand SubLinks to SubPlans */
1257 855136 : if (root->parse->hasSubLinks)
1258 84550 : expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1259 :
1260 : /*
1261 : * XXX do not insert anything here unless you have grokked the comments in
1262 : * SS_replace_correlation_vars ...
1263 : */
1264 :
1265 : /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1266 855136 : if (root->query_level > 1)
1267 136948 : expr = SS_replace_correlation_vars(root, expr);
1268 :
1269 : /*
1270 : * If it's a qual or havingQual, convert it to implicit-AND format. (We
1271 : * don't want to do this before eval_const_expressions, since the latter
1272 : * would be unable to simplify a top-level AND correctly. Also,
1273 : * SS_process_sublinks expects explicit-AND format.)
1274 : */
1275 855136 : if (kind == EXPRKIND_QUAL)
1276 292536 : expr = (Node *) make_ands_implicit((Expr *) expr);
1277 :
1278 855136 : return expr;
1279 : }
1280 :
1281 : /*
1282 : * preprocess_qual_conditions
1283 : * Recursively scan the query's jointree and do subquery_planner's
1284 : * preprocessing work on each qual condition found therein.
1285 : */
1286 : static void
1287 1231040 : preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1288 : {
1289 1231040 : if (jtnode == NULL)
1290 0 : return;
1291 1231040 : if (IsA(jtnode, RangeTblRef))
1292 : {
1293 : /* nothing to do here */
1294 : }
1295 600622 : else if (IsA(jtnode, FromExpr))
1296 : {
1297 519170 : FromExpr *f = (FromExpr *) jtnode;
1298 : ListCell *l;
1299 :
1300 1079236 : foreach(l, f->fromlist)
1301 560066 : preprocess_qual_conditions(root, lfirst(l));
1302 :
1303 519170 : f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1304 : }
1305 81452 : else if (IsA(jtnode, JoinExpr))
1306 : {
1307 81452 : JoinExpr *j = (JoinExpr *) jtnode;
1308 :
1309 81452 : preprocess_qual_conditions(root, j->larg);
1310 81452 : preprocess_qual_conditions(root, j->rarg);
1311 :
1312 81452 : j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1313 : }
1314 : else
1315 0 : elog(ERROR, "unrecognized node type: %d",
1316 : (int) nodeTag(jtnode));
1317 : }
1318 :
1319 : /*
1320 : * preprocess_phv_expression
1321 : * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1322 : *
1323 : * If a LATERAL subquery references an output of another subquery, and that
1324 : * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1325 : * join, then we'll push the PlaceHolderVar expression down into the subquery
1326 : * and later pull it back up during find_lateral_references, which runs after
1327 : * subquery_planner has preprocessed all the expressions that were in the
1328 : * current query level to start with. So we need to preprocess it then.
1329 : */
1330 : Expr *
1331 72 : preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1332 : {
1333 72 : return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1334 : }
1335 :
1336 : /*--------------------
1337 : * grouping_planner
1338 : * Perform planning steps related to grouping, aggregation, etc.
1339 : *
1340 : * This function adds all required top-level processing to the scan/join
1341 : * Path(s) produced by query_planner.
1342 : *
1343 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
1344 : * tuple_fraction is interpreted as follows:
1345 : * 0: expect all tuples to be retrieved (normal case)
1346 : * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1347 : * from the plan to be retrieved
1348 : * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1349 : * expected to be retrieved (ie, a LIMIT specification).
1350 : * setops is used for set operation subqueries to provide the subquery with
1351 : * the context in which it's being used so that Paths correctly sorted for the
1352 : * set operation can be generated. NULL when not planning a set operation
1353 : * child, or when a child of a set op that isn't interested in sorted input.
1354 : *
1355 : * Returns nothing; the useful output is in the Paths we attach to the
1356 : * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1357 : * root->processed_tlist contains the final processed targetlist.
1358 : *
1359 : * Note that we have not done set_cheapest() on the final rel; it's convenient
1360 : * to leave this to the caller.
1361 : *--------------------
1362 : */
1363 : static void
1364 508070 : grouping_planner(PlannerInfo *root, double tuple_fraction,
1365 : SetOperationStmt *setops)
1366 : {
1367 508070 : Query *parse = root->parse;
1368 508070 : int64 offset_est = 0;
1369 508070 : int64 count_est = 0;
1370 508070 : double limit_tuples = -1.0;
1371 508070 : bool have_postponed_srfs = false;
1372 : PathTarget *final_target;
1373 : List *final_targets;
1374 : List *final_targets_contain_srfs;
1375 : bool final_target_parallel_safe;
1376 : RelOptInfo *current_rel;
1377 : RelOptInfo *final_rel;
1378 : FinalPathExtraData extra;
1379 : ListCell *lc;
1380 :
1381 : /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1382 508070 : if (parse->limitCount || parse->limitOffset)
1383 : {
1384 4714 : tuple_fraction = preprocess_limit(root, tuple_fraction,
1385 : &offset_est, &count_est);
1386 :
1387 : /*
1388 : * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1389 : * estimate the effects of using a bounded sort.
1390 : */
1391 4714 : if (count_est > 0 && offset_est >= 0)
1392 4244 : limit_tuples = (double) count_est + (double) offset_est;
1393 : }
1394 :
1395 : /* Make tuple_fraction accessible to lower-level routines */
1396 508070 : root->tuple_fraction = tuple_fraction;
1397 :
1398 508070 : if (parse->setOperations)
1399 : {
1400 : /*
1401 : * Construct Paths for set operations. The results will not need any
1402 : * work except perhaps a top-level sort and/or LIMIT. Note that any
1403 : * special work for recursive unions is the responsibility of
1404 : * plan_set_operations.
1405 : */
1406 5502 : current_rel = plan_set_operations(root);
1407 :
1408 : /*
1409 : * We should not need to call preprocess_targetlist, since we must be
1410 : * in a SELECT query node. Instead, use the processed_tlist returned
1411 : * by plan_set_operations (since this tells whether it returned any
1412 : * resjunk columns!), and transfer any sort key information from the
1413 : * original tlist.
1414 : */
1415 : Assert(parse->commandType == CMD_SELECT);
1416 :
1417 : /* for safety, copy processed_tlist instead of modifying in-place */
1418 5496 : root->processed_tlist =
1419 5496 : postprocess_setop_tlist(copyObject(root->processed_tlist),
1420 : parse->targetList);
1421 :
1422 : /* Also extract the PathTarget form of the setop result tlist */
1423 5496 : final_target = current_rel->cheapest_total_path->pathtarget;
1424 :
1425 : /* And check whether it's parallel safe */
1426 : final_target_parallel_safe =
1427 5496 : is_parallel_safe(root, (Node *) final_target->exprs);
1428 :
1429 : /* The setop result tlist couldn't contain any SRFs */
1430 : Assert(!parse->hasTargetSRFs);
1431 5496 : final_targets = final_targets_contain_srfs = NIL;
1432 :
1433 : /*
1434 : * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1435 : * checked already, but let's make sure).
1436 : */
1437 5496 : if (parse->rowMarks)
1438 0 : ereport(ERROR,
1439 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1440 : /*------
1441 : translator: %s is a SQL row locking clause such as FOR UPDATE */
1442 : errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1443 : LCS_asString(linitial_node(RowMarkClause,
1444 : parse->rowMarks)->strength))));
1445 :
1446 : /*
1447 : * Calculate pathkeys that represent result ordering requirements
1448 : */
1449 : Assert(parse->distinctClause == NIL);
1450 5496 : root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1451 : parse->sortClause,
1452 : root->processed_tlist);
1453 : }
1454 : else
1455 : {
1456 : /* No set operations, do regular planning */
1457 : PathTarget *sort_input_target;
1458 : List *sort_input_targets;
1459 : List *sort_input_targets_contain_srfs;
1460 : bool sort_input_target_parallel_safe;
1461 : PathTarget *grouping_target;
1462 : List *grouping_targets;
1463 : List *grouping_targets_contain_srfs;
1464 : bool grouping_target_parallel_safe;
1465 : PathTarget *scanjoin_target;
1466 : List *scanjoin_targets;
1467 : List *scanjoin_targets_contain_srfs;
1468 : bool scanjoin_target_parallel_safe;
1469 : bool scanjoin_target_same_exprs;
1470 : bool have_grouping;
1471 502568 : WindowFuncLists *wflists = NULL;
1472 502568 : List *activeWindows = NIL;
1473 502568 : grouping_sets_data *gset_data = NULL;
1474 : standard_qp_extra qp_extra;
1475 :
1476 : /* A recursive query should always have setOperations */
1477 : Assert(!root->hasRecursion);
1478 :
1479 : /* Preprocess grouping sets and GROUP BY clause, if any */
1480 502568 : if (parse->groupingSets)
1481 : {
1482 854 : gset_data = preprocess_grouping_sets(root);
1483 : }
1484 501714 : else if (parse->groupClause)
1485 : {
1486 : /* Preprocess regular GROUP BY clause, if any */
1487 3594 : root->processed_groupClause = preprocess_groupclause(root, NIL);
1488 : }
1489 :
1490 : /*
1491 : * Preprocess targetlist. Note that much of the remaining planning
1492 : * work will be done with the PathTarget representation of tlists, but
1493 : * we must also maintain the full representation of the final tlist so
1494 : * that we can transfer its decoration (resnames etc) to the topmost
1495 : * tlist of the finished Plan. This is kept in processed_tlist.
1496 : */
1497 502562 : preprocess_targetlist(root);
1498 :
1499 : /*
1500 : * Mark all the aggregates with resolved aggtranstypes, and detect
1501 : * aggregates that are duplicates or can share transition state. We
1502 : * must do this before slicing and dicing the tlist into various
1503 : * pathtargets, else some copies of the Aggref nodes might escape
1504 : * being marked.
1505 : */
1506 502562 : if (parse->hasAggs)
1507 : {
1508 39182 : preprocess_aggrefs(root, (Node *) root->processed_tlist);
1509 39182 : preprocess_aggrefs(root, (Node *) parse->havingQual);
1510 : }
1511 :
1512 : /*
1513 : * Locate any window functions in the tlist. (We don't need to look
1514 : * anywhere else, since expressions used in ORDER BY will be in there
1515 : * too.) Note that they could all have been eliminated by constant
1516 : * folding, in which case we don't need to do any more work.
1517 : */
1518 502562 : if (parse->hasWindowFuncs)
1519 : {
1520 2342 : wflists = find_window_functions((Node *) root->processed_tlist,
1521 2342 : list_length(parse->windowClause));
1522 2342 : if (wflists->numWindowFuncs > 0)
1523 : {
1524 : /*
1525 : * See if any modifications can be made to each WindowClause
1526 : * to allow the executor to execute the WindowFuncs more
1527 : * quickly.
1528 : */
1529 2336 : optimize_window_clauses(root, wflists);
1530 :
1531 2336 : activeWindows = select_active_windows(root, wflists);
1532 : }
1533 : else
1534 6 : parse->hasWindowFuncs = false;
1535 : }
1536 :
1537 : /*
1538 : * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1539 : * adding logic between here and the query_planner() call. Anything
1540 : * that is needed in MIN/MAX-optimizable cases will have to be
1541 : * duplicated in planagg.c.
1542 : */
1543 502562 : if (parse->hasAggs)
1544 39182 : preprocess_minmax_aggregates(root);
1545 :
1546 : /*
1547 : * Figure out whether there's a hard limit on the number of rows that
1548 : * query_planner's result subplan needs to return. Even if we know a
1549 : * hard limit overall, it doesn't apply if the query has any
1550 : * grouping/aggregation operations, or SRFs in the tlist.
1551 : */
1552 502562 : if (parse->groupClause ||
1553 498162 : parse->groupingSets ||
1554 498120 : parse->distinctClause ||
1555 495736 : parse->hasAggs ||
1556 460420 : parse->hasWindowFuncs ||
1557 458222 : parse->hasTargetSRFs ||
1558 449924 : root->hasHavingQual)
1559 52656 : root->limit_tuples = -1.0;
1560 : else
1561 449906 : root->limit_tuples = limit_tuples;
1562 :
1563 : /* Set up data needed by standard_qp_callback */
1564 502562 : qp_extra.activeWindows = activeWindows;
1565 502562 : qp_extra.gset_data = gset_data;
1566 :
1567 : /*
1568 : * If we're a subquery for a set operation, store the SetOperationStmt
1569 : * in qp_extra.
1570 : */
1571 502562 : qp_extra.setop = setops;
1572 :
1573 : /*
1574 : * Generate the best unsorted and presorted paths for the scan/join
1575 : * portion of this Query, ie the processing represented by the
1576 : * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1577 : * We also generate (in standard_qp_callback) pathkey representations
1578 : * of the query's sort clause, distinct clause, etc.
1579 : */
1580 502562 : current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1581 :
1582 : /*
1583 : * Convert the query's result tlist into PathTarget format.
1584 : *
1585 : * Note: this cannot be done before query_planner() has performed
1586 : * appendrel expansion, because that might add resjunk entries to
1587 : * root->processed_tlist. Waiting till afterwards is also helpful
1588 : * because the target width estimates can use per-Var width numbers
1589 : * that were obtained within query_planner().
1590 : */
1591 502514 : final_target = create_pathtarget(root, root->processed_tlist);
1592 : final_target_parallel_safe =
1593 502514 : is_parallel_safe(root, (Node *) final_target->exprs);
1594 :
1595 : /*
1596 : * If ORDER BY was given, consider whether we should use a post-sort
1597 : * projection, and compute the adjusted target for preceding steps if
1598 : * so.
1599 : */
1600 502514 : if (parse->sortClause)
1601 : {
1602 59726 : sort_input_target = make_sort_input_target(root,
1603 : final_target,
1604 : &have_postponed_srfs);
1605 : sort_input_target_parallel_safe =
1606 59726 : is_parallel_safe(root, (Node *) sort_input_target->exprs);
1607 : }
1608 : else
1609 : {
1610 442788 : sort_input_target = final_target;
1611 442788 : sort_input_target_parallel_safe = final_target_parallel_safe;
1612 : }
1613 :
1614 : /*
1615 : * If we have window functions to deal with, the output from any
1616 : * grouping step needs to be what the window functions want;
1617 : * otherwise, it should be sort_input_target.
1618 : */
1619 502514 : if (activeWindows)
1620 : {
1621 2336 : grouping_target = make_window_input_target(root,
1622 : final_target,
1623 : activeWindows);
1624 : grouping_target_parallel_safe =
1625 2336 : is_parallel_safe(root, (Node *) grouping_target->exprs);
1626 : }
1627 : else
1628 : {
1629 500178 : grouping_target = sort_input_target;
1630 500178 : grouping_target_parallel_safe = sort_input_target_parallel_safe;
1631 : }
1632 :
1633 : /*
1634 : * If we have grouping or aggregation to do, the topmost scan/join
1635 : * plan node must emit what the grouping step wants; otherwise, it
1636 : * should emit grouping_target.
1637 : */
1638 498114 : have_grouping = (parse->groupClause || parse->groupingSets ||
1639 1000628 : parse->hasAggs || root->hasHavingQual);
1640 502514 : if (have_grouping)
1641 : {
1642 39814 : scanjoin_target = make_group_input_target(root, final_target);
1643 : scanjoin_target_parallel_safe =
1644 39814 : is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1645 : }
1646 : else
1647 : {
1648 462700 : scanjoin_target = grouping_target;
1649 462700 : scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1650 : }
1651 :
1652 : /*
1653 : * If there are any SRFs in the targetlist, we must separate each of
1654 : * these PathTargets into SRF-computing and SRF-free targets. Replace
1655 : * each of the named targets with a SRF-free version, and remember the
1656 : * list of additional projection steps we need to add afterwards.
1657 : */
1658 502514 : if (parse->hasTargetSRFs)
1659 : {
1660 : /* final_target doesn't recompute any SRFs in sort_input_target */
1661 8740 : split_pathtarget_at_srfs(root, final_target, sort_input_target,
1662 : &final_targets,
1663 : &final_targets_contain_srfs);
1664 8740 : final_target = linitial_node(PathTarget, final_targets);
1665 : Assert(!linitial_int(final_targets_contain_srfs));
1666 : /* likewise for sort_input_target vs. grouping_target */
1667 8740 : split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1668 : &sort_input_targets,
1669 : &sort_input_targets_contain_srfs);
1670 8740 : sort_input_target = linitial_node(PathTarget, sort_input_targets);
1671 : Assert(!linitial_int(sort_input_targets_contain_srfs));
1672 : /* likewise for grouping_target vs. scanjoin_target */
1673 8740 : split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1674 : &grouping_targets,
1675 : &grouping_targets_contain_srfs);
1676 8740 : grouping_target = linitial_node(PathTarget, grouping_targets);
1677 : Assert(!linitial_int(grouping_targets_contain_srfs));
1678 : /* scanjoin_target will not have any SRFs precomputed for it */
1679 8740 : split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1680 : &scanjoin_targets,
1681 : &scanjoin_targets_contain_srfs);
1682 8740 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1683 : Assert(!linitial_int(scanjoin_targets_contain_srfs));
1684 : }
1685 : else
1686 : {
1687 : /* initialize lists; for most of these, dummy values are OK */
1688 493774 : final_targets = final_targets_contain_srfs = NIL;
1689 493774 : sort_input_targets = sort_input_targets_contain_srfs = NIL;
1690 493774 : grouping_targets = grouping_targets_contain_srfs = NIL;
1691 493774 : scanjoin_targets = list_make1(scanjoin_target);
1692 493774 : scanjoin_targets_contain_srfs = NIL;
1693 : }
1694 :
1695 : /* Apply scan/join target. */
1696 502514 : scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1697 502514 : && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1698 502514 : apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1699 : scanjoin_targets_contain_srfs,
1700 : scanjoin_target_parallel_safe,
1701 : scanjoin_target_same_exprs);
1702 :
1703 : /*
1704 : * Save the various upper-rel PathTargets we just computed into
1705 : * root->upper_targets[]. The core code doesn't use this, but it
1706 : * provides a convenient place for extensions to get at the info. For
1707 : * consistency, we save all the intermediate targets, even though some
1708 : * of the corresponding upperrels might not be needed for this query.
1709 : */
1710 502514 : root->upper_targets[UPPERREL_FINAL] = final_target;
1711 502514 : root->upper_targets[UPPERREL_ORDERED] = final_target;
1712 502514 : root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1713 502514 : root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1714 502514 : root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1715 502514 : root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1716 :
1717 : /*
1718 : * If we have grouping and/or aggregation, consider ways to implement
1719 : * that. We build a new upperrel representing the output of this
1720 : * phase.
1721 : */
1722 502514 : if (have_grouping)
1723 : {
1724 39814 : current_rel = create_grouping_paths(root,
1725 : current_rel,
1726 : grouping_target,
1727 : grouping_target_parallel_safe,
1728 : gset_data);
1729 : /* Fix things up if grouping_target contains SRFs */
1730 39808 : if (parse->hasTargetSRFs)
1731 400 : adjust_paths_for_srfs(root, current_rel,
1732 : grouping_targets,
1733 : grouping_targets_contain_srfs);
1734 : }
1735 :
1736 : /*
1737 : * If we have window functions, consider ways to implement those. We
1738 : * build a new upperrel representing the output of this phase.
1739 : */
1740 502508 : if (activeWindows)
1741 : {
1742 2336 : current_rel = create_window_paths(root,
1743 : current_rel,
1744 : grouping_target,
1745 : sort_input_target,
1746 : sort_input_target_parallel_safe,
1747 : wflists,
1748 : activeWindows);
1749 : /* Fix things up if sort_input_target contains SRFs */
1750 2336 : if (parse->hasTargetSRFs)
1751 12 : adjust_paths_for_srfs(root, current_rel,
1752 : sort_input_targets,
1753 : sort_input_targets_contain_srfs);
1754 : }
1755 :
1756 : /*
1757 : * If there is a DISTINCT clause, consider ways to implement that. We
1758 : * build a new upperrel representing the output of this phase.
1759 : */
1760 502508 : if (parse->distinctClause)
1761 : {
1762 2418 : current_rel = create_distinct_paths(root,
1763 : current_rel,
1764 : sort_input_target);
1765 : }
1766 : } /* end of if (setOperations) */
1767 :
1768 : /*
1769 : * If ORDER BY was given, consider ways to implement that, and generate a
1770 : * new upperrel containing only paths that emit the correct ordering and
1771 : * project the correct final_target. We can apply the original
1772 : * limit_tuples limit in sort costing here, but only if there are no
1773 : * postponed SRFs.
1774 : */
1775 508004 : if (parse->sortClause)
1776 : {
1777 63276 : current_rel = create_ordered_paths(root,
1778 : current_rel,
1779 : final_target,
1780 : final_target_parallel_safe,
1781 : have_postponed_srfs ? -1.0 :
1782 : limit_tuples);
1783 : /* Fix things up if final_target contains SRFs */
1784 63276 : if (parse->hasTargetSRFs)
1785 196 : adjust_paths_for_srfs(root, current_rel,
1786 : final_targets,
1787 : final_targets_contain_srfs);
1788 : }
1789 :
1790 : /*
1791 : * Now we are prepared to build the final-output upperrel.
1792 : */
1793 508004 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1794 :
1795 : /*
1796 : * If the input rel is marked consider_parallel and there's nothing that's
1797 : * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1798 : * consider_parallel as well. Note that if the query has rowMarks or is
1799 : * not a SELECT, consider_parallel will be false for every relation in the
1800 : * query.
1801 : */
1802 662272 : if (current_rel->consider_parallel &&
1803 308512 : is_parallel_safe(root, parse->limitOffset) &&
1804 154244 : is_parallel_safe(root, parse->limitCount))
1805 154238 : final_rel->consider_parallel = true;
1806 :
1807 : /*
1808 : * If the current_rel belongs to a single FDW, so does the final_rel.
1809 : */
1810 508004 : final_rel->serverid = current_rel->serverid;
1811 508004 : final_rel->userid = current_rel->userid;
1812 508004 : final_rel->useridiscurrent = current_rel->useridiscurrent;
1813 508004 : final_rel->fdwroutine = current_rel->fdwroutine;
1814 :
1815 : /*
1816 : * Generate paths for the final_rel. Insert all surviving paths, with
1817 : * LockRows, Limit, and/or ModifyTable steps added if needed.
1818 : */
1819 1032458 : foreach(lc, current_rel->pathlist)
1820 : {
1821 524454 : Path *path = (Path *) lfirst(lc);
1822 :
1823 : /*
1824 : * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1825 : * (Note: we intentionally test parse->rowMarks not root->rowMarks
1826 : * here. If there are only non-locking rowmarks, they should be
1827 : * handled by the ModifyTable node instead. However, root->rowMarks
1828 : * is what goes into the LockRows node.)
1829 : */
1830 524454 : if (parse->rowMarks)
1831 : {
1832 8292 : path = (Path *) create_lockrows_path(root, final_rel, path,
1833 : root->rowMarks,
1834 : assign_special_exec_param(root));
1835 : }
1836 :
1837 : /*
1838 : * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1839 : */
1840 524454 : if (limit_needed(parse))
1841 : {
1842 5682 : path = (Path *) create_limit_path(root, final_rel, path,
1843 : parse->limitOffset,
1844 : parse->limitCount,
1845 : parse->limitOption,
1846 : offset_est, count_est);
1847 : }
1848 :
1849 : /*
1850 : * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1851 : */
1852 524454 : if (parse->commandType != CMD_SELECT)
1853 : {
1854 : Index rootRelation;
1855 90928 : List *resultRelations = NIL;
1856 90928 : List *updateColnosLists = NIL;
1857 90928 : List *withCheckOptionLists = NIL;
1858 90928 : List *returningLists = NIL;
1859 90928 : List *mergeActionLists = NIL;
1860 90928 : List *mergeJoinConditions = NIL;
1861 : List *rowMarks;
1862 :
1863 90928 : if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1864 : {
1865 : /* Inherited UPDATE/DELETE/MERGE */
1866 2714 : RelOptInfo *top_result_rel = find_base_rel(root,
1867 : parse->resultRelation);
1868 2714 : int resultRelation = -1;
1869 :
1870 : /* Pass the root result rel forward to the executor. */
1871 2714 : rootRelation = parse->resultRelation;
1872 :
1873 : /* Add only leaf children to ModifyTable. */
1874 7846 : while ((resultRelation = bms_next_member(root->leaf_result_relids,
1875 : resultRelation)) >= 0)
1876 : {
1877 5132 : RelOptInfo *this_result_rel = find_base_rel(root,
1878 : resultRelation);
1879 :
1880 : /*
1881 : * Also exclude any leaf rels that have turned dummy since
1882 : * being added to the list, for example, by being excluded
1883 : * by constraint exclusion.
1884 : */
1885 5132 : if (IS_DUMMY_REL(this_result_rel))
1886 84 : continue;
1887 :
1888 : /* Build per-target-rel lists needed by ModifyTable */
1889 5048 : resultRelations = lappend_int(resultRelations,
1890 : resultRelation);
1891 5048 : if (parse->commandType == CMD_UPDATE)
1892 : {
1893 3570 : List *update_colnos = root->update_colnos;
1894 :
1895 3570 : if (this_result_rel != top_result_rel)
1896 : update_colnos =
1897 3570 : adjust_inherited_attnums_multilevel(root,
1898 : update_colnos,
1899 : this_result_rel->relid,
1900 : top_result_rel->relid);
1901 3570 : updateColnosLists = lappend(updateColnosLists,
1902 : update_colnos);
1903 : }
1904 5048 : if (parse->withCheckOptions)
1905 : {
1906 426 : List *withCheckOptions = parse->withCheckOptions;
1907 :
1908 426 : if (this_result_rel != top_result_rel)
1909 : withCheckOptions = (List *)
1910 426 : adjust_appendrel_attrs_multilevel(root,
1911 : (Node *) withCheckOptions,
1912 : this_result_rel,
1913 : top_result_rel);
1914 426 : withCheckOptionLists = lappend(withCheckOptionLists,
1915 : withCheckOptions);
1916 : }
1917 5048 : if (parse->returningList)
1918 : {
1919 690 : List *returningList = parse->returningList;
1920 :
1921 690 : if (this_result_rel != top_result_rel)
1922 : returningList = (List *)
1923 690 : adjust_appendrel_attrs_multilevel(root,
1924 : (Node *) returningList,
1925 : this_result_rel,
1926 : top_result_rel);
1927 690 : returningLists = lappend(returningLists,
1928 : returningList);
1929 : }
1930 5048 : if (parse->mergeActionList)
1931 : {
1932 : ListCell *l;
1933 390 : List *mergeActionList = NIL;
1934 :
1935 : /*
1936 : * Copy MergeActions and translate stuff that
1937 : * references attribute numbers.
1938 : */
1939 1344 : foreach(l, parse->mergeActionList)
1940 : {
1941 954 : MergeAction *action = lfirst(l),
1942 954 : *leaf_action = copyObject(action);
1943 :
1944 954 : leaf_action->qual =
1945 954 : adjust_appendrel_attrs_multilevel(root,
1946 : (Node *) action->qual,
1947 : this_result_rel,
1948 : top_result_rel);
1949 954 : leaf_action->targetList = (List *)
1950 954 : adjust_appendrel_attrs_multilevel(root,
1951 954 : (Node *) action->targetList,
1952 : this_result_rel,
1953 : top_result_rel);
1954 954 : if (leaf_action->commandType == CMD_UPDATE)
1955 592 : leaf_action->updateColnos =
1956 592 : adjust_inherited_attnums_multilevel(root,
1957 : action->updateColnos,
1958 : this_result_rel->relid,
1959 : top_result_rel->relid);
1960 954 : mergeActionList = lappend(mergeActionList,
1961 : leaf_action);
1962 : }
1963 :
1964 390 : mergeActionLists = lappend(mergeActionLists,
1965 : mergeActionList);
1966 : }
1967 5048 : if (parse->commandType == CMD_MERGE)
1968 : {
1969 390 : Node *mergeJoinCondition = parse->mergeJoinCondition;
1970 :
1971 390 : if (this_result_rel != top_result_rel)
1972 : mergeJoinCondition =
1973 390 : adjust_appendrel_attrs_multilevel(root,
1974 : mergeJoinCondition,
1975 : this_result_rel,
1976 : top_result_rel);
1977 390 : mergeJoinConditions = lappend(mergeJoinConditions,
1978 : mergeJoinCondition);
1979 : }
1980 : }
1981 :
1982 2714 : if (resultRelations == NIL)
1983 : {
1984 : /*
1985 : * We managed to exclude every child rel, so generate a
1986 : * dummy one-relation plan using info for the top target
1987 : * rel (even though that may not be a leaf target).
1988 : * Although it's clear that no data will be updated or
1989 : * deleted, we still need to have a ModifyTable node so
1990 : * that any statement triggers will be executed. (This
1991 : * could be cleaner if we fixed nodeModifyTable.c to allow
1992 : * zero target relations, but that probably wouldn't be a
1993 : * net win.)
1994 : */
1995 30 : resultRelations = list_make1_int(parse->resultRelation);
1996 30 : if (parse->commandType == CMD_UPDATE)
1997 30 : updateColnosLists = list_make1(root->update_colnos);
1998 30 : if (parse->withCheckOptions)
1999 0 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2000 30 : if (parse->returningList)
2001 18 : returningLists = list_make1(parse->returningList);
2002 30 : if (parse->mergeActionList)
2003 0 : mergeActionLists = list_make1(parse->mergeActionList);
2004 30 : if (parse->commandType == CMD_MERGE)
2005 0 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2006 : }
2007 : }
2008 : else
2009 : {
2010 : /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2011 88214 : rootRelation = 0; /* there's no separate root rel */
2012 88214 : resultRelations = list_make1_int(parse->resultRelation);
2013 88214 : if (parse->commandType == CMD_UPDATE)
2014 11348 : updateColnosLists = list_make1(root->update_colnos);
2015 88214 : if (parse->withCheckOptions)
2016 908 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2017 88214 : if (parse->returningList)
2018 2338 : returningLists = list_make1(parse->returningList);
2019 88214 : if (parse->mergeActionList)
2020 1602 : mergeActionLists = list_make1(parse->mergeActionList);
2021 88214 : if (parse->commandType == CMD_MERGE)
2022 1602 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2023 : }
2024 :
2025 : /*
2026 : * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2027 : * will have dealt with fetching non-locked marked rows, else we
2028 : * need to have ModifyTable do that.
2029 : */
2030 90928 : if (parse->rowMarks)
2031 0 : rowMarks = NIL;
2032 : else
2033 90928 : rowMarks = root->rowMarks;
2034 :
2035 : path = (Path *)
2036 90928 : create_modifytable_path(root, final_rel,
2037 : path,
2038 : parse->commandType,
2039 90928 : parse->canSetTag,
2040 90928 : parse->resultRelation,
2041 : rootRelation,
2042 90928 : root->partColsUpdated,
2043 : resultRelations,
2044 : updateColnosLists,
2045 : withCheckOptionLists,
2046 : returningLists,
2047 : rowMarks,
2048 : parse->onConflict,
2049 : mergeActionLists,
2050 : mergeJoinConditions,
2051 : assign_special_exec_param(root));
2052 : }
2053 :
2054 : /* And shove it into final_rel */
2055 524454 : add_path(final_rel, path);
2056 : }
2057 :
2058 : /*
2059 : * Generate partial paths for final_rel, too, if outer query levels might
2060 : * be able to make use of them.
2061 : */
2062 508004 : if (final_rel->consider_parallel && root->query_level > 1 &&
2063 19404 : !limit_needed(parse))
2064 : {
2065 : Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2066 19350 : foreach(lc, current_rel->partial_pathlist)
2067 : {
2068 102 : Path *partial_path = (Path *) lfirst(lc);
2069 :
2070 102 : add_partial_path(final_rel, partial_path);
2071 : }
2072 : }
2073 :
2074 508004 : extra.limit_needed = limit_needed(parse);
2075 508004 : extra.limit_tuples = limit_tuples;
2076 508004 : extra.count_est = count_est;
2077 508004 : extra.offset_est = offset_est;
2078 :
2079 : /*
2080 : * If there is an FDW that's responsible for all baserels of the query,
2081 : * let it consider adding ForeignPaths.
2082 : */
2083 508004 : if (final_rel->fdwroutine &&
2084 1248 : final_rel->fdwroutine->GetForeignUpperPaths)
2085 1180 : final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2086 : current_rel, final_rel,
2087 : &extra);
2088 :
2089 : /* Let extensions possibly add some more paths */
2090 508004 : if (create_upper_paths_hook)
2091 0 : (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2092 : current_rel, final_rel, &extra);
2093 :
2094 : /* Note: currently, we leave it to callers to do set_cheapest() */
2095 508004 : }
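
As the function above shows, grouping_planner applies the upper planning steps in a fixed order: grouping/aggregation, then window functions, then DISTINCT, then ORDER BY, and finally the LockRows, Limit, and ModifyTable wrappers on each surviving path. The standalone sketch below only restates that control flow; the struct and function names are illustrative, not planner API.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative flags describing one query; not the real Query struct. */
typedef struct
{
	bool		has_grouping;	/* GROUP BY, aggregates, or HAVING */
	bool		has_windows;	/* window functions */
	bool		has_distinct;	/* DISTINCT */
	bool		has_sort;		/* ORDER BY */
	bool		has_rowmarks;	/* FOR [KEY] UPDATE/SHARE */
	bool		needs_limit;	/* LIMIT/OFFSET that must be enforced */
	bool		is_dml;			/* INSERT/UPDATE/DELETE/MERGE */
} QueryShape;

static void
show_upper_steps(const QueryShape *q)
{
	if (q->has_grouping)
		puts("create grouping/aggregation paths");
	if (q->has_windows)
		puts("create window paths");
	if (q->has_distinct)
		puts("create distinct paths");
	if (q->has_sort)
		puts("create ordered paths");
	if (q->has_rowmarks)
		puts("wrap each surviving path in LockRows");
	if (q->needs_limit)
		puts("wrap each surviving path in Limit");
	if (q->is_dml)
		puts("wrap each surviving path in ModifyTable");
}

int
main(void)
{
	QueryShape	q = {true, false, false, true, false, true, false};

	show_upper_steps(&q);		/* grouping, ordered, Limit */
	return 0;
}
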
2096 :
2097 : /*
2098 : * Do preprocessing for groupingSets clause and related data. This handles the
2099 : * preliminary steps of expanding the grouping sets, organizing them into lists
2100 : * of rollups, and preparing annotations which will later be filled in with
2101 : * size estimates.
2102 : */
2103 : static grouping_sets_data *
2104 854 : preprocess_grouping_sets(PlannerInfo *root)
2105 : {
2106 854 : Query *parse = root->parse;
2107 : List *sets;
2108 854 : int maxref = 0;
2109 : ListCell *lc_set;
2110 854 : grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
2111 :
2112 854 : parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
2113 :
2114 854 : gd->any_hashable = false;
2115 854 : gd->unhashable_refs = NULL;
2116 854 : gd->unsortable_refs = NULL;
2117 854 : gd->unsortable_sets = NIL;
2118 :
2119 : /*
2120 : * We don't currently make any attempt to optimize the groupClause when
2121 : * there are grouping sets, so just duplicate it in processed_groupClause.
2122 : */
2123 854 : root->processed_groupClause = parse->groupClause;
2124 :
2125 854 : if (parse->groupClause)
2126 : {
2127 : ListCell *lc;
2128 :
2129 2576 : foreach(lc, parse->groupClause)
2130 : {
2131 1764 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2132 1764 : Index ref = gc->tleSortGroupRef;
2133 :
2134 1764 : if (ref > maxref)
2135 1728 : maxref = ref;
2136 :
2137 1764 : if (!gc->hashable)
2138 30 : gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2139 :
2140 1764 : if (!OidIsValid(gc->sortop))
2141 42 : gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2142 : }
2143 : }
2144 :
2145 : /* Allocate workspace array for remapping */
2146 854 : gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2147 :
2148 : /*
2149 : * If we have any unsortable sets, we must extract them before trying to
2150 : * prepare rollups. Unsortable sets don't go through
2151 : * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2152 : * here.
2153 : */
2154 854 : if (!bms_is_empty(gd->unsortable_refs))
2155 : {
2156 42 : List *sortable_sets = NIL;
2157 : ListCell *lc;
2158 :
2159 126 : foreach(lc, parse->groupingSets)
2160 : {
2161 90 : List *gset = (List *) lfirst(lc);
2162 :
2163 90 : if (bms_overlap_list(gd->unsortable_refs, gset))
2164 : {
2165 48 : GroupingSetData *gs = makeNode(GroupingSetData);
2166 :
2167 48 : gs->set = gset;
2168 48 : gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2169 :
2170 : /*
2171 : * We must enforce here that an unsortable set is hashable;
2172 : * later code assumes this. Parse analysis only checks that
2173 : * every individual column is either hashable or sortable.
2174 : *
2175 : * Note that passing this test doesn't guarantee we can
2176 : * generate a plan; there might be other showstoppers.
2177 : */
2178 48 : if (bms_overlap_list(gd->unhashable_refs, gset))
2179 6 : ereport(ERROR,
2180 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2181 : errmsg("could not implement GROUP BY"),
2182 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2183 : }
2184 : else
2185 42 : sortable_sets = lappend(sortable_sets, gset);
2186 : }
2187 :
2188 36 : if (sortable_sets)
2189 30 : sets = extract_rollup_sets(sortable_sets);
2190 : else
2191 6 : sets = NIL;
2192 : }
2193 : else
2194 812 : sets = extract_rollup_sets(parse->groupingSets);
2195 :
2196 2202 : foreach(lc_set, sets)
2197 : {
2198 1354 : List *current_sets = (List *) lfirst(lc_set);
2199 1354 : RollupData *rollup = makeNode(RollupData);
2200 : GroupingSetData *gs;
2201 :
2202 : /*
2203 : * Reorder the current list of grouping sets into correct prefix
2204 : * order. If only one aggregation pass is needed, try to make the
2205 : * list match the ORDER BY clause; if more than one pass is needed, we
2206 : * don't bother with that.
2207 : *
2208 : * Note that this reorders the sets from smallest-member-first to
2209 : * largest-member-first, and applies the GroupingSetData annotations,
2210 : * though the data will be filled in later.
2211 : */
2212 1354 : current_sets = reorder_grouping_sets(current_sets,
2213 1354 : (list_length(sets) == 1
2214 : ? parse->sortClause
2215 : : NIL));
2216 :
2217 : /*
2218 : * Get the initial (and therefore largest) grouping set.
2219 : */
2220 1354 : gs = linitial_node(GroupingSetData, current_sets);
2221 :
2222 : /*
2223 : * Order the groupClause appropriately. If the first grouping set is
2224 : * empty, then the groupClause must also be empty; otherwise we have
2225 : * to force the groupClause to match that grouping set's order.
2226 : *
2227 : * (The first grouping set can be empty even though parse->groupClause
2228 : * is not empty only if all non-empty grouping sets are unsortable.
2229 : * The groupClauses for hashed grouping sets are built later on.)
2230 : */
2231 1354 : if (gs->set)
2232 1312 : rollup->groupClause = preprocess_groupclause(root, gs->set);
2233 : else
2234 42 : rollup->groupClause = NIL;
2235 :
2236 : /*
2237 : * Is it hashable? We pretend empty sets are hashable even though we
2238 : * actually force them not to be hashed later. But don't bother if
2239 : * there's nothing but empty sets (since in that case we can't hash
2240 : * anything).
2241 : */
2242 1354 : if (gs->set &&
2243 1312 : !bms_overlap_list(gd->unhashable_refs, gs->set))
2244 : {
2245 1288 : rollup->hashable = true;
2246 1288 : gd->any_hashable = true;
2247 : }
2248 :
2249 : /*
2250 : * Now that we've pinned down an order for the groupClause for this
2251 : * list of grouping sets, we need to remap the entries in the grouping
2252 : * sets from sortgrouprefs to plain indices (0-based) into the
2253 : * groupClause for this collection of grouping sets. We keep the
2254 : * original form for later use, though.
2255 : */
2256 1354 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2257 : current_sets,
2258 : gd->tleref_to_colnum_map);
2259 1354 : rollup->gsets_data = current_sets;
2260 :
2261 1354 : gd->rollups = lappend(gd->rollups, rollup);
2262 : }
2263 :
2264 848 : if (gd->unsortable_sets)
2265 : {
2266 : /*
2267 : * We have not yet pinned down a groupclause for this, but we will
2268 : * need index-based lists for estimation purposes. Construct
2269 : * hash_sets_idx based on the entire original groupclause for now.
2270 : */
2271 36 : gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2272 : gd->unsortable_sets,
2273 : gd->tleref_to_colnum_map);
2274 36 : gd->any_hashable = true;
2275 : }
2276 :
2277 848 : return gd;
2278 : }
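
For reference, the input that preprocess_grouping_sets receives from expand_grouping_sets is the flattened list of grouping sets; in standard SQL, ROLLUP(a,b) is shorthand for the sets (a,b), (a), (), and CUBE(a,b) for every subset of {a,b}. The standalone sketch below only prints those expansions using bitmasks (bit 0 = a, bit 1 = b) and makes no claim about the planner's internal ordering.

#include <stdio.h>

/* Print one grouping set encoded as a bitmask over columns "a" and "b". */
static void
print_set(unsigned mask)
{
	printf("(");
	if (mask & 1)
		printf("a");
	if (mask & 2)
		printf("%sb", (mask & 1) ? "," : "");
	printf(") ");
}

int
main(void)
{
	unsigned	rollup[] = {3, 1, 0};		/* ROLLUP(a,b) */
	unsigned	cube[] = {3, 1, 2, 0};		/* CUBE(a,b) */
	int			i;

	printf("ROLLUP(a,b): ");
	for (i = 0; i < 3; i++)
		print_set(rollup[i]);
	printf("\nCUBE(a,b): ");
	for (i = 0; i < 4; i++)
		print_set(cube[i]);
	printf("\n");
	return 0;
}
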
2279 :
2280 : /*
2281 : * Given a groupclause and a list of GroupingSetData, return equivalent sets
2282 : * (without annotation) mapped to indexes into the given groupclause.
2283 : */
2284 : static List *
2285 3972 : remap_to_groupclause_idx(List *groupClause,
2286 : List *gsets,
2287 : int *tleref_to_colnum_map)
2288 : {
2289 3972 : int ref = 0;
2290 3972 : List *result = NIL;
2291 : ListCell *lc;
2292 :
2293 9808 : foreach(lc, groupClause)
2294 : {
2295 5836 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2296 :
2297 5836 : tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2298 : }
2299 :
2300 9246 : foreach(lc, gsets)
2301 : {
2302 5274 : List *set = NIL;
2303 : ListCell *lc2;
2304 5274 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2305 :
2306 11948 : foreach(lc2, gs->set)
2307 : {
2308 6674 : set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2309 : }
2310 :
2311 5274 : result = lappend(result, set);
2312 : }
2313 :
2314 3972 : return result;
2315 : }
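
The remapping done by remap_to_groupclause_idx is just a table lookup from sortgroupref numbers to 0-based positions within the groupClause. A standalone sketch with plain arrays (illustrative only; the real code walks List and SortGroupClause nodes):

#include <stdio.h>

int
main(void)
{
	/* tleSortGroupRef of each groupClause item, in clause order */
	int			group_refs[] = {5, 2, 7};
	int			n_group = 3;
	/* one grouping set, expressed as sortgrouprefs */
	int			set_refs[] = {7, 5};
	int			n_set = 2;
	int			ref_to_idx[8];	/* indexed by sortgroupref; size > max ref */
	int			i;

	for (i = 0; i < n_group; i++)
		ref_to_idx[group_refs[i]] = i;

	/* The set {7,5} becomes the index list {2,0}. */
	for (i = 0; i < n_set; i++)
		printf("%d ", ref_to_idx[set_refs[i]]);
	printf("\n");
	return 0;
}
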
2316 :
2317 :
2318 : /*
2319 : * preprocess_rowmarks - set up PlanRowMarks if needed
2320 : */
2321 : static void
2322 511726 : preprocess_rowmarks(PlannerInfo *root)
2323 : {
2324 511726 : Query *parse = root->parse;
2325 : Bitmapset *rels;
2326 : List *prowmarks;
2327 : ListCell *l;
2328 : int i;
2329 :
2330 511726 : if (parse->rowMarks)
2331 : {
2332 : /*
2333 : * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2334 : * grouping, since grouping renders a reference to individual tuple
2335 : * CTIDs invalid. This is also checked at parse time, but that's
2336 : * insufficient because of rule substitution, query pullup, etc.
2337 : */
2338 7814 : CheckSelectLocking(parse, linitial_node(RowMarkClause,
2339 : parse->rowMarks)->strength);
2340 : }
2341 : else
2342 : {
2343 : /*
2344 : * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2345 : * UPDATE/SHARE.
2346 : */
2347 503912 : if (parse->commandType != CMD_UPDATE &&
2348 490598 : parse->commandType != CMD_DELETE &&
2349 486224 : parse->commandType != CMD_MERGE)
2350 484466 : return;
2351 : }
2352 :
2353 : /*
2354 : * We need to have rowmarks for all base relations except the target. We
2355 : * make a bitmapset of all base rels and then remove the items we don't
2356 : * need or have FOR [KEY] UPDATE/SHARE marks for.
2357 : */
2358 27260 : rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2359 27260 : if (parse->resultRelation)
2360 19446 : rels = bms_del_member(rels, parse->resultRelation);
2361 :
2362 : /*
2363 : * Convert RowMarkClauses to PlanRowMark representation.
2364 : */
2365 27260 : prowmarks = NIL;
2366 35330 : foreach(l, parse->rowMarks)
2367 : {
2368 8070 : RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2369 8070 : RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2370 : PlanRowMark *newrc;
2371 :
2372 : /*
2373 : * Currently, it is syntactically impossible to have FOR UPDATE et al
2374 : * applied to an update/delete target rel. If that ever becomes
2375 : * possible, we should drop the target from the PlanRowMark list.
2376 : */
2377 : Assert(rc->rti != parse->resultRelation);
2378 :
2379 : /*
2380 : * Ignore RowMarkClauses for subqueries; they aren't real tables and
2381 : * can't support true locking. Subqueries that got flattened into the
2382 : * main query should be ignored completely. Any that didn't will get
2383 : * ROW_MARK_COPY items in the next loop.
2384 : */
2385 8070 : if (rte->rtekind != RTE_RELATION)
2386 108 : continue;
2387 :
2388 7962 : rels = bms_del_member(rels, rc->rti);
2389 :
2390 7962 : newrc = makeNode(PlanRowMark);
2391 7962 : newrc->rti = newrc->prti = rc->rti;
2392 7962 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2393 7962 : newrc->markType = select_rowmark_type(rte, rc->strength);
2394 7962 : newrc->allMarkTypes = (1 << newrc->markType);
2395 7962 : newrc->strength = rc->strength;
2396 7962 : newrc->waitPolicy = rc->waitPolicy;
2397 7962 : newrc->isParent = false;
2398 :
2399 7962 : prowmarks = lappend(prowmarks, newrc);
2400 : }
2401 :
2402 : /*
2403 : * Now, add rowmarks for any non-target, non-locked base relations.
2404 : */
2405 27260 : i = 0;
2406 65336 : foreach(l, parse->rtable)
2407 : {
2408 38076 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2409 : PlanRowMark *newrc;
2410 :
2411 38076 : i++;
2412 38076 : if (!bms_is_member(i, rels))
2413 34526 : continue;
2414 :
2415 3550 : newrc = makeNode(PlanRowMark);
2416 3550 : newrc->rti = newrc->prti = i;
2417 3550 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2418 3550 : newrc->markType = select_rowmark_type(rte, LCS_NONE);
2419 3550 : newrc->allMarkTypes = (1 << newrc->markType);
2420 3550 : newrc->strength = LCS_NONE;
2421 3550 : newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2422 3550 : newrc->isParent = false;
2423 :
2424 3550 : prowmarks = lappend(prowmarks, newrc);
2425 : }
2426 :
2427 27260 : root->rowMarks = prowmarks;
2428 : }
2429 :
2430 : /*
2431 : * Select RowMarkType to use for a given table
2432 : */
2433 : RowMarkType
2434 13868 : select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2435 : {
2436 13868 : if (rte->rtekind != RTE_RELATION)
2437 : {
2438 : /* If it's not a table at all, use ROW_MARK_COPY */
2439 1384 : return ROW_MARK_COPY;
2440 : }
2441 12484 : else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2442 : {
2443 : /* Let the FDW select the rowmark type, if it wants to */
2444 200 : FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2445 :
2446 200 : if (fdwroutine->GetForeignRowMarkType != NULL)
2447 0 : return fdwroutine->GetForeignRowMarkType(rte, strength);
2448 : /* Otherwise, use ROW_MARK_COPY by default */
2449 200 : return ROW_MARK_COPY;
2450 : }
2451 : else
2452 : {
2453 : /* Regular table, apply the appropriate lock type */
2454 12284 : switch (strength)
2455 : {
2456 2396 : case LCS_NONE:
2457 :
2458 : /*
2459 : * We don't need a tuple lock, only the ability to re-fetch
2460 : * the row.
2461 : */
2462 2396 : return ROW_MARK_REFERENCE;
2463 : break;
2464 8014 : case LCS_FORKEYSHARE:
2465 8014 : return ROW_MARK_KEYSHARE;
2466 : break;
2467 300 : case LCS_FORSHARE:
2468 300 : return ROW_MARK_SHARE;
2469 : break;
2470 72 : case LCS_FORNOKEYUPDATE:
2471 72 : return ROW_MARK_NOKEYEXCLUSIVE;
2472 : break;
2473 1502 : case LCS_FORUPDATE:
2474 1502 : return ROW_MARK_EXCLUSIVE;
2475 : break;
2476 : }
2477 0 : elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2478 : return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2479 : }
2480 : }
2481 :
2482 : /*
2483 : * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2484 : *
2485 : * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2486 : * results back in *count_est and *offset_est. These variables are set to
2487 : * 0 if the corresponding clause is not present, and -1 if it's present
2488 : * but we couldn't estimate the value for it. (The "0" convention is OK
2489 : * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2490 : * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2491 : * usual practice of never estimating less than one row.) These values will
2492 : * be passed to create_limit_path, which see if you change this code.
2493 : *
2494 : * The return value is the suitably adjusted tuple_fraction to use for
2495 : * planning the query. This adjustment is not overridable, since it reflects
2496 : * plan actions that grouping_planner() will certainly take, not assumptions
2497 : * about context.
2498 : */
2499 : static double
2500 4714 : preprocess_limit(PlannerInfo *root, double tuple_fraction,
2501 : int64 *offset_est, int64 *count_est)
2502 : {
2503 4714 : Query *parse = root->parse;
2504 : Node *est;
2505 : double limit_fraction;
2506 :
2507 : /* Should not be called unless LIMIT or OFFSET */
2508 : Assert(parse->limitCount || parse->limitOffset);
2509 :
2510 : /*
2511 : * Try to obtain the clause values. We use estimate_expression_value
2512 : * primarily because it can sometimes do something useful with Params.
2513 : */
2514 4714 : if (parse->limitCount)
2515 : {
2516 4268 : est = estimate_expression_value(root, parse->limitCount);
2517 4268 : if (est && IsA(est, Const))
2518 : {
2519 4262 : if (((Const *) est)->constisnull)
2520 : {
2521 : /* NULL indicates LIMIT ALL, ie, no limit */
2522 0 : *count_est = 0; /* treat as not present */
2523 : }
2524 : else
2525 : {
2526 4262 : *count_est = DatumGetInt64(((Const *) est)->constvalue);
2527 4262 : if (*count_est <= 0)
2528 150 : *count_est = 1; /* force to at least 1 */
2529 : }
2530 : }
2531 : else
2532 6 : *count_est = -1; /* can't estimate */
2533 : }
2534 : else
2535 446 : *count_est = 0; /* not present */
2536 :
2537 4714 : if (parse->limitOffset)
2538 : {
2539 806 : est = estimate_expression_value(root, parse->limitOffset);
2540 806 : if (est && IsA(est, Const))
2541 : {
2542 782 : if (((Const *) est)->constisnull)
2543 : {
2544 : /* Treat NULL as no offset; the executor will too */
2545 0 : *offset_est = 0; /* treat as not present */
2546 : }
2547 : else
2548 : {
2549 782 : *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2550 782 : if (*offset_est < 0)
2551 0 : *offset_est = 0; /* treat as not present */
2552 : }
2553 : }
2554 : else
2555 24 : *offset_est = -1; /* can't estimate */
2556 : }
2557 : else
2558 3908 : *offset_est = 0; /* not present */
2559 :
2560 4714 : if (*count_est != 0)
2561 : {
2562 : /*
2563 : * A LIMIT clause limits the absolute number of tuples returned.
2564 : * However, if it's not a constant LIMIT then we have to guess; for
2565 : * lack of a better idea, assume 10% of the plan's result is wanted.
2566 : */
2567 4268 : if (*count_est < 0 || *offset_est < 0)
2568 : {
2569 : /* LIMIT or OFFSET is an expression ... punt ... */
2570 24 : limit_fraction = 0.10;
2571 : }
2572 : else
2573 : {
2574 : /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2575 4244 : limit_fraction = (double) *count_est + (double) *offset_est;
2576 : }
2577 :
2578 : /*
2579 : * If we have absolute limits from both caller and LIMIT, use the
2580 : * smaller value; likewise if they are both fractional. If one is
2581 : * fractional and the other absolute, we can't easily determine which
2582 : * is smaller, but we use the heuristic that the absolute will usually
2583 : * be smaller.
2584 : */
2585 4268 : if (tuple_fraction >= 1.0)
2586 : {
2587 6 : if (limit_fraction >= 1.0)
2588 : {
2589 : /* both absolute */
2590 6 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2591 : }
2592 : else
2593 : {
2594 : /* caller absolute, limit fractional; use caller's value */
2595 : }
2596 : }
2597 4262 : else if (tuple_fraction > 0.0)
2598 : {
2599 146 : if (limit_fraction >= 1.0)
2600 : {
2601 : /* caller fractional, limit absolute; use limit */
2602 146 : tuple_fraction = limit_fraction;
2603 : }
2604 : else
2605 : {
2606 : /* both fractional */
2607 0 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2608 : }
2609 : }
2610 : else
2611 : {
2612 : /* no info from caller, just use limit */
2613 4116 : tuple_fraction = limit_fraction;
2614 : }
2615 : }
2616 446 : else if (*offset_est != 0 && tuple_fraction > 0.0)
2617 : {
2618 : /*
2619 : * We have an OFFSET but no LIMIT. This acts entirely differently
2620 : * from the LIMIT case: here, we need to increase rather than decrease
2621 : * the caller's tuple_fraction, because the OFFSET acts to cause more
2622 : * tuples to be fetched instead of fewer. This only matters if we got
2623 : * a tuple_fraction > 0, however.
2624 : *
2625 : * As above, use 10% if OFFSET is present but unestimatable.
2626 : */
2627 12 : if (*offset_est < 0)
2628 0 : limit_fraction = 0.10;
2629 : else
2630 12 : limit_fraction = (double) *offset_est;
2631 :
2632 : /*
2633 : * If we have absolute counts from both caller and OFFSET, add them
2634 : * together; likewise if they are both fractional. If one is
2635 : * fractional and the other absolute, we want to take the larger, and
2636 : * we heuristically assume that's the fractional one.
2637 : */
2638 12 : if (tuple_fraction >= 1.0)
2639 : {
2640 0 : if (limit_fraction >= 1.0)
2641 : {
2642 : /* both absolute, so add them together */
2643 0 : tuple_fraction += limit_fraction;
2644 : }
2645 : else
2646 : {
2647 : /* caller absolute, limit fractional; use limit */
2648 0 : tuple_fraction = limit_fraction;
2649 : }
2650 : }
2651 : else
2652 : {
2653 12 : if (limit_fraction >= 1.0)
2654 : {
2655 : /* caller fractional, limit absolute; use caller's value */
2656 : }
2657 : else
2658 : {
2659 : /* both fractional, so add them together */
2660 0 : tuple_fraction += limit_fraction;
2661 0 : if (tuple_fraction >= 1.0)
2662 0 : tuple_fraction = 0.0; /* assume fetch all */
2663 : }
2664 : }
2665 : }
2666 :
2667 4714 : return tuple_fraction;
2668 : }
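
The conventions above (0 = clause absent, -1 = present but unestimatable) and the arithmetic that follows them can be condensed into a standalone sketch. It is illustrative only: the helper name is made up and it deliberately ignores the merging with a caller-supplied tuple_fraction.

#include <stdio.h>

/*
 * Given estimated LIMIT and OFFSET values (0 = absent, -1 = unknown),
 * return the tuple_fraction the planner would aim for, per the rules in
 * preprocess_limit above.  Simplified: assumes no caller-supplied fraction.
 */
static double
limit_to_tuple_fraction(long count_est, long offset_est)
{
	if (count_est != 0)
	{
		if (count_est < 0 || offset_est < 0)
			return 0.10;		/* expression LIMIT/OFFSET: guess 10% */
		return (double) count_est + (double) offset_est;
	}
	/* OFFSET alone only scales up a caller-supplied fraction, ignored here */
	return 0.0;
}

int
main(void)
{
	printf("%.2f\n", limit_to_tuple_fraction(10, 5));	/* 15.00 */
	printf("%.2f\n", limit_to_tuple_fraction(-1, 0));	/* 0.10 */
	printf("%.2f\n", limit_to_tuple_fraction(0, 20));	/* 0.00 */
	return 0;
}
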
2669 :
2670 : /*
2671 : * limit_needed - do we actually need a Limit plan node?
2672 : *
2673 : * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2674 : * a Limit node. This is worth checking for because "OFFSET 0" is a common
2675 : * locution for an optimization fence. (Because other places in the planner
2676 : * merely check whether parse->limitOffset isn't NULL, it will still work as
2677 : * an optimization fence --- we're just suppressing unnecessary run-time
2678 : * overhead.)
2679 : *
2680 : * This might look like it could be merged into preprocess_limit, but there's
2681 : * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2682 : * in preprocess_limit it's good enough to consider estimated values.
2683 : */
2684 : bool
2685 1058502 : limit_needed(Query *parse)
2686 : {
2687 : Node *node;
2688 :
2689 1058502 : node = parse->limitCount;
2690 1058502 : if (node)
2691 : {
2692 10228 : if (IsA(node, Const))
2693 : {
2694 : /* NULL indicates LIMIT ALL, ie, no limit */
2695 10034 : if (!((Const *) node)->constisnull)
2696 10034 : return true; /* LIMIT with a constant value */
2697 : }
2698 : else
2699 194 : return true; /* non-constant LIMIT */
2700 : }
2701 :
2702 1048274 : node = parse->limitOffset;
2703 1048274 : if (node)
2704 : {
2705 1310 : if (IsA(node, Const))
2706 : {
2707 : /* Treat NULL as no offset; the executor would too */
2708 1044 : if (!((Const *) node)->constisnull)
2709 : {
2710 1044 : int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2711 :
2712 1044 : if (offset != 0)
2713 84 : return true; /* OFFSET with a nonzero value */
2714 : }
2715 : }
2716 : else
2717 266 : return true; /* non-constant OFFSET */
2718 : }
2719 :
2720 1047924 : return false; /* don't need a Limit plan node */
2721 : }
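
The point of limit_needed is that only hard constants let the planner drop the Limit node: "OFFSET 0" and "LIMIT ALL" (a constant NULL) then cost nothing at run time, while any non-constant expression forces the node. A standalone restatement using an illustrative struct rather than the real Node representation:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for a LIMIT/OFFSET expression. */
typedef struct
{
	bool		present;		/* clause given at all? */
	bool		is_const;		/* folded to a constant? */
	bool		is_null;		/* constant NULL (LIMIT ALL)? */
	long		value;			/* constant value when non-null */
} ClauseEst;

static bool
limit_node_needed(ClauseEst count, ClauseEst offset)
{
	if (count.present)
	{
		if (!count.is_const)
			return true;		/* non-constant LIMIT */
		if (!count.is_null)
			return true;		/* LIMIT with a constant value */
	}
	if (offset.present)
	{
		if (!offset.is_const)
			return true;		/* non-constant OFFSET */
		if (!offset.is_null && offset.value != 0)
			return true;		/* OFFSET with a nonzero constant */
	}
	return false;
}

int
main(void)
{
	ClauseEst	none = {false, false, false, 0};
	ClauseEst	offset0 = {true, true, false, 0};	/* OFFSET 0 */
	ClauseEst	limit10 = {true, true, false, 10};	/* LIMIT 10 */

	printf("%d\n", limit_node_needed(none, offset0));	/* 0: fence only */
	printf("%d\n", limit_node_needed(limit10, none));	/* 1 */
	return 0;
}
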
2722 :
2723 : /*
2724 : * preprocess_groupclause - do preparatory work on GROUP BY clause
2725 : *
2726 : * The idea here is to adjust the ordering of the GROUP BY elements
2727 : * (which in itself is semantically insignificant) to match ORDER BY,
2728 : * thereby allowing a single sort operation to both implement the ORDER BY
2729 : * requirement and set up for a Unique step that implements GROUP BY.
2730 :  * We also consider partial matches between GROUP BY and ORDER BY elements,
2731 :  * which can allow ORDER BY to be implemented using an incremental sort.
2732 : *
2733 : * We also consider other orderings of the GROUP BY elements, which could
2734 : * match the sort ordering of other possible plans (eg an indexscan) and
2735 : * thereby reduce cost. This is implemented during the generation of grouping
2736 : * paths. See get_useful_group_keys_orderings() for details.
2737 : *
2738 : * Note: we need no comparable processing of the distinctClause because
2739 : * the parser already enforced that that matches ORDER BY.
2740 : *
2741 : * Note: we return a fresh List, but its elements are the same
2742 : * SortGroupClauses appearing in parse->groupClause. This is important
2743 : * because later processing may modify the processed_groupClause list.
2744 : *
2745 : * For grouping sets, the order of items is instead forced to agree with that
2746 : * of the grouping set (and items not in the grouping set are skipped). The
2747 : * work of sorting the order of grouping set elements to match the ORDER BY if
2748 : * possible is done elsewhere.
2749 : */
2750 : static List *
2751 7488 : preprocess_groupclause(PlannerInfo *root, List *force)
2752 : {
2753 7488 : Query *parse = root->parse;
2754 7488 : List *new_groupclause = NIL;
2755 : ListCell *sl;
2756 : ListCell *gl;
2757 :
2758 : /* For grouping sets, we need to force the ordering */
2759 7488 : if (force)
2760 : {
2761 9652 : foreach(sl, force)
2762 : {
2763 5758 : Index ref = lfirst_int(sl);
2764 5758 : SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2765 :
2766 5758 : new_groupclause = lappend(new_groupclause, cl);
2767 : }
2768 :
2769 3894 : return new_groupclause;
2770 : }
2771 :
2772 : /* If no ORDER BY, nothing useful to do here */
2773 3594 : if (parse->sortClause == NIL)
2774 2014 : return list_copy(parse->groupClause);
2775 :
2776 : /*
2777 : * Scan the ORDER BY clause and construct a list of matching GROUP BY
2778 : * items, but only as far as we can make a matching prefix.
2779 : *
2780 : * This code assumes that the sortClause contains no duplicate items.
2781 : */
2782 3066 : foreach(sl, parse->sortClause)
2783 : {
2784 2132 : SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2785 :
2786 3228 : foreach(gl, parse->groupClause)
2787 : {
2788 2582 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2789 :
2790 2582 : if (equal(gc, sc))
2791 : {
2792 1486 : new_groupclause = lappend(new_groupclause, gc);
2793 1486 : break;
2794 : }
2795 : }
2796 2132 : if (gl == NULL)
2797 646 : break; /* no match, so stop scanning */
2798 : }
2799 :
2800 :
2801 : /* If no match at all, no point in reordering GROUP BY */
2802 1580 : if (new_groupclause == NIL)
2803 298 : return list_copy(parse->groupClause);
2804 :
2805 : /*
2806 : * Add any remaining GROUP BY items to the new list. We don't require a
2807 : * complete match, because even partial match allows ORDER BY to be
2808 : * implemented using incremental sort. Also, give up if there are any
2809 : * non-sortable GROUP BY items, since then there's no hope anyway.
2810 : */
2811 2934 : foreach(gl, parse->groupClause)
2812 : {
2813 1652 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2814 :
2815 1652 : if (list_member_ptr(new_groupclause, gc))
2816 1486 : continue; /* it matched an ORDER BY item */
2817 166 : if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2818 0 : return list_copy(parse->groupClause);
2819 166 : new_groupclause = lappend(new_groupclause, gc);
2820 : }
2821 :
2822 : /* Success --- install the rearranged GROUP BY list */
2823 : Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2824 1282 : return new_groupclause;
2825 : }
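
A small standalone demonstration of the reordering that preprocess_groupclause performs, using arrays of sortgroupref numbers instead of SortGroupClause lists (illustrative only): ORDER BY items that also appear in GROUP BY form the new prefix, and the remaining GROUP BY items are appended afterwards.

#include <stdio.h>
#include <stdbool.h>

static bool
contains(const int *arr, int n, int val)
{
	int			i;

	for (i = 0; i < n; i++)
		if (arr[i] == val)
			return true;
	return false;
}

int
main(void)
{
	int			sort_refs[] = {3, 1};		/* ORDER BY c, a */
	int			group_refs[] = {1, 2, 3};	/* GROUP BY a, b, c */
	int			new_group[3];
	int			n_new = 0;
	int			i;

	/* Take ORDER BY items as long as each one appears in GROUP BY. */
	for (i = 0; i < 2; i++)
	{
		if (!contains(group_refs, 3, sort_refs[i]))
			break;
		new_group[n_new++] = sort_refs[i];
	}

	/* Append the GROUP BY items not already taken. */
	for (i = 0; i < 3; i++)
		if (!contains(new_group, n_new, group_refs[i]))
			new_group[n_new++] = group_refs[i];

	/* Prints: 3 1 2  -- GROUP BY reordered to c, a, b */
	for (i = 0; i < n_new; i++)
		printf("%d ", new_group[i]);
	printf("\n");
	return 0;
}
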
2826 :
2827 : /*
2828 : * Extract lists of grouping sets that can be implemented using a single
2829 : * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2830 : *
2831 : * Input must be sorted with smallest sets first. Result has each sublist
2832 : * sorted with smallest sets first.
2833 : *
2834 : * We want to produce the absolute minimum possible number of lists here to
2835 : * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2836 : * of finding the minimal partition of a partially-ordered set into chains
2837 : * (which is what we need, taking the list of grouping sets as a poset ordered
2838 : * by set inclusion) can be mapped to the problem of finding the maximum
2839 : * cardinality matching on a bipartite graph, which is solvable in polynomial
2840 : * time with a worst case of no worse than O(n^2.5) and usually much
2841 : * better. Since our N is at most 4096, we don't need to consider fallbacks to
2842 : * heuristic or approximate methods. (Planning time for a 12-d cube is under
2843 : * half a second on my modest system even with optimization off and assertions
2844 : * on.)
2845 : */
2846 : static List *
2847 842 : extract_rollup_sets(List *groupingSets)
2848 : {
2849 842 : int num_sets_raw = list_length(groupingSets);
2850 842 : int num_empty = 0;
2851 842 : int num_sets = 0; /* distinct sets */
2852 842 : int num_chains = 0;
2853 842 : List *result = NIL;
2854 : List **results;
2855 : List **orig_sets;
2856 : Bitmapset **set_masks;
2857 : int *chains;
2858 : short **adjacency;
2859 : short *adjacency_buf;
2860 : BipartiteMatchState *state;
2861 : int i;
2862 : int j;
2863 : int j_size;
2864 842 : ListCell *lc1 = list_head(groupingSets);
2865 : ListCell *lc;
2866 :
2867 : /*
2868 : * Start by stripping out empty sets. The algorithm doesn't require this,
2869 : * but the planner currently needs all empty sets to be returned in the
2870 : * first list, so we strip them here and add them back after.
2871 : */
2872 1452 : while (lc1 && lfirst(lc1) == NIL)
2873 : {
2874 610 : ++num_empty;
2875 610 : lc1 = lnext(groupingSets, lc1);
2876 : }
2877 :
2878 : /* bail out now if it turns out that all we had were empty sets. */
2879 842 : if (!lc1)
2880 42 : return list_make1(groupingSets);
2881 :
2882 : /*----------
2883 : * We don't strictly need to remove duplicate sets here, but if we don't,
2884 : * they tend to become scattered through the result, which is a bit
2885 : * confusing (and irritating if we ever decide to optimize them out).
2886 : * So we remove them here and add them back after.
2887 : *
2888 : * For each non-duplicate set, we fill in the following:
2889 : *
2890 : * orig_sets[i] = list of the original set lists
2891 : * set_masks[i] = bitmapset for testing inclusion
2892 : * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2893 : *
2894 : * chains[i] will be the result group this set is assigned to.
2895 : *
2896 : * We index all of these from 1 rather than 0 because it is convenient
2897 : * to leave 0 free for the NIL node in the graph algorithm.
2898 : *----------
2899 : */
2900 800 : orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2901 800 : set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2902 800 : adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2903 800 : adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2904 :
2905 800 : j_size = 0;
2906 800 : j = 0;
2907 800 : i = 1;
2908 :
2909 2840 : for_each_cell(lc, groupingSets, lc1)
2910 : {
2911 2040 : List *candidate = (List *) lfirst(lc);
2912 2040 : Bitmapset *candidate_set = NULL;
2913 : ListCell *lc2;
2914 2040 : int dup_of = 0;
2915 :
2916 4974 : foreach(lc2, candidate)
2917 : {
2918 2934 : candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2919 : }
2920 :
2921 : /* we can only be a dup if we're the same length as a previous set */
2922 2040 : if (j_size == list_length(candidate))
2923 : {
2924 : int k;
2925 :
2926 1712 : for (k = j; k < i; ++k)
2927 : {
2928 1104 : if (bms_equal(set_masks[k], candidate_set))
2929 : {
2930 158 : dup_of = k;
2931 158 : break;
2932 : }
2933 : }
2934 : }
2935 1274 : else if (j_size < list_length(candidate))
2936 : {
2937 1274 : j_size = list_length(candidate);
2938 1274 : j = i;
2939 : }
2940 :
2941 2040 : if (dup_of > 0)
2942 : {
2943 158 : orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2944 158 : bms_free(candidate_set);
2945 : }
2946 : else
2947 : {
2948 : int k;
2949 1882 : int n_adj = 0;
2950 :
2951 1882 : orig_sets[i] = list_make1(candidate);
2952 1882 : set_masks[i] = candidate_set;
2953 :
2954 : /* fill in adjacency list; no need to compare equal-size sets */
2955 :
2956 3154 : for (k = j - 1; k > 0; --k)
2957 : {
2958 1272 : if (bms_is_subset(set_masks[k], candidate_set))
2959 1110 : adjacency_buf[++n_adj] = k;
2960 : }
2961 :
2962 1882 : if (n_adj > 0)
2963 : {
2964 598 : adjacency_buf[0] = n_adj;
2965 598 : adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2966 598 : memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2967 : }
2968 : else
2969 1284 : adjacency[i] = NULL;
2970 :
2971 1882 : ++i;
2972 : }
2973 : }
2974 :
2975 800 : num_sets = i - 1;
2976 :
2977 : /*
2978 : * Apply the graph matching algorithm to do the work.
2979 : */
2980 800 : state = BipartiteMatch(num_sets, num_sets, adjacency);
2981 :
2982 : /*
2983 : * Now, the state->pair* fields have the info we need to assign sets to
2984 : * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
2985 : * pair_vu[v] = u (both will be true, but we check both so that we can do
2986 : * it in one pass).
2987 : */
2988 800 : chains = palloc0((num_sets + 1) * sizeof(int));
2989 :
2990 2682 : for (i = 1; i <= num_sets; ++i)
2991 : {
2992 1882 : int u = state->pair_vu[i];
2993 1882 : int v = state->pair_uv[i];
2994 :
2995 1882 : if (u > 0 && u < i)
2996 0 : chains[i] = chains[u];
2997 1882 : else if (v > 0 && v < i)
2998 570 : chains[i] = chains[v];
2999 : else
3000 1312 : chains[i] = ++num_chains;
3001 : }
3002 :
3003 : /* build result lists. */
3004 800 : results = palloc0((num_chains + 1) * sizeof(List *));
3005 :
3006 2682 : for (i = 1; i <= num_sets; ++i)
3007 : {
3008 1882 : int c = chains[i];
3009 :
3010 : Assert(c > 0);
3011 :
3012 1882 : results[c] = list_concat(results[c], orig_sets[i]);
3013 : }
3014 :
3015 : /* push any empty sets back on the first list. */
3016 1320 : while (num_empty-- > 0)
3017 520 : results[1] = lcons(NIL, results[1]);
3018 :
3019 : /* make result list */
3020 2112 : for (i = 1; i <= num_chains; ++i)
3021 1312 : result = lappend(result, results[i]);
3022 :
3023 : /*
3024 : * Free all the things.
3025 : *
3026 : * (This is over-fussy for small sets but for large sets we could have
3027 : * tied up a nontrivial amount of memory.)
3028 : */
3029 800 : BipartiteMatchFree(state);
3030 800 : pfree(results);
3031 800 : pfree(chains);
3032 2682 : for (i = 1; i <= num_sets; ++i)
3033 1882 : if (adjacency[i])
3034 598 : pfree(adjacency[i]);
3035 800 : pfree(adjacency);
3036 800 : pfree(adjacency_buf);
3037 800 : pfree(orig_sets);
3038 2682 : for (i = 1; i <= num_sets; ++i)
3039 1882 : bms_free(set_masks[i]);
3040 800 : pfree(set_masks);
3041 :
3042 800 : return result;
3043 : }
3044 :
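/*
 * Illustrative example (hypothetical column names a, b, c, d; not drawn from
 * the surrounding code): given
 *
 *     GROUP BY GROUPING SETS ((a,b,c), (a,b), (a), (c,d), (c), ())
 *
 * the non-empty sets can be covered by two chains ordered by inclusion,
 *
 *     chain 1:  {a} < {a,b} < {a,b,c}
 *     chain 2:  {c} < {c,d}
 *
 * so one possible result is two lists, roughly
 *
 *     [ ((), (a), (a,b), (a,b,c)),  ((c), (c,d)) ]
 *
 * with the empty set pushed back onto the first list.  Each list can then be
 * implemented as a single rollup, i.e. with one sort, rather than needing a
 * separate sort per grouping set.
 */
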
3045 : /*
3046 : * Reorder the elements of a list of grouping sets such that they have correct
3047 : * prefix relationships. Also inserts the GroupingSetData annotations.
3048 : *
3049 : * The input must be ordered with smallest sets first; the result is returned
3050 : * with largest sets first. Note that the result shares no list substructure
3051 : * with the input, so it's safe for the caller to modify it later.
3052 : *
3053 : * If we're passed in a sortclause, we follow its order of columns to the
3054 : * extent possible, to minimize the chance that we add unnecessary sorts.
3055 : * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3056 : * gets implemented in one pass.)
3057 : */
3058 : static List *
3059 1354 : reorder_grouping_sets(List *groupingSets, List *sortclause)
3060 : {
3061 : ListCell *lc;
3062 1354 : List *previous = NIL;
3063 1354 : List *result = NIL;
3064 :
3065 4004 : foreach(lc, groupingSets)
3066 : {
3067 2650 : List *candidate = (List *) lfirst(lc);
3068 2650 : List *new_elems = list_difference_int(candidate, previous);
3069 2650 : GroupingSetData *gs = makeNode(GroupingSetData);
3070 :
3071 2814 : while (list_length(sortclause) > list_length(previous) &&
3072 : new_elems != NIL)
3073 : {
3074 272 : SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3075 272 : int ref = sc->tleSortGroupRef;
3076 :
3077 272 : if (list_member_int(new_elems, ref))
3078 : {
3079 164 : previous = lappend_int(previous, ref);
3080 164 : new_elems = list_delete_int(new_elems, ref);
3081 : }
3082 : else
3083 : {
3084 : /* diverged from the sortclause; give up on it */
3085 108 : sortclause = NIL;
3086 108 : break;
3087 : }
3088 : }
3089 :
3090 2650 : previous = list_concat(previous, new_elems);
3091 :
3092 2650 : gs->set = list_copy(previous);
3093 2650 : result = lcons(gs, result);
3094 : }
3095 :
3096 1354 : list_free(previous);
3097 :
3098 1354 : return result;
3099 : }
3100 :
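/*
 * Illustrative trace (hypothetical column names): for the chain
 * ((c), (a,b,c)) with sortclause c, b, a, the loop above proceeds as follows.
 * The first set contributes {c}, which matches the first sortclause column,
 * so previous becomes (c).  The second set adds new elements {a, b};
 * following the sortclause we take b, then a, giving previous = (c, b, a).
 * The result (largest set first) is ((c,b,a), (c)), so sorting the input on
 * (c, b, a) serves both the rollup and the ORDER BY with a single sort.
 */
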
3101 : /*
3102 : * has_volatile_pathkey
3103 : * Returns true if any PathKey in 'keys' has an EquivalenceClass
3104 : * containing a volatile function. Otherwise returns false.
3105 : */
3106 : static bool
3107 2246 : has_volatile_pathkey(List *keys)
3108 : {
3109 : ListCell *lc;
3110 :
3111 4648 : foreach(lc, keys)
3112 : {
3113 2420 : PathKey *pathkey = lfirst_node(PathKey, lc);
3114 :
3115 2420 : if (pathkey->pk_eclass->ec_has_volatile)
3116 18 : return true;
3117 : }
3118 :
3119 2228 : return false;
3120 : }
3121 :
3122 : /*
3123 : * adjust_group_pathkeys_for_groupagg
3124 : * Add pathkeys to root->group_pathkeys to reflect the best set of
3125 : * pre-ordered input for ordered aggregates.
3126 : *
3127 : * We define "best" as the pathkeys that suit the largest number of
3128 : * aggregate functions. We find these by looking at the first ORDER BY /
3129 : * DISTINCT aggregate, taking its pathkeys, and then searching for other
3130 : * aggregates that require the same or a stricter variation of those
3131 : * pathkeys. We then repeat that process for any remaining aggregates with
3132 : * different pathkeys, and if we find another set of pathkeys that suits a
3133 : * larger number of aggregates, we select those pathkeys instead.
3134 : *
3135 : * When the best pathkeys are found we also mark each Aggref that can use
3136 : * those pathkeys as aggpresorted = true.
3137 : *
3138 : * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3139 : * volatile functions, we never make use of these pathkeys. We want to ensure
3140 : * that sorts using volatile functions are done independently in each Aggref
3141 : * rather than once at the query level. If we allowed it, Aggrefs with
3142 : * compatible sort orders would all transition their rows in the same
3143 : * order whenever those pathkeys were deemed the best pathkeys to sort on,
3144 : * whereas if some other Aggref's pathkeys happened to be deemed better
3145 : * pathkeys to sort on, the volatile-function Aggrefs would be left to
3146 : * perform their sorts individually. To avoid this inconsistent behavior,
3147 : * which could make an Aggref's results depend on what other Aggrefs the
3148 : * query contains, we always force Aggrefs with volatile functions to
3149 : * perform their own sorts.
3150 : */
3151 : static void
3152 1826 : adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3153 : {
3154 1826 : List *grouppathkeys = root->group_pathkeys;
3155 : List *bestpathkeys;
3156 : Bitmapset *bestaggs;
3157 : Bitmapset *unprocessed_aggs;
3158 : ListCell *lc;
3159 : int i;
3160 :
3161 : /* Shouldn't be here if there are grouping sets */
3162 : Assert(root->parse->groupingSets == NIL);
3163 : /* Shouldn't be here unless there are some ordered aggregates */
3164 : Assert(root->numOrderedAggs > 0);
3165 :
3166 : /* Do nothing if disabled */
3167 1826 : if (!enable_presorted_aggregate)
3168 6 : return;
3169 :
3170 : /*
3171 : * Make a first pass over all AggInfos to collect a Bitmapset containing
3172 : * the indexes of all AggInfos to be processed below.
3173 : */
3174 1820 : unprocessed_aggs = NULL;
3175 4324 : foreach(lc, root->agginfos)
3176 : {
3177 2504 : AggInfo *agginfo = lfirst_node(AggInfo, lc);
3178 2504 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3179 :
3180 2504 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3181 264 : continue;
3182 :
3183 : /* only add aggregates with a DISTINCT or ORDER BY */
3184 2240 : if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3185 1940 : unprocessed_aggs = bms_add_member(unprocessed_aggs,
3186 : foreach_current_index(lc));
3187 : }
3188 :
3189 : /*
3190 : * Now process all the unprocessed_aggs to find the best set of pathkeys
3191 : * for the given set of aggregates.
3192 : *
3193 : * On the first outer loop here 'bestaggs' will be empty. We'll populate
3194 : * this during the first loop using the pathkeys for the very first
3195 : * AggInfo then taking any stronger pathkeys from any other AggInfos with
3196 : * a more strict set of compatible pathkeys. Once the outer loop is
3197 : * complete, we mark off all the aggregates with compatible pathkeys then
3198 : * remove those from the unprocessed_aggs and repeat the process to try to
3199 : * find another set of pathkeys that are suitable for a larger number of
3200 : * aggregates. The outer loop will stop when there are not enough
3201 : * unprocessed aggregates for it to be possible to find a set of pathkeys
3202 : * to suit a larger number of aggregates.
3203 : */
3204 1820 : bestpathkeys = NIL;
3205 1820 : bestaggs = NULL;
3206 3592 : while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3207 : {
3208 1772 : Bitmapset *aggindexes = NULL;
3209 1772 : List *currpathkeys = NIL;
3210 :
3211 1772 : i = -1;
3212 5790 : while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3213 : {
3214 2246 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3215 2246 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3216 : List *sortlist;
3217 : List *pathkeys;
3218 :
3219 2246 : if (aggref->aggdistinct != NIL)
3220 718 : sortlist = aggref->aggdistinct;
3221 : else
3222 1528 : sortlist = aggref->aggorder;
3223 :
3224 2246 : pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3225 : aggref->args);
3226 :
3227 : /*
3228 : * Ignore Aggrefs which have volatile functions in their ORDER BY
3229 : * or DISTINCT clause.
3230 : */
3231 2246 : if (has_volatile_pathkey(pathkeys))
3232 : {
3233 18 : unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3234 18 : continue;
3235 : }
3236 :
3237 : /*
3238 : * When not set yet, take the pathkeys from the first unprocessed
3239 : * aggregate.
3240 : */
3241 2228 : if (currpathkeys == NIL)
3242 : {
3243 1766 : currpathkeys = pathkeys;
3244 :
3245 : /* include the GROUP BY pathkeys, if they exist */
3246 1766 : if (grouppathkeys != NIL)
3247 270 : currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3248 : currpathkeys);
3249 :
3250 : /* record that we found pathkeys for this aggregate */
3251 1766 : aggindexes = bms_add_member(aggindexes, i);
3252 : }
3253 : else
3254 : {
3255 : /* now look for a stronger set of matching pathkeys */
3256 :
3257 : /* include the GROUP BY pathkeys, if they exist */
3258 462 : if (grouppathkeys != NIL)
3259 288 : pathkeys = append_pathkeys(list_copy(grouppathkeys),
3260 : pathkeys);
3261 :
3262 : /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3263 462 : switch (compare_pathkeys(currpathkeys, pathkeys))
3264 : {
3265 12 : case PATHKEYS_BETTER2:
3266 : /* 'pathkeys' are stronger, use these ones instead */
3267 12 : currpathkeys = pathkeys;
3268 : /* FALLTHROUGH */
3269 :
3270 72 : case PATHKEYS_BETTER1:
3271 : /* 'pathkeys' are less strict */
3272 : /* FALLTHROUGH */
3273 :
3274 : case PATHKEYS_EQUAL:
3275 : /* mark this aggregate as covered by 'currpathkeys' */
3276 72 : aggindexes = bms_add_member(aggindexes, i);
3277 72 : break;
3278 :
3279 390 : case PATHKEYS_DIFFERENT:
3280 390 : break;
3281 : }
3282 4018 : }
3283 : }
3284 :
3285 : /* remove the aggregates that we've just processed */
3286 1772 : unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3287 :
3288 : /*
3289 : * If this pass included more aggregates than the previous best then
3290 : * use these ones as the best set.
3291 : */
3292 1772 : if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3293 : {
3294 1664 : bestaggs = aggindexes;
3295 1664 : bestpathkeys = currpathkeys;
3296 : }
3297 : }
3298 :
3299 : /*
3300 : * If we found any ordered aggregates, update root->group_pathkeys to add
3301 : * the best set of aggregate pathkeys. Note that bestpathkeys includes
3302 : * the original GROUP BY pathkeys already.
3303 : */
3304 1820 : if (bestpathkeys != NIL)
3305 1616 : root->group_pathkeys = bestpathkeys;
3306 :
3307 : /*
3308 : * Now that we've found the best set of aggregates we can set the
3309 : * presorted flag to indicate to the executor that it needn't bother
3310 : * performing a sort for these Aggrefs. We can do this now because there
3311 : * is no chance of a Hash Aggregate plan: create_grouping_paths will not
3312 : * mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence of
3313 : * ordered aggregates.
3314 : */
3315 1820 : i = -1;
3316 3526 : while ((i = bms_next_member(bestaggs, i)) >= 0)
3317 : {
3318 1706 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3319 :
3320 3430 : foreach(lc, agginfo->aggrefs)
3321 : {
3322 1724 : Aggref *aggref = lfirst_node(Aggref, lc);
3323 :
3324 1724 : aggref->aggpresorted = true;
3325 : }
3326 : }
3327 : }
3328 :
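/*
 * Illustrative example (hypothetical table and column names): given
 *
 *     SELECT a, array_agg(b ORDER BY c), array_agg(d ORDER BY c, e)
 *     FROM tab GROUP BY a;
 *
 * the first aggregate yields candidate pathkeys (a, c) (the GROUP BY
 * pathkeys plus its ORDER BY), and the second yields (a, c, e), which is a
 * stricter superset (PATHKEYS_BETTER2).  Both aggregates are therefore
 * covered by (a, c, e): root->group_pathkeys becomes (a, c, e) and both
 * Aggrefs are marked aggpresorted, so a single sort of the input on
 * (a, c, e) serves the grouping and both ordered aggregates.
 */
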
3329 : /*
3330 : * Compute query_pathkeys and other pathkeys during plan generation
3331 : */
3332 : static void
3333 502544 : standard_qp_callback(PlannerInfo *root, void *extra)
3334 : {
3335 502544 : Query *parse = root->parse;
3336 502544 : standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3337 502544 : List *tlist = root->processed_tlist;
3338 502544 : List *activeWindows = qp_extra->activeWindows;
3339 :
3340 : /*
3341 : * Calculate pathkeys that represent grouping/ordering and/or ordered
3342 : * aggregate requirements.
3343 : */
3344 502544 : if (qp_extra->gset_data)
3345 : {
3346 : /*
3347 : * With grouping sets, just use the first RollupData's groupClause. We
3348 : * don't make any effort to optimize grouping clauses when there are
3349 : * grouping sets, nor can we combine aggregate ordering keys with
3350 : * grouping.
3351 : */
3352 848 : List *rollups = qp_extra->gset_data->rollups;
3353 848 : List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3354 :
3355 848 : if (grouping_is_sortable(groupClause))
3356 : {
3357 : bool sortable;
3358 :
3359 : /*
3360 : * The groupClause is logically below the grouping step. So if
3361 : * there is an RTE entry for the grouping step, we need to remove
3362 : * its RT index from the sort expressions before we make PathKeys
3363 : * for them.
3364 : */
3365 848 : root->group_pathkeys =
3366 848 : make_pathkeys_for_sortclauses_extended(root,
3367 : &groupClause,
3368 : tlist,
3369 : false,
3370 848 : parse->hasGroupRTE,
3371 : &sortable,
3372 : false);
3373 : Assert(sortable);
3374 848 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3375 : }
3376 : else
3377 : {
3378 0 : root->group_pathkeys = NIL;
3379 0 : root->num_groupby_pathkeys = 0;
3380 : }
3381 : }
3382 501696 : else if (parse->groupClause || root->numOrderedAggs > 0)
3383 5182 : {
3384 : /*
3385 : * With a plain GROUP BY list, we can remove any grouping items that
3386 : * are proven redundant by EquivalenceClass processing. For example,
3387 : * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3388 : * especially common cases, but they're nearly free to detect. Note
3389 : * that we remove redundant items from processed_groupClause but not
3390 : * the original parse->groupClause.
3391 : */
3392 : bool sortable;
3393 :
3394 : /*
3395 : * Convert group clauses into pathkeys. Set the ec_sortref field of
3396 : * EquivalenceClass'es if it's not set yet.
3397 : */
3398 5182 : root->group_pathkeys =
3399 5182 : make_pathkeys_for_sortclauses_extended(root,
3400 : &root->processed_groupClause,
3401 : tlist,
3402 : true,
3403 : false,
3404 : &sortable,
3405 : true);
3406 5182 : if (!sortable)
3407 : {
3408 : /* Can't sort; no point in considering aggregate ordering either */
3409 0 : root->group_pathkeys = NIL;
3410 0 : root->num_groupby_pathkeys = 0;
3411 : }
3412 : else
3413 : {
3414 5182 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3415 : /* If we have ordered aggs, consider adding onto group_pathkeys */
3416 5182 : if (root->numOrderedAggs > 0)
3417 1826 : adjust_group_pathkeys_for_groupagg(root);
3418 : }
3419 : }
3420 : else
3421 : {
3422 496514 : root->group_pathkeys = NIL;
3423 496514 : root->num_groupby_pathkeys = 0;
3424 : }
3425 :
3426 : /* We consider only the first (bottom) window in pathkeys logic */
3427 502544 : if (activeWindows != NIL)
3428 : {
3429 2336 : WindowClause *wc = linitial_node(WindowClause, activeWindows);
3430 :
3431 2336 : root->window_pathkeys = make_pathkeys_for_window(root,
3432 : wc,
3433 : tlist);
3434 : }
3435 : else
3436 500208 : root->window_pathkeys = NIL;
3437 :
3438 : /*
3439 : * As with GROUP BY, we can discard any DISTINCT items that are proven
3440 : * redundant by EquivalenceClass processing. The non-redundant list is
3441 : * kept in root->processed_distinctClause, leaving the original
3442 : * parse->distinctClause alone.
3443 : */
3444 502544 : if (parse->distinctClause)
3445 : {
3446 : bool sortable;
3447 :
3448 : /* Make a copy since pathkey processing can modify the list */
3449 2418 : root->processed_distinctClause = list_copy(parse->distinctClause);
3450 2418 : root->distinct_pathkeys =
3451 2418 : make_pathkeys_for_sortclauses_extended(root,
3452 : &root->processed_distinctClause,
3453 : tlist,
3454 : true,
3455 : false,
3456 : &sortable,
3457 : false);
3458 2418 : if (!sortable)
3459 6 : root->distinct_pathkeys = NIL;
3460 : }
3461 : else
3462 500126 : root->distinct_pathkeys = NIL;
3463 :
3464 502544 : root->sort_pathkeys =
3465 502544 : make_pathkeys_for_sortclauses(root,
3466 : parse->sortClause,
3467 : tlist);
3468 :
3469 : /* setting setop_pathkeys might be useful to the union planner */
3470 502544 : if (qp_extra->setop != NULL)
3471 : {
3472 : List *groupClauses;
3473 : bool sortable;
3474 :
3475 11528 : groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3476 :
3477 11528 : root->setop_pathkeys =
3478 11528 : make_pathkeys_for_sortclauses_extended(root,
3479 : &groupClauses,
3480 : tlist,
3481 : false,
3482 : false,
3483 : &sortable,
3484 : false);
3485 11528 : if (!sortable)
3486 184 : root->setop_pathkeys = NIL;
3487 : }
3488 : else
3489 491016 : root->setop_pathkeys = NIL;
3490 :
3491 : /*
3492 : * Figure out whether we want a sorted result from query_planner.
3493 : *
3494 : * If we have a sortable GROUP BY clause, then we want a result sorted
3495 : * properly for grouping. Otherwise, if we have window functions to
3496 : * evaluate, we try to sort for the first window. Otherwise, if there's a
3497 : * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3498 : * we try to produce output that's sufficiently well sorted for the
3499 : * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3500 : * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3501 : * for a set operation which can benefit from presorted results and have a
3502 : * sortable targetlist, we want to sort by the target list.
3503 : *
3504 : * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3505 : * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3506 : * that might just leave us failing to exploit an available sort order at
3507 : * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3508 : * much easier, since we know that the parser ensured that one is a
3509 : * superset of the other.
3510 : */
3511 502544 : if (root->group_pathkeys)
3512 5704 : root->query_pathkeys = root->group_pathkeys;
3513 496840 : else if (root->window_pathkeys)
3514 1994 : root->query_pathkeys = root->window_pathkeys;
3515 989692 : else if (list_length(root->distinct_pathkeys) >
3516 494846 : list_length(root->sort_pathkeys))
3517 1974 : root->query_pathkeys = root->distinct_pathkeys;
3518 492872 : else if (root->sort_pathkeys)
3519 57314 : root->query_pathkeys = root->sort_pathkeys;
3520 435558 : else if (root->setop_pathkeys != NIL)
3521 10144 : root->query_pathkeys = root->setop_pathkeys;
3522 : else
3523 425414 : root->query_pathkeys = NIL;
3524 502544 : }
3525 :
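/*
 * Illustrative example (hypothetical column names): for
 *
 *     SELECT DISTINCT a, b FROM tab ORDER BY a;
 *
 * distinct_pathkeys is (a, b) and sort_pathkeys is (a); since the DISTINCT
 * pathkeys are longer, query_pathkeys is set to (a, b), and output sorted
 * that way satisfies both the DISTINCT and the ORDER BY (the parser
 * guarantees that one clause is a superset of the other).
 */
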
3526 : /*
3527 : * Estimate number of groups produced by grouping clauses (1 if not grouping)
3528 : *
3529 : * path_rows: number of output rows from scan/join step
3530 : * gd: grouping sets data including list of grouping sets and their clauses
3531 : * target_list: target list containing group clause references
3532 : *
3533 : * If doing grouping sets, we also annotate the gsets data with the estimates
3534 : * for each set and each individual rollup list, with a view to later
3535 : * determining whether some combination of them could be hashed instead.
3536 : */
3537 : static double
3538 43064 : get_number_of_groups(PlannerInfo *root,
3539 : double path_rows,
3540 : grouping_sets_data *gd,
3541 : List *target_list)
3542 : {
3543 43064 : Query *parse = root->parse;
3544 : double dNumGroups;
3545 :
3546 43064 : if (parse->groupClause)
3547 : {
3548 : List *groupExprs;
3549 :
3550 6874 : if (parse->groupingSets)
3551 : {
3552 : /* Add up the estimates for each grouping set */
3553 : ListCell *lc;
3554 :
3555 : Assert(gd); /* keep Coverity happy */
3556 :
3557 806 : dNumGroups = 0;
3558 :
3559 2118 : foreach(lc, gd->rollups)
3560 : {
3561 1312 : RollupData *rollup = lfirst_node(RollupData, lc);
3562 : ListCell *lc2;
3563 : ListCell *lc3;
3564 :
3565 1312 : groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3566 : target_list);
3567 :
3568 1312 : rollup->numGroups = 0.0;
3569 :
3570 3872 : forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3571 : {
3572 2560 : List *gset = (List *) lfirst(lc2);
3573 2560 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3574 2560 : double numGroups = estimate_num_groups(root,
3575 : groupExprs,
3576 : path_rows,
3577 : &gset,
3578 : NULL);
3579 :
3580 2560 : gs->numGroups = numGroups;
3581 2560 : rollup->numGroups += numGroups;
3582 : }
3583 :
3584 1312 : dNumGroups += rollup->numGroups;
3585 : }
3586 :
3587 806 : if (gd->hash_sets_idx)
3588 : {
3589 : ListCell *lc2;
3590 :
3591 36 : gd->dNumHashGroups = 0;
3592 :
3593 36 : groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3594 : target_list);
3595 :
3596 78 : forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3597 : {
3598 42 : List *gset = (List *) lfirst(lc);
3599 42 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3600 42 : double numGroups = estimate_num_groups(root,
3601 : groupExprs,
3602 : path_rows,
3603 : &gset,
3604 : NULL);
3605 :
3606 42 : gs->numGroups = numGroups;
3607 42 : gd->dNumHashGroups += numGroups;
3608 : }
3609 :
3610 36 : dNumGroups += gd->dNumHashGroups;
3611 : }
3612 : }
3613 : else
3614 : {
3615 : /* Plain GROUP BY -- estimate based on optimized groupClause */
3616 6068 : groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3617 : target_list);
3618 :
3619 6068 : dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3620 : NULL, NULL);
3621 : }
3622 : }
3623 36190 : else if (parse->groupingSets)
3624 : {
3625 : /* Empty grouping sets ... one result row for each one */
3626 42 : dNumGroups = list_length(parse->groupingSets);
3627 : }
3628 36148 : else if (parse->hasAggs || root->hasHavingQual)
3629 : {
3630 : /* Plain aggregation, one result row */
3631 36148 : dNumGroups = 1;
3632 : }
3633 : else
3634 : {
3635 : /* Not grouping */
3636 0 : dNumGroups = 1;
3637 : }
3638 :
3639 43064 : return dNumGroups;
3640 : }
3641 :
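/*
 * Illustrative arithmetic (hypothetical statistics): for
 *
 *     GROUP BY GROUPING SETS ((a), (b))
 *
 * the estimate is the sum of the per-set estimates within each rollup,
 * roughly ndistinct(a) + ndistinct(b), whereas a plain GROUP BY a, b gets a
 * single estimate_num_groups() call over (a, b).
 */
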
3642 : /*
3643 : * create_grouping_paths
3644 : *
3645 : * Build a new upperrel containing Paths for grouping and/or aggregation.
3646 : * Along the way, we also build an upperrel for Paths which are partially
3647 : * grouped and/or aggregated. A partially grouped and/or aggregated path
3648 : * needs a FinalizeAggregate node to complete the aggregation. Currently,
3649 : * the only partially grouped paths we build are also partial paths; that
3650 : * is, they need a Gather and then a FinalizeAggregate.
3651 : *
3652 : * input_rel: contains the source-data Paths
3653 : * target: the pathtarget for the result Paths to compute
3654 : * gd: grouping sets data including list of grouping sets and their clauses
3655 : *
3656 : * Note: all Paths in input_rel are expected to return the target computed
3657 : * by make_group_input_target.
3658 : */
3659 : static RelOptInfo *
3660 39814 : create_grouping_paths(PlannerInfo *root,
3661 : RelOptInfo *input_rel,
3662 : PathTarget *target,
3663 : bool target_parallel_safe,
3664 : grouping_sets_data *gd)
3665 : {
3666 39814 : Query *parse = root->parse;
3667 : RelOptInfo *grouped_rel;
3668 : RelOptInfo *partially_grouped_rel;
3669 : AggClauseCosts agg_costs;
3670 :
3671 238884 : MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3672 39814 : get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3673 :
3674 : /*
3675 : * Create grouping relation to hold fully aggregated grouping and/or
3676 : * aggregation paths.
3677 : */
3678 39814 : grouped_rel = make_grouping_rel(root, input_rel, target,
3679 : target_parallel_safe, parse->havingQual);
3680 :
3681 : /*
3682 : * Create either paths for a degenerate grouping or paths for ordinary
3683 : * grouping, as appropriate.
3684 : */
3685 39814 : if (is_degenerate_grouping(root))
3686 18 : create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3687 : else
3688 : {
3689 39796 : int flags = 0;
3690 : GroupPathExtraData extra;
3691 :
3692 : /*
3693 : * Determine whether it's possible to perform sort-based
3694 : * implementations of grouping. (Note that if processed_groupClause
3695 : * is empty, grouping_is_sortable() is trivially true, and all the
3696 : * pathkeys_contained_in() tests will succeed too, so that we'll
3697 : * consider every surviving input path.)
3698 : *
3699 : * If we have grouping sets, we might be able to sort some but not all
3700 : * of them; in this case, we need can_sort to be true as long as we
3701 : * must consider any sorted-input plan.
3702 : */
3703 39796 : if ((gd && gd->rollups != NIL)
3704 38954 : || grouping_is_sortable(root->processed_groupClause))
3705 39790 : flags |= GROUPING_CAN_USE_SORT;
3706 :
3707 : /*
3708 : * Determine whether we should consider hash-based implementations of
3709 : * grouping.
3710 : *
3711 : * Hashed aggregation only applies if we're grouping. If we have
3712 : * grouping sets, some groups might be hashable but others not; in
3713 : * this case we set can_hash true as long as there is nothing globally
3714 : * preventing us from hashing (and we should therefore consider plans
3715 : * with hashes).
3716 : *
3717 : * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3718 : * BY aggregates. (Doing so would imply storing *all* the input
3719 : * values in the hash table, and/or running many sorts in parallel,
3720 : * either of which seems like a certain loser.) We similarly don't
3721 : * support ordered-set aggregates in hashed aggregation, but that case
3722 : * is also included in the numOrderedAggs count.
3723 : *
3724 : * Note: grouping_is_hashable() is much more expensive to check than
3725 : * the other gating conditions, so we want to do it last.
3726 : */
3727 39796 : if ((parse->groupClause != NIL &&
3728 8526 : root->numOrderedAggs == 0 &&
3729 4126 : (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3730 4122 : flags |= GROUPING_CAN_USE_HASH;
3731 :
3732 : /*
3733 : * Determine whether partial aggregation is possible.
3734 : */
3735 39796 : if (can_partial_agg(root))
3736 35708 : flags |= GROUPING_CAN_PARTIAL_AGG;
3737 :
3738 39796 : extra.flags = flags;
3739 39796 : extra.target_parallel_safe = target_parallel_safe;
3740 39796 : extra.havingQual = parse->havingQual;
3741 39796 : extra.targetList = parse->targetList;
3742 39796 : extra.partial_costs_set = false;
3743 :
3744 : /*
3745 : * Determine whether partitionwise aggregation is in theory possible.
3746 : * It can be disabled by the user, and for now, we don't try to
3747 : * support grouping sets. create_ordinary_grouping_paths() will check
3748 : * additional conditions, such as whether input_rel is partitioned.
3749 : */
3750 39796 : if (enable_partitionwise_aggregate && !parse->groupingSets)
3751 556 : extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3752 : else
3753 39240 : extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3754 :
3755 39796 : create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3756 : &agg_costs, gd, &extra,
3757 : &partially_grouped_rel);
3758 : }
3759 :
3760 39808 : set_cheapest(grouped_rel);
3761 39808 : return grouped_rel;
3762 : }
3763 :
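/*
 * Illustrative example (hypothetical query): for
 *
 *     SELECT a, count(*) FROM tab GROUP BY a;
 *
 * with column a both sortable and hashable and no ordered aggregates, the
 * code above sets both GROUPING_CAN_USE_SORT and GROUPING_CAN_USE_HASH (and
 * GROUPING_CAN_PARTIAL_AGG, since count() supports partial aggregation), so
 * add_paths_to_grouping_rel can later cost sorted GroupAggregate and
 * HashAggregate alternatives against each other.
 */
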
3764 : /*
3765 : * make_grouping_rel
3766 : *
3767 : * Create a new grouping rel and set basic properties.
3768 : *
3769 : * input_rel represents the underlying scan/join relation.
3770 : * target is the output expected from the grouping relation.
3771 : */
3772 : static RelOptInfo *
3773 41308 : make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3774 : PathTarget *target, bool target_parallel_safe,
3775 : Node *havingQual)
3776 : {
3777 : RelOptInfo *grouped_rel;
3778 :
3779 41308 : if (IS_OTHER_REL(input_rel))
3780 : {
3781 1494 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3782 : input_rel->relids);
3783 1494 : grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3784 : }
3785 : else
3786 : {
3787 : /*
3788 : * By tradition, the relids set for the main grouping relation is
3789 : * NULL. (This could be changed, but might require adjustments
3790 : * elsewhere.)
3791 : */
3792 39814 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3793 : }
3794 :
3795 : /* Set target. */
3796 41308 : grouped_rel->reltarget = target;
3797 :
3798 : /*
3799 : * If the input relation is not parallel-safe, then the grouped relation
3800 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3801 : * target list and HAVING quals are parallel-safe.
3802 : */
3803 71538 : if (input_rel->consider_parallel && target_parallel_safe &&
3804 30230 : is_parallel_safe(root, (Node *) havingQual))
3805 30212 : grouped_rel->consider_parallel = true;
3806 :
3807 : /*
3808 : * If the input rel belongs to a single FDW, so does the grouped rel.
3809 : */
3810 41308 : grouped_rel->serverid = input_rel->serverid;
3811 41308 : grouped_rel->userid = input_rel->userid;
3812 41308 : grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3813 41308 : grouped_rel->fdwroutine = input_rel->fdwroutine;
3814 :
3815 41308 : return grouped_rel;
3816 : }
3817 :
3818 : /*
3819 : * is_degenerate_grouping
3820 : *
3821 : * A degenerate grouping is one in which the query has a HAVING qual and/or
3822 : * grouping sets, but no aggregates and no GROUP BY (which implies that the
3823 : * grouping sets are all empty).
3824 : */
3825 : static bool
3826 39814 : is_degenerate_grouping(PlannerInfo *root)
3827 : {
3828 39814 : Query *parse = root->parse;
3829 :
3830 38506 : return (root->hasHavingQual || parse->groupingSets) &&
3831 78320 : !parse->hasAggs && parse->groupClause == NIL;
3832 : }
3833 :
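/*
 * Illustrative example (hypothetical table name): a query such as
 *
 *     SELECT 1 FROM tab HAVING random() < 0.5;
 *
 * has a HAVING clause but no aggregates and no GROUP BY, so it is
 * "degenerate": it emits exactly one row when the HAVING qual passes and
 * zero rows otherwise, without needing to scan tab at all (see
 * create_degenerate_grouping_paths below).
 */
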
3834 : /*
3835 : * create_degenerate_grouping_paths
3836 : *
3837 : * When the grouping is degenerate (see is_degenerate_grouping), we are
3838 : * supposed to emit either zero or one row for each grouping set depending on
3839 : * whether HAVING succeeds. Furthermore, there cannot be any variables in
3840 : * either HAVING or the targetlist, so we actually do not need the FROM table
3841 : * at all! We can just throw away the plan-so-far and generate a Result node.
3842 : * This is a sufficiently unusual corner case that it's not worth contorting
3843 : * the structure of this module to avoid having to generate the earlier paths
3844 : * in the first place.
3845 : */
3846 : static void
3847 18 : create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3848 : RelOptInfo *grouped_rel)
3849 : {
3850 18 : Query *parse = root->parse;
3851 : int nrows;
3852 : Path *path;
3853 :
3854 18 : nrows = list_length(parse->groupingSets);
3855 18 : if (nrows > 1)
3856 : {
3857 : /*
3858 : * Doesn't seem worthwhile writing code to cons up a generate_series
3859 : * or a values scan to emit multiple rows. Instead just make N clones
3860 : * and append them. (With a volatile HAVING clause, this means you
3861 : * might get between 0 and N output rows. Offhand I think that's
3862 : * desired.)
3863 : */
3864 0 : List *paths = NIL;
3865 :
3866 0 : while (--nrows >= 0)
3867 : {
3868 : path = (Path *)
3869 0 : create_group_result_path(root, grouped_rel,
3870 0 : grouped_rel->reltarget,
3871 0 : (List *) parse->havingQual);
3872 0 : paths = lappend(paths, path);
3873 : }
3874 : path = (Path *)
3875 0 : create_append_path(root,
3876 : grouped_rel,
3877 : paths,
3878 : NIL,
3879 : NIL,
3880 : NULL,
3881 : 0,
3882 : false,
3883 : -1);
3884 : }
3885 : else
3886 : {
3887 : /* No grouping sets, or just one, so one output row */
3888 : path = (Path *)
3889 18 : create_group_result_path(root, grouped_rel,
3890 18 : grouped_rel->reltarget,
3891 18 : (List *) parse->havingQual);
3892 : }
3893 :
3894 18 : add_path(grouped_rel, path);
3895 18 : }
3896 :
3897 : /*
3898 : * create_ordinary_grouping_paths
3899 : *
3900 : * Create grouping paths for the ordinary (that is, non-degenerate) case.
3901 : *
3902 : * We need to consider sorted and hashed aggregation in the same function,
3903 : * because otherwise (1) it would be harder to throw an appropriate error
3904 : * message if neither way works, and (2) we should not allow hashtable size
3905 : * considerations to dissuade us from using hashing if sorting is not possible.
3906 : *
3907 : * *partially_grouped_rel_p will be set to the partially grouped rel which this
3908 : * function creates, or to NULL if it doesn't create one.
3909 : */
3910 : static void
3911 41290 : create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3912 : RelOptInfo *grouped_rel,
3913 : const AggClauseCosts *agg_costs,
3914 : grouping_sets_data *gd,
3915 : GroupPathExtraData *extra,
3916 : RelOptInfo **partially_grouped_rel_p)
3917 : {
3918 41290 : Path *cheapest_path = input_rel->cheapest_total_path;
3919 41290 : RelOptInfo *partially_grouped_rel = NULL;
3920 : double dNumGroups;
3921 41290 : PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
3922 :
3923 : /*
3924 : * If this is the topmost grouping relation or if the parent relation is
3925 : * doing some form of partitionwise aggregation, then we may be able to do
3926 : * it at this level also. However, if the input relation is not
3927 : * partitioned, partitionwise aggregate is impossible.
3928 : */
3929 41290 : if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3930 2050 : IS_PARTITIONED_REL(input_rel))
3931 : {
3932 : /*
3933 : * If this is the topmost relation or if the parent relation is doing
3934 : * full partitionwise aggregation, then we can do full partitionwise
3935 : * aggregation provided that the GROUP BY clause contains all of the
3936 : * partitioning columns at this level and the collation used by GROUP
3937 : * BY matches the partitioning collation. Otherwise, we can do at
3938 : * most partial partitionwise aggregation. But if partial aggregation
3939 : * is not supported in general then we can't use it for partitionwise
3940 : * aggregation either.
3941 : *
3942 : * Check parse->groupClause not processed_groupClause, because it's
3943 : * okay if some of the partitioning columns were proved redundant.
3944 : */
3945 1160 : if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
3946 556 : group_by_has_partkey(input_rel, extra->targetList,
3947 556 : root->parse->groupClause))
3948 320 : patype = PARTITIONWISE_AGGREGATE_FULL;
3949 284 : else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3950 242 : patype = PARTITIONWISE_AGGREGATE_PARTIAL;
3951 : else
3952 42 : patype = PARTITIONWISE_AGGREGATE_NONE;
3953 : }
3954 :
3955 : /*
3956 : * Before generating paths for grouped_rel, we first generate any possible
3957 : * partially grouped paths; that way, later code can easily consider both
3958 : * parallel and non-parallel approaches to grouping.
3959 : */
3960 41290 : if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3961 : {
3962 : bool force_rel_creation;
3963 :
3964 : /*
3965 : * If we're doing partitionwise aggregation at this level, force
3966 : * creation of a partially_grouped_rel so we can add partitionwise
3967 : * paths to it.
3968 : */
3969 37130 : force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
3970 :
3971 : partially_grouped_rel =
3972 37130 : create_partial_grouping_paths(root,
3973 : grouped_rel,
3974 : input_rel,
3975 : gd,
3976 : extra,
3977 : force_rel_creation);
3978 : }
3979 :
3980 : /* Set out parameter. */
3981 41290 : *partially_grouped_rel_p = partially_grouped_rel;
3982 :
3983 : /* Apply partitionwise aggregation technique, if possible. */
3984 41290 : if (patype != PARTITIONWISE_AGGREGATE_NONE)
3985 562 : create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
3986 : partially_grouped_rel, agg_costs,
3987 : gd, patype, extra);
3988 :
3989 : /* If we are doing partial aggregation only, return. */
3990 41290 : if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
3991 : {
3992 : Assert(partially_grouped_rel);
3993 :
3994 618 : if (partially_grouped_rel->pathlist)
3995 618 : set_cheapest(partially_grouped_rel);
3996 :
3997 618 : return;
3998 : }
3999 :
4000 : /* Gather any partially grouped partial paths. */
4001 40672 : if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4002 : {
4003 1474 : gather_grouping_paths(root, partially_grouped_rel);
4004 1474 : set_cheapest(partially_grouped_rel);
4005 : }
4006 :
4007 : /*
4008 : * Estimate number of groups.
4009 : */
4010 40672 : dNumGroups = get_number_of_groups(root,
4011 : cheapest_path->rows,
4012 : gd,
4013 : extra->targetList);
4014 :
4015 : /* Build final grouping paths */
4016 40672 : add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4017 : partially_grouped_rel, agg_costs, gd,
4018 : dNumGroups, extra);
4019 :
4020 : /* Give a helpful error if we failed to find any implementation */
4021 40672 : if (grouped_rel->pathlist == NIL)
4022 6 : ereport(ERROR,
4023 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4024 : errmsg("could not implement GROUP BY"),
4025 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4026 :
4027 : /*
4028 : * If there is an FDW that's responsible for all baserels of the query,
4029 : * let it consider adding ForeignPaths.
4030 : */
4031 40666 : if (grouped_rel->fdwroutine &&
4032 332 : grouped_rel->fdwroutine->GetForeignUpperPaths)
4033 332 : grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4034 : input_rel, grouped_rel,
4035 : extra);
4036 :
4037 : /* Let extensions possibly add some more paths */
4038 40666 : if (create_upper_paths_hook)
4039 0 : (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4040 : input_rel, grouped_rel,
4041 : extra);
4042 : }
4043 :
4044 : /*
4045 : * For a given input path, consider the possible ways of doing grouping sets on
4046 : * it, by combinations of hashing and sorting. This can be called multiple
4047 : * times, so it's important that it not scribble on input. No result is
4048 : * returned, but any generated paths are added to grouped_rel.
4049 : */
4050 : static void
4051 1684 : consider_groupingsets_paths(PlannerInfo *root,
4052 : RelOptInfo *grouped_rel,
4053 : Path *path,
4054 : bool is_sorted,
4055 : bool can_hash,
4056 : grouping_sets_data *gd,
4057 : const AggClauseCosts *agg_costs,
4058 : double dNumGroups)
4059 : {
4060 1684 : Query *parse = root->parse;
4061 1684 : Size hash_mem_limit = get_hash_memory_limit();
4062 :
4063 : /*
4064 : * If we're not being offered sorted input, then only consider plans that
4065 : * can be done entirely by hashing.
4066 : *
4067 : * We can hash everything if it looks like it'll fit in hash_mem. But if
4068 : * the input is actually sorted despite not being advertised as such, we
4069 : * prefer to make use of that in order to use less memory.
4070 : *
4071 : * If none of the grouping sets are sortable, then ignore the hash_mem
4072 : * limit and generate a path anyway, since otherwise we'll just fail.
4073 : */
4074 1684 : if (!is_sorted)
4075 : {
4076 770 : List *new_rollups = NIL;
4077 770 : RollupData *unhashed_rollup = NULL;
4078 : List *sets_data;
4079 770 : List *empty_sets_data = NIL;
4080 770 : List *empty_sets = NIL;
4081 : ListCell *lc;
4082 770 : ListCell *l_start = list_head(gd->rollups);
4083 770 : AggStrategy strat = AGG_HASHED;
4084 : double hashsize;
4085 770 : double exclude_groups = 0.0;
4086 :
4087 : Assert(can_hash);
4088 :
4089 : /*
4090 : * If the input is coincidentally sorted usefully (which can happen
4091 : * even if is_sorted is false, since that only means that our caller
4092 : * has set up the sorting for us), then save some hashtable space by
4093 : * making use of that. But we need to watch out for degenerate cases:
4094 : *
4095 : * 1) If there are any empty grouping sets, then group_pathkeys might
4096 : * be NIL if all non-empty grouping sets are unsortable. In this case,
4097 : * there will be a rollup containing only empty groups, and the
4098 : * pathkeys_contained_in test is vacuously true; this is ok.
4099 : *
4100 : * XXX: the above relies on the fact that group_pathkeys is generated
4101 : * from the first rollup. If we add the ability to consider multiple
4102 : * sort orders for grouping input, this assumption might fail.
4103 : *
4104 : * 2) If there are no empty sets and only unsortable sets, then the
4105 : * rollups list will be empty (and thus l_start == NULL), and
4106 : * group_pathkeys will be NIL; we must ensure that the vacuously-true
4107 : * pathkeys_contained_in test doesn't cause us to crash.
4108 : */
4109 1534 : if (l_start != NULL &&
4110 764 : pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4111 : {
4112 12 : unhashed_rollup = lfirst_node(RollupData, l_start);
4113 12 : exclude_groups = unhashed_rollup->numGroups;
4114 12 : l_start = lnext(gd->rollups, l_start);
4115 : }
4116 :
4117 770 : hashsize = estimate_hashagg_tablesize(root,
4118 : path,
4119 : agg_costs,
4120 : dNumGroups - exclude_groups);
4121 :
4122 : /*
4123 : * gd->rollups is empty if we have only unsortable columns to work
4124 : * with. Override hash_mem in that case; otherwise, we'll rely on the
4125 : * sorted-input case to generate usable mixed paths.
4126 : */
4127 770 : if (hashsize > hash_mem_limit && gd->rollups)
4128 18 : return; /* nope, won't fit */
4129 :
4130 : /*
4131 : * We need to burst the existing rollups list into individual grouping
4132 : * sets and recompute a groupClause for each set.
4133 : */
4134 752 : sets_data = list_copy(gd->unsortable_sets);
4135 :
4136 1872 : for_each_cell(lc, gd->rollups, l_start)
4137 : {
4138 1144 : RollupData *rollup = lfirst_node(RollupData, lc);
4139 :
4140 : /*
4141 : * If we find an unhashable rollup that's not been skipped by the
4142 : * "actually sorted" check above, we can't cope; we'd need sorted
4143 : * input (with a different sort order) but we can't get that here.
4144 : * So bail out; we'll get a valid path from the is_sorted case
4145 : * instead.
4146 : *
4147 : * The mere presence of empty grouping sets doesn't make a rollup
4148 : * unhashable (see preprocess_grouping_sets); we handle those
4149 : * specially below.
4150 : */
4151 1144 : if (!rollup->hashable)
4152 24 : return;
4153 :
4154 1120 : sets_data = list_concat(sets_data, rollup->gsets_data);
4155 : }
4156 3054 : foreach(lc, sets_data)
4157 : {
4158 2326 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4159 2326 : List *gset = gs->set;
4160 : RollupData *rollup;
4161 :
4162 2326 : if (gset == NIL)
4163 : {
4164 : /* Empty grouping sets can't be hashed. */
4165 484 : empty_sets_data = lappend(empty_sets_data, gs);
4166 484 : empty_sets = lappend(empty_sets, NIL);
4167 : }
4168 : else
4169 : {
4170 1842 : rollup = makeNode(RollupData);
4171 :
4172 1842 : rollup->groupClause = preprocess_groupclause(root, gset);
4173 1842 : rollup->gsets_data = list_make1(gs);
4174 1842 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4175 : rollup->gsets_data,
4176 : gd->tleref_to_colnum_map);
4177 1842 : rollup->numGroups = gs->numGroups;
4178 1842 : rollup->hashable = true;
4179 1842 : rollup->is_hashed = true;
4180 1842 : new_rollups = lappend(new_rollups, rollup);
4181 : }
4182 : }
4183 :
4184 : /*
4185 : * If we didn't find anything nonempty to hash, then bail. We'll
4186 : * generate a path from the is_sorted case.
4187 : */
4188 728 : if (new_rollups == NIL)
4189 0 : return;
4190 :
4191 : /*
4192 : * If there were empty grouping sets they should have been in the
4193 : * first rollup.
4194 : */
4195 : Assert(!unhashed_rollup || !empty_sets);
4196 :
4197 728 : if (unhashed_rollup)
4198 : {
4199 12 : new_rollups = lappend(new_rollups, unhashed_rollup);
4200 12 : strat = AGG_MIXED;
4201 : }
4202 716 : else if (empty_sets)
4203 : {
4204 436 : RollupData *rollup = makeNode(RollupData);
4205 :
4206 436 : rollup->groupClause = NIL;
4207 436 : rollup->gsets_data = empty_sets_data;
4208 436 : rollup->gsets = empty_sets;
4209 436 : rollup->numGroups = list_length(empty_sets);
4210 436 : rollup->hashable = false;
4211 436 : rollup->is_hashed = false;
4212 436 : new_rollups = lappend(new_rollups, rollup);
4213 436 : strat = AGG_MIXED;
4214 : }
4215 :
4216 728 : add_path(grouped_rel, (Path *)
4217 728 : create_groupingsets_path(root,
4218 : grouped_rel,
4219 : path,
4220 728 : (List *) parse->havingQual,
4221 : strat,
4222 : new_rollups,
4223 : agg_costs));
4224 728 : return;
4225 : }
4226 :
4227 : /*
4228 : * If we have sorted input but nothing we can do with it, bail.
4229 : */
4230 914 : if (gd->rollups == NIL)
4231 0 : return;
4232 :
4233 : /*
4234 : * Given sorted input, we try to make two paths: one sorted and one mixed
4235 : * sort/hash. (We need to try both because hashagg might be disabled, or
4236 : * some columns might not be sortable.)
4237 : *
4238 : * can_hash is passed in as false if some obstacle elsewhere (such as
4239 : * ordered aggs) means that we shouldn't consider hashing at all.
4240 : */
4241 914 : if (can_hash && gd->any_hashable)
4242 : {
4243 836 : List *rollups = NIL;
4244 836 : List *hash_sets = list_copy(gd->unsortable_sets);
4245 836 : double availspace = hash_mem_limit;
4246 : ListCell *lc;
4247 :
4248 : /*
4249 : * Account first for space needed for groups we can't sort at all.
4250 : */
4251 836 : availspace -= estimate_hashagg_tablesize(root,
4252 : path,
4253 : agg_costs,
4254 : gd->dNumHashGroups);
4255 :
4256 836 : if (availspace > 0 && list_length(gd->rollups) > 1)
4257 : {
4258 : double scale;
4259 420 : int num_rollups = list_length(gd->rollups);
4260 : int k_capacity;
4261 420 : int *k_weights = palloc(num_rollups * sizeof(int));
4262 420 : Bitmapset *hash_items = NULL;
4263 : int i;
4264 :
4265 : /*
4266 : * We treat this as a knapsack problem: the knapsack capacity
4267 : * represents hash_mem, the item weights are the estimated memory
4268 : * usage of the hashtables needed to implement a single rollup,
4269 : * and we really ought to use the cost saving as the item value;
4270 : * however, currently the costs assigned to sort nodes don't
4271 : * reflect the comparison costs well, and so we treat all items as
4272 : * of equal value (each rollup we hash instead saves us one sort).
4273 : *
4274 : * To use the discrete knapsack, we need to scale the values to a
4275 : * reasonably small bounded range. We choose to allow a 5% error
4276 : * margin; we have no more than 4096 rollups in the worst possible
4277 : * case, which with a 5% error margin will require a bit over 42MB
4278 : * of workspace. (Anyone wanting to plan queries that complex had
4279 : * better have the memory for it. In more reasonable cases, with
4280 : * no more than a couple of dozen rollups, the memory usage will
4281 : * be negligible.)
4282 : *
4283 : * k_capacity is naturally bounded, but we clamp the values for
4284 : * scale and weight (below) to avoid overflows or underflows (or
4285 : * uselessly trying to use a scale factor less than 1 byte).
4286 : */
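/*
 * Illustrative numbers (hypothetical hash_mem): with availspace of 4MB and
 * num_rollups = 4, scale comes out around 52429 bytes and k_capacity is 80
 * units, so a rollup whose hashtable is estimated at 1MB gets a weight of
 * about 20 units.  Quantizing each of the (at most num_rollups) weights
 * loses less than one unit apiece, which is where the ~5% error margin
 * mentioned above comes from.
 */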
4287 420 : scale = Max(availspace / (20.0 * num_rollups), 1.0);
4288 420 : k_capacity = (int) floor(availspace / scale);
4289 :
4290 : /*
4291 : * We leave the first rollup out of consideration since it's the
4292 : * one that matches the input sort order. We assign indexes "i"
4293 : * to only those entries considered for hashing; the second loop,
4294 : * below, must use the same condition.
4295 : */
4296 420 : i = 0;
4297 1056 : for_each_from(lc, gd->rollups, 1)
4298 : {
4299 636 : RollupData *rollup = lfirst_node(RollupData, lc);
4300 :
4301 636 : if (rollup->hashable)
4302 : {
4303 636 : double sz = estimate_hashagg_tablesize(root,
4304 : path,
4305 : agg_costs,
4306 : rollup->numGroups);
4307 :
4308 : /*
4309 : * If sz is enormous, but hash_mem (and hence scale) is
4310 : * small, avoid integer overflow here.
4311 : */
4312 636 : k_weights[i] = (int) Min(floor(sz / scale),
4313 : k_capacity + 1.0);
4314 636 : ++i;
4315 : }
4316 : }
4317 :
4318 : /*
4319 : * Apply knapsack algorithm; compute the set of items which
4320 : * maximizes the value stored (in this case the number of sorts
4321 : * saved) while keeping the total size (approximately) within
4322 : * capacity.
4323 : */
4324 420 : if (i > 0)
4325 420 : hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4326 :
4327 420 : if (!bms_is_empty(hash_items))
4328 : {
4329 420 : rollups = list_make1(linitial(gd->rollups));
4330 :
4331 420 : i = 0;
4332 1056 : for_each_from(lc, gd->rollups, 1)
4333 : {
4334 636 : RollupData *rollup = lfirst_node(RollupData, lc);
4335 :
4336 636 : if (rollup->hashable)
4337 : {
4338 636 : if (bms_is_member(i, hash_items))
4339 600 : hash_sets = list_concat(hash_sets,
4340 600 : rollup->gsets_data);
4341 : else
4342 36 : rollups = lappend(rollups, rollup);
4343 636 : ++i;
4344 : }
4345 : else
4346 0 : rollups = lappend(rollups, rollup);
4347 : }
4348 : }
4349 : }
4350 :
4351 836 : if (!rollups && hash_sets)
4352 24 : rollups = list_copy(gd->rollups);
4353 :
4354 1576 : foreach(lc, hash_sets)
4355 : {
4356 740 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4357 740 : RollupData *rollup = makeNode(RollupData);
4358 :
4359 : Assert(gs->set != NIL);
4360 :
4361 740 : rollup->groupClause = preprocess_groupclause(root, gs->set);
4362 740 : rollup->gsets_data = list_make1(gs);
4363 740 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4364 : rollup->gsets_data,
4365 : gd->tleref_to_colnum_map);
4366 740 : rollup->numGroups = gs->numGroups;
4367 740 : rollup->hashable = true;
4368 740 : rollup->is_hashed = true;
4369 740 : rollups = lcons(rollup, rollups);
4370 : }
4371 :
4372 836 : if (rollups)
4373 : {
4374 444 : add_path(grouped_rel, (Path *)
4375 444 : create_groupingsets_path(root,
4376 : grouped_rel,
4377 : path,
4378 444 : (List *) parse->havingQual,
4379 : AGG_MIXED,
4380 : rollups,
4381 : agg_costs));
4382 : }
4383 : }
4384 :
4385 : /*
4386 : * Now try the simple sorted case.
4387 : */
4388 914 : if (!gd->unsortable_sets)
4389 884 : add_path(grouped_rel, (Path *)
4390 884 : create_groupingsets_path(root,
4391 : grouped_rel,
4392 : path,
4393 884 : (List *) parse->havingQual,
4394 : AGG_SORTED,
4395 : gd->rollups,
4396 : agg_costs));
4397 : }
4398 :
4399 : /*
4400 : * create_window_paths
4401 : *
4402 : * Build a new upperrel containing Paths for window-function evaluation.
4403 : *
4404 : * input_rel: contains the source-data Paths
4405 : * input_target: result of make_window_input_target
4406 : * output_target: what the topmost WindowAggPath should return
4407 : * wflists: result of find_window_functions
4408 : * activeWindows: result of select_active_windows
4409 : *
4410 : * Note: all Paths in input_rel are expected to return input_target.
4411 : */
4412 : static RelOptInfo *
4413 2336 : create_window_paths(PlannerInfo *root,
4414 : RelOptInfo *input_rel,
4415 : PathTarget *input_target,
4416 : PathTarget *output_target,
4417 : bool output_target_parallel_safe,
4418 : WindowFuncLists *wflists,
4419 : List *activeWindows)
4420 : {
4421 : RelOptInfo *window_rel;
4422 : ListCell *lc;
4423 :
4424 : /* For now, do all work in the (WINDOW, NULL) upperrel */
4425 2336 : window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4426 :
4427 : /*
4428 : * If the input relation is not parallel-safe, then the window relation
4429 : * can't be parallel-safe, either. Otherwise, we need to examine the
4430 : * target list and active windows for non-parallel-safe constructs.
4431 : */
4432 2336 : if (input_rel->consider_parallel && output_target_parallel_safe &&
4433 0 : is_parallel_safe(root, (Node *) activeWindows))
4434 0 : window_rel->consider_parallel = true;
4435 :
4436 : /*
4437 : * If the input rel belongs to a single FDW, so does the window rel.
4438 : */
4439 2336 : window_rel->serverid = input_rel->serverid;
4440 2336 : window_rel->userid = input_rel->userid;
4441 2336 : window_rel->useridiscurrent = input_rel->useridiscurrent;
4442 2336 : window_rel->fdwroutine = input_rel->fdwroutine;
4443 :
4444 : /*
4445 : * Consider computing window functions starting from the existing
4446 : * cheapest-total path (which will likely require a sort) as well as any
4447 : * existing paths that satisfy or partially satisfy root->window_pathkeys.
4448 : */
4449 4984 : foreach(lc, input_rel->pathlist)
4450 : {
4451 2648 : Path *path = (Path *) lfirst(lc);
4452 : int presorted_keys;
4453 :
4454 2960 : if (path == input_rel->cheapest_total_path ||
4455 312 : pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4456 144 : &presorted_keys) ||
4457 144 : presorted_keys > 0)
4458 2534 : create_one_window_path(root,
4459 : window_rel,
4460 : path,
4461 : input_target,
4462 : output_target,
4463 : wflists,
4464 : activeWindows);
4465 : }
4466 :
4467 : /*
4468 : * If there is an FDW that's responsible for all baserels of the query,
4469 : * let it consider adding ForeignPaths.
4470 : */
4471 2336 : if (window_rel->fdwroutine &&
4472 12 : window_rel->fdwroutine->GetForeignUpperPaths)
4473 12 : window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4474 : input_rel, window_rel,
4475 : NULL);
4476 :
4477 : /* Let extensions possibly add some more paths */
4478 2336 : if (create_upper_paths_hook)
4479 0 : (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4480 : input_rel, window_rel, NULL);
4481 :
4482 : /* Now choose the best path(s) */
4483 2336 : set_cheapest(window_rel);
4484 :
4485 2336 : return window_rel;
4486 : }
4487 :
4488 : /*
4489 : * Stack window-function implementation steps atop the given Path, and
4490 : * add the result to window_rel.
4491 : *
4492 : * window_rel: upperrel to contain result
4493 : * path: input Path to use (must return input_target)
4494 : * input_target: result of make_window_input_target
4495 : * output_target: what the topmost WindowAggPath should return
4496 : * wflists: result of find_window_functions
4497 : * activeWindows: result of select_active_windows
4498 : */
4499 : static void
4500 2534 : create_one_window_path(PlannerInfo *root,
4501 : RelOptInfo *window_rel,
4502 : Path *path,
4503 : PathTarget *input_target,
4504 : PathTarget *output_target,
4505 : WindowFuncLists *wflists,
4506 : List *activeWindows)
4507 : {
4508 : PathTarget *window_target;
4509 : ListCell *l;
4510 2534 : List *topqual = NIL;
4511 :
4512 : /*
4513 : * Since each window clause could require a different sort order, we stack
4514 : * up a WindowAgg node for each clause, with sort steps between them as
4515 : * needed. (We assume that select_active_windows chose a good order for
4516 : * executing the clauses in.)
4517 : *
4518 : * input_target should contain all Vars and Aggs needed for the result.
4519 : * (In some cases we wouldn't need to propagate all of these all the way
4520 : * to the top, since they might only be needed as inputs to WindowFuncs.
4521 : * It's probably not worth trying to optimize that though.) It must also
4522 : * contain all window partitioning and sorting expressions, to ensure
4523 : * they're computed only once at the bottom of the stack (that's critical
4524 : * for volatile functions). As we climb up the stack, we'll add outputs
4525 : * for the WindowFuncs computed at each level.
4526 : */
4527 2534 : window_target = input_target;
4528 :
4529 5218 : foreach(l, activeWindows)
4530 : {
4531 2684 : WindowClause *wc = lfirst_node(WindowClause, l);
4532 : List *window_pathkeys;
4533 2684 : List *runcondition = NIL;
4534 : int presorted_keys;
4535 : bool is_sorted;
4536 : bool topwindow;
4537 : ListCell *lc2;
4538 :
4539 2684 : window_pathkeys = make_pathkeys_for_window(root,
4540 : wc,
4541 : root->processed_tlist);
4542 :
4543 2684 : is_sorted = pathkeys_count_contained_in(window_pathkeys,
4544 : path->pathkeys,
4545 : &presorted_keys);
4546 :
4547 : /* Sort if necessary */
4548 2684 : if (!is_sorted)
4549 : {
4550 : /*
4551 : * No presorted keys or incremental sort disabled, just perform a
4552 : * complete sort.
4553 : */
4554 2072 : if (presorted_keys == 0 || !enable_incremental_sort)
4555 2006 : path = (Path *) create_sort_path(root, window_rel,
4556 : path,
4557 : window_pathkeys,
4558 : -1.0);
4559 : else
4560 : {
4561 : /*
4562 : * Since we have presorted keys and incremental sort is
4563 : * enabled, just use incremental sort.
4564 : */
4565 66 : path = (Path *) create_incremental_sort_path(root,
4566 : window_rel,
4567 : path,
4568 : window_pathkeys,
4569 : presorted_keys,
4570 : -1.0);
4571 : }
4572 : }
4573 :
4574 2684 : if (lnext(activeWindows, l))
4575 : {
4576 : /*
4577 : * Add the current WindowFuncs to the output target for this
4578 : * intermediate WindowAggPath. We must copy window_target to
4579 : * avoid changing the previous path's target.
4580 : *
4581 : * Note: a WindowFunc adds nothing to the target's eval costs; but
4582 : * we do need to account for the increase in tlist width.
4583 : */
4584 150 : int64 tuple_width = window_target->width;
4585 :
4586 150 : window_target = copy_pathtarget(window_target);
4587 342 : foreach(lc2, wflists->windowFuncs[wc->winref])
4588 : {
4589 192 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4590 :
4591 192 : add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4592 192 : tuple_width += get_typavgwidth(wfunc->wintype, -1);
4593 : }
4594 150 : window_target->width = clamp_width_est(tuple_width);
4595 : }
4596 : else
4597 : {
4598 : /* Install the goal target in the topmost WindowAgg */
4599 2534 : window_target = output_target;
4600 : }
4601 :
4602 : /* mark the final item in the list as the top-level window */
4603 2684 : topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4604 :
4605 : /*
4606 : * Collect the WindowFuncRunConditions from each WindowFunc and
4607 : * convert them into OpExprs
4608 : */
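 :         /*
 :          * Illustrative example (editorial note, not part of the original
 :          * source): a run condition recorded for a monotonic window
 :          * function, e.g. from a qual like
 :          *     row_number() OVER (ORDER BY a) <= 10
 :          * is rebuilt here as an OpExpr comparing the WindowFunc to the
 :          * constant, which lets the executor stop processing a partition
 :          * once the condition can no longer be satisfied.
 :          */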
4609 6076 : foreach(lc2, wflists->windowFuncs[wc->winref])
4610 : {
4611 : ListCell *lc3;
4612 3392 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4613 :
4614 3572 : foreach(lc3, wfunc->runCondition)
4615 : {
4616 180 : WindowFuncRunCondition *wfuncrc =
4617 : lfirst_node(WindowFuncRunCondition, lc3);
4618 : Expr *opexpr;
4619 : Expr *leftop;
4620 : Expr *rightop;
4621 :
4622 180 : if (wfuncrc->wfunc_left)
4623 : {
4624 162 : leftop = (Expr *) copyObject(wfunc);
4625 162 : rightop = copyObject(wfuncrc->arg);
4626 : }
4627 : else
4628 : {
4629 18 : leftop = copyObject(wfuncrc->arg);
4630 18 : rightop = (Expr *) copyObject(wfunc);
4631 : }
4632 :
4633 180 : opexpr = make_opclause(wfuncrc->opno,
4634 : BOOLOID,
4635 : false,
4636 : leftop,
4637 : rightop,
4638 : InvalidOid,
4639 : wfuncrc->inputcollid);
4640 :
4641 180 : runcondition = lappend(runcondition, opexpr);
4642 :
4643 180 : if (!topwindow)
4644 24 : topqual = lappend(topqual, opexpr);
4645 : }
4646 : }
4647 :
4648 : path = (Path *)
4649 2684 : create_windowagg_path(root, window_rel, path, window_target,
4650 2684 : wflists->windowFuncs[wc->winref],
4651 : runcondition, wc,
4652 : topwindow ? topqual : NIL, topwindow);
4653 : }
4654 :
4655 2534 : add_path(window_rel, path);
4656 2534 : }
4657 :
4658 : /*
4659 : * create_distinct_paths
4660 : *
4661 : * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4662 : *
4663 : * input_rel: contains the source-data Paths
4664 : * target: the pathtarget for the result Paths to compute
4665 : *
4666 : * Note: input paths should already compute the desired pathtarget, since
4667 : * Sort/Unique won't project anything.
4668 : */
4669 : static RelOptInfo *
4670 2418 : create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4671 : PathTarget *target)
4672 : {
4673 : RelOptInfo *distinct_rel;
4674 :
4675 : /* For now, do all work in the (DISTINCT, NULL) upperrel */
4676 2418 : distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4677 :
4678 : /*
4679 : * We don't compute anything at this level, so distinct_rel will be
4680 : * parallel-safe if the input rel is parallel-safe. In particular, if
4681 : * there is a DISTINCT ON (...) clause, any path for the input_rel will
4682 : * output those expressions, and will not be parallel-safe unless those
4683 : * expressions are parallel-safe.
4684 : */
4685 2418 : distinct_rel->consider_parallel = input_rel->consider_parallel;
4686 :
4687 : /*
4688 : * If the input rel belongs to a single FDW, so does the distinct_rel.
4689 : */
4690 2418 : distinct_rel->serverid = input_rel->serverid;
4691 2418 : distinct_rel->userid = input_rel->userid;
4692 2418 : distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4693 2418 : distinct_rel->fdwroutine = input_rel->fdwroutine;
4694 :
4695 : /* build distinct paths based on input_rel's pathlist */
4696 2418 : create_final_distinct_paths(root, input_rel, distinct_rel);
4697 :
4698 : /* now build distinct paths based on input_rel's partial_pathlist */
4699 2418 : create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4700 :
4701 : /* Give a helpful error if we failed to create any paths */
4702 2418 : if (distinct_rel->pathlist == NIL)
4703 0 : ereport(ERROR,
4704 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4705 : errmsg("could not implement DISTINCT"),
4706 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4707 :
4708 : /*
4709 : * If there is an FDW that's responsible for all baserels of the query,
4710 : * let it consider adding ForeignPaths.
4711 : */
4712 2418 : if (distinct_rel->fdwroutine &&
4713 16 : distinct_rel->fdwroutine->GetForeignUpperPaths)
4714 16 : distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4715 : UPPERREL_DISTINCT,
4716 : input_rel,
4717 : distinct_rel,
4718 : NULL);
4719 :
4720 : /* Let extensions possibly add some more paths */
4721 2418 : if (create_upper_paths_hook)
4722 0 : (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4723 : distinct_rel, NULL);
4724 :
4725 : /* Now choose the best path(s) */
4726 2418 : set_cheapest(distinct_rel);
4727 :
4728 2418 : return distinct_rel;
4729 : }
4730 :
4731 : /*
4732 : * create_partial_distinct_paths
4733 : *
4734 : * Process 'input_rel' partial paths and add unique/aggregate paths to the
4735 : * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4736 : * paths on top and add a final unique/aggregate path to remove any duplicate
4737 : * paths on top and add a final unique/aggregate path to remove any
4738 : * duplicates produced by combining rows from parallel workers.
4739 : static void
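 : /*
 : * Illustrative plan shape (editorial note, not part of the original source;
 : * table "big_table" is hypothetical): for a parallel-safe
 : *     SELECT DISTINCT a FROM big_table;
 : * one possible shape is
 : *     HashAggregate
 : *       -> Gather
 : *            -> HashAggregate (partial)
 : *                 -> Parallel Seq Scan on big_table
 : * where the partial step removes duplicates within each worker and the
 : * final step removes those remaining once the workers' results are combined.
 : */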
4740 2418 : create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4741 : RelOptInfo *final_distinct_rel,
4742 : PathTarget *target)
4743 : {
4744 : RelOptInfo *partial_distinct_rel;
4745 : Query *parse;
4746 : List *distinctExprs;
4747 : double numDistinctRows;
4748 : Path *cheapest_partial_path;
4749 : ListCell *lc;
4750 :
4751 : /* nothing to do when there are no partial paths in the input rel */
4752 2418 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4753 2310 : return;
4754 :
4755 108 : parse = root->parse;
4756 :
4757 : /* can't do parallel DISTINCT ON */
4758 108 : if (parse->hasDistinctOn)
4759 0 : return;
4760 :
4761 108 : partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4762 : NULL);
4763 108 : partial_distinct_rel->reltarget = target;
4764 108 : partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4765 :
4766 : /*
4767 : * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4768 : */
4769 108 : partial_distinct_rel->serverid = input_rel->serverid;
4770 108 : partial_distinct_rel->userid = input_rel->userid;
4771 108 : partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4772 108 : partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4773 :
4774 108 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
4775 :
4776 108 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4777 : parse->targetList);
4778 :
4779 : /* estimate how many distinct rows we'll get from each worker */
4780 108 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4781 : cheapest_partial_path->rows,
4782 : NULL, NULL);
4783 :
4784 : /*
4785 : * Try sorting the cheapest path and incrementally sorting any paths with
4786 : * presorted keys and put unique paths atop of those. We'll also
4787 : * attempt to reorder the required pathkeys to match the input path's
4788 : * pathkeys as much as possible, in hopes of avoiding a possible need to
4789 : * re-sort.
4790 : */
4791 108 : if (grouping_is_sortable(root->processed_distinctClause))
4792 : {
4793 234 : foreach(lc, input_rel->partial_pathlist)
4794 : {
4795 126 : Path *input_path = (Path *) lfirst(lc);
4796 : Path *sorted_path;
4797 126 : List *useful_pathkeys_list = NIL;
4798 :
4799 : useful_pathkeys_list =
4800 126 : get_useful_pathkeys_for_distinct(root,
4801 : root->distinct_pathkeys,
4802 : input_path->pathkeys);
4803 : Assert(list_length(useful_pathkeys_list) > 0);
4804 :
4805 390 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4806 : {
4807 138 : sorted_path = make_ordered_path(root,
4808 : partial_distinct_rel,
4809 : input_path,
4810 : cheapest_partial_path,
4811 : useful_pathkeys,
4812 : -1.0);
4813 :
4814 138 : if (sorted_path == NULL)
4815 12 : continue;
4816 :
4817 : /*
4818 : * An empty distinct_pathkeys means all tuples have the same
4819 : * value for the DISTINCT clause. See
4820 : * create_final_distinct_paths()
4821 : */
4822 126 : if (root->distinct_pathkeys == NIL)
4823 : {
4824 : Node *limitCount;
4825 :
4826 6 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4827 : sizeof(int64),
4828 : Int64GetDatum(1), false,
4829 : FLOAT8PASSBYVAL);
4830 :
4831 : /*
4832 : * Apply a LimitPath onto the partial path to restrict the
4833 : * tuples from each worker to 1.
4834 : * create_final_distinct_paths will need to apply an
4835 : * additional LimitPath to restrict this to a single row
4836 : * after the Gather node. If the query already has a
4837 : * LIMIT clause, then we could end up with three Limit
4838 : * nodes in the final plan. Consolidating the top two of
4839 : * these could be done, but does not seem worth troubling
4840 : * over.
4841 : */
4842 6 : add_partial_path(partial_distinct_rel, (Path *)
4843 6 : create_limit_path(root, partial_distinct_rel,
4844 : sorted_path,
4845 : NULL,
4846 : limitCount,
4847 : LIMIT_OPTION_COUNT,
4848 : 0, 1));
4849 : }
4850 : else
4851 : {
4852 120 : add_partial_path(partial_distinct_rel, (Path *)
4853 120 : create_upper_unique_path(root, partial_distinct_rel,
4854 : sorted_path,
4855 120 : list_length(root->distinct_pathkeys),
4856 : numDistinctRows));
4857 : }
4858 : }
4859 : }
4860 : }
4861 :
4862 : /*
4863 : * Now try hash aggregate paths, if enabled and hashing is possible. Since
4864 : * we're not on the hook to ensure we do our best to create at least one
4865 : * path here, we treat enable_hashagg as a hard off-switch rather than the
4866 : * slightly softer variant in create_final_distinct_paths.
4867 : */
4868 108 : if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4869 : {
4870 78 : add_partial_path(partial_distinct_rel, (Path *)
4871 78 : create_agg_path(root,
4872 : partial_distinct_rel,
4873 : cheapest_partial_path,
4874 : cheapest_partial_path->pathtarget,
4875 : AGG_HASHED,
4876 : AGGSPLIT_SIMPLE,
4877 : root->processed_distinctClause,
4878 : NIL,
4879 : NULL,
4880 : numDistinctRows));
4881 : }
4882 :
4883 : /*
4884 : * If there is an FDW that's responsible for all baserels of the query,
4885 : * let it consider adding ForeignPaths.
4886 : */
4887 108 : if (partial_distinct_rel->fdwroutine &&
4888 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4889 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4890 : UPPERREL_PARTIAL_DISTINCT,
4891 : input_rel,
4892 : partial_distinct_rel,
4893 : NULL);
4894 :
4895 : /* Let extensions possibly add some more partial paths */
4896 108 : if (create_upper_paths_hook)
4897 0 : (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4898 : input_rel, partial_distinct_rel, NULL);
4899 :
4900 108 : if (partial_distinct_rel->partial_pathlist != NIL)
4901 : {
4902 108 : generate_useful_gather_paths(root, partial_distinct_rel, true);
4903 108 : set_cheapest(partial_distinct_rel);
4904 :
4905 : /*
4906 : * Finally, create paths to distinctify the final result. This step
4907 : * is needed to remove any duplicates due to combining rows from
4908 : * parallel workers.
4909 : */
4910 108 : create_final_distinct_paths(root, partial_distinct_rel,
4911 : final_distinct_rel);
4912 : }
4913 : }
4914 :
4915 : /*
4916 : * create_final_distinct_paths
4917 : * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
4918 : *
4919 : * input_rel: contains the source-data paths
4920 : * distinct_rel: destination relation for storing created paths
4921 : */
4922 : static RelOptInfo *
4923 2526 : create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4924 : RelOptInfo *distinct_rel)
4925 : {
4926 2526 : Query *parse = root->parse;
4927 2526 : Path *cheapest_input_path = input_rel->cheapest_total_path;
4928 : double numDistinctRows;
4929 : bool allow_hash;
4930 :
4931 : /* Estimate number of distinct rows there will be */
4932 2526 : if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4933 2452 : root->hasHavingQual)
4934 : {
4935 : /*
4936 : * If there was grouping or aggregation, use the number of input rows
4937 : * as the estimated number of DISTINCT rows (ie, assume the input is
4938 : * already mostly unique).
4939 : */
4940 74 : numDistinctRows = cheapest_input_path->rows;
4941 : }
4942 : else
4943 : {
4944 : /*
4945 : * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4946 : */
4947 : List *distinctExprs;
4948 :
4949 2452 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4950 : parse->targetList);
4951 2452 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4952 : cheapest_input_path->rows,
4953 : NULL, NULL);
4954 : }
4955 :
4956 : /*
4957 : * Consider sort-based implementations of DISTINCT, if possible.
4958 : */
4959 2526 : if (grouping_is_sortable(root->processed_distinctClause))
4960 : {
4961 : /*
4962 : * Firstly, if we have any adequately-presorted paths, just stick a
4963 : * Unique node on those. We also consider doing an explicit sort of
4964 : * the cheapest input path and Unique'ing that. If any paths have
4965 : * presorted keys then we'll create an incremental sort atop of those
4966 : * before adding a Unique node on top. We'll also attempt to
4967 : * reorder the required pathkeys to match the input path's pathkeys as
4968 : * much as possible, in hopes of avoiding a possible need to re-sort.
4969 : *
4970 : * When we have DISTINCT ON, we must sort by the more rigorous of
4971 : * DISTINCT and ORDER BY, else it won't have the desired behavior.
4972 : * Also, if we do have to do an explicit sort, we might as well use
4973 : * the more rigorous ordering to avoid a second sort later. (Note
4974 : * that the parser will have ensured that one clause is a prefix of
4975 : * the other.)
4976 : */
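 :         /*
 :          * Illustrative example (editorial note, not part of the original
 :          * source; table "t" is hypothetical): for
 :          *     SELECT DISTINCT ON (a) a, b FROM t ORDER BY a, b DESC;
 :          * distinct_pathkeys covers only "a" while sort_pathkeys covers
 :          * "a, b DESC", so sorting by the longer sort_pathkeys both gives
 :          * the expected "first row per a" result and avoids a second sort
 :          * later.
 :          */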
4977 : List *needed_pathkeys;
4978 : ListCell *lc;
4979 2520 : double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
4980 :
4981 2758 : if (parse->hasDistinctOn &&
4982 238 : list_length(root->distinct_pathkeys) <
4983 238 : list_length(root->sort_pathkeys))
4984 54 : needed_pathkeys = root->sort_pathkeys;
4985 : else
4986 2466 : needed_pathkeys = root->distinct_pathkeys;
4987 :
4988 6438 : foreach(lc, input_rel->pathlist)
4989 : {
4990 3918 : Path *input_path = (Path *) lfirst(lc);
4991 : Path *sorted_path;
4992 3918 : List *useful_pathkeys_list = NIL;
4993 :
4994 : useful_pathkeys_list =
4995 3918 : get_useful_pathkeys_for_distinct(root,
4996 : needed_pathkeys,
4997 : input_path->pathkeys);
4998 : Assert(list_length(useful_pathkeys_list) > 0);
4999 :
5000 12220 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5001 : {
5002 4384 : sorted_path = make_ordered_path(root,
5003 : distinct_rel,
5004 : input_path,
5005 : cheapest_input_path,
5006 : useful_pathkeys,
5007 : limittuples);
5008 :
5009 4384 : if (sorted_path == NULL)
5010 524 : continue;
5011 :
5012 : /*
5013 : * distinct_pathkeys may have become empty if all of the
5014 : * pathkeys were determined to be redundant. If all of the
5015 : * pathkeys are redundant then each DISTINCT target must only
5016 : * allow a single value, therefore all resulting tuples must
5017 : * be identical (or at least indistinguishable by an equality
5018 : * check). We can uniquify these tuples simply by taking
5019 : * the first tuple. All we do here is add a path to do "LIMIT
5020 : * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5021 : * still have a non-NIL sort_pathkeys list, so we must still
5022 : * only do this with paths which are correctly sorted by
5023 : * sort_pathkeys.
5024 : */
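 :                 /*
 :                  * Illustrative example (editorial note, not part of the
 :                  * original source; table "t" is hypothetical): in
 :                  *     SELECT DISTINCT a FROM t WHERE a = 1;
 :                  * the pathkey on "a" is deemed redundant because "a" is
 :                  * constant, so distinct_pathkeys is NIL and a LIMIT 1 on
 :                  * suitably sorted input is enough to implement the
 :                  * DISTINCT.
 :                  */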
5025 3860 : if (root->distinct_pathkeys == NIL)
5026 : {
5027 : Node *limitCount;
5028 :
5029 98 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5030 : sizeof(int64),
5031 : Int64GetDatum(1), false,
5032 : FLOAT8PASSBYVAL);
5033 :
5034 : /*
5035 : * If the query already has a LIMIT clause, then we could
5036 : * end up with a duplicate LimitPath in the final plan.
5037 : * That does not seem worth troubling over too much.
5038 : */
5039 98 : add_path(distinct_rel, (Path *)
5040 98 : create_limit_path(root, distinct_rel, sorted_path,
5041 : NULL, limitCount,
5042 : LIMIT_OPTION_COUNT, 0, 1));
5043 : }
5044 : else
5045 : {
5046 3762 : add_path(distinct_rel, (Path *)
5047 3762 : create_upper_unique_path(root, distinct_rel,
5048 : sorted_path,
5049 3762 : list_length(root->distinct_pathkeys),
5050 : numDistinctRows));
5051 : }
5052 : }
5053 : }
5054 : }
5055 :
5056 : /*
5057 : * Consider hash-based implementations of DISTINCT, if possible.
5058 : *
5059 : * If we were not able to make any other types of path, we *must* hash or
5060 : * die trying. If we do have other choices, there are two things that
5061 : * should prevent selection of hashing: if the query uses DISTINCT ON
5062 : * (because it won't really have the expected behavior if we hash), or if
5063 : * enable_hashagg is off.
5064 : *
5065 : * Note: grouping_is_hashable() is much more expensive to check than the
5066 : * other gating conditions, so we want to do it last.
5067 : */
5068 2526 : if (distinct_rel->pathlist == NIL)
5069 6 : allow_hash = true; /* we have no alternatives */
5070 2520 : else if (parse->hasDistinctOn || !enable_hashagg)
5071 388 : allow_hash = false; /* policy-based decision not to hash */
5072 : else
5073 2132 : allow_hash = true; /* default */
5074 :
5075 2526 : if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5076 : {
5077 : /* Generate hashed aggregate path --- no sort needed */
5078 2138 : add_path(distinct_rel, (Path *)
5079 2138 : create_agg_path(root,
5080 : distinct_rel,
5081 : cheapest_input_path,
5082 : cheapest_input_path->pathtarget,
5083 : AGG_HASHED,
5084 : AGGSPLIT_SIMPLE,
5085 : root->processed_distinctClause,
5086 : NIL,
5087 : NULL,
5088 : numDistinctRows));
5089 : }
5090 :
5091 2526 : return distinct_rel;
5092 : }
5093 :
5094 : /*
5095 : * get_useful_pathkeys_for_distinct
5096 : * Get useful orderings of pathkeys for distinctClause by reordering
5097 : * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5098 : *
5099 : * This returns a list of pathkeys that can be useful for the DISTINCT or
5100 : * DISTINCT ON clause; for convenience, it always includes 'needed_pathkeys'.
5101 : */
5102 : static List *
5103 4044 : get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys,
5104 : List *path_pathkeys)
5105 : {
5106 4044 : List *useful_pathkeys_list = NIL;
5107 4044 : List *useful_pathkeys = NIL;
5108 :
5109 : /* always include the given 'needed_pathkeys' */
5110 4044 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5111 : needed_pathkeys);
5112 :
5113 4044 : if (!enable_distinct_reordering)
5114 0 : return useful_pathkeys_list;
5115 :
5116 : /*
5117 : * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5118 : * that match 'needed_pathkeys', but only up to the longest matching
5119 : * prefix.
5120 : *
5121 : * When we have DISTINCT ON, we must ensure that the resulting pathkey
5122 : * list matches initial distinctClause pathkeys; otherwise, it won't have
5123 : * the desired behavior.
5124 : */
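 :     /*
 :      * Illustrative example (editorial note, not part of the original
 :      * source; table "t" is hypothetical): for
 :      *     SELECT DISTINCT a, b FROM t;
 :      * with an input path already sorted by (b), needed_pathkeys (a, b) can
 :      * be reordered to (b, a), so an incremental sort on the remaining key
 :      * is often cheaper than a full sort on (a, b).
 :      */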
5125 9890 : foreach_node(PathKey, pathkey, path_pathkeys)
5126 : {
5127 : /*
5128 : * The PathKey nodes are canonical, so they can be checked for
5129 : * equality by simple pointer comparison.
5130 : */
5131 1830 : if (!list_member_ptr(needed_pathkeys, pathkey))
5132 10 : break;
5133 1820 : if (root->parse->hasDistinctOn &&
5134 202 : !list_member_ptr(root->distinct_pathkeys, pathkey))
5135 18 : break;
5136 :
5137 1802 : useful_pathkeys = lappend(useful_pathkeys, pathkey);
5138 : }
5139 :
5140 : /* If no match at all, no point in reordering needed_pathkeys */
5141 4044 : if (useful_pathkeys == NIL)
5142 2506 : return useful_pathkeys_list;
5143 :
5144 : /*
5145 : * If not full match, the resulting pathkey list is not useful without
5146 : * incremental sort.
5147 : */
5148 1538 : if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5149 848 : !enable_incremental_sort)
5150 60 : return useful_pathkeys_list;
5151 :
5152 : /* Append the remaining PathKey nodes in needed_pathkeys */
5153 1478 : useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5154 : needed_pathkeys);
5155 :
5156 : /*
5157 : * If the resulting pathkey list is the same as the 'needed_pathkeys',
5158 : * just drop it.
5159 : */
5160 1478 : if (compare_pathkeys(needed_pathkeys,
5161 : useful_pathkeys) == PATHKEYS_EQUAL)
5162 1000 : return useful_pathkeys_list;
5163 :
5164 478 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5165 : useful_pathkeys);
5166 :
5167 478 : return useful_pathkeys_list;
5168 : }
5169 :
5170 : /*
5171 : * create_ordered_paths
5172 : *
5173 : * Build a new upperrel containing Paths for ORDER BY evaluation.
5174 : *
5175 : * All paths in the result must satisfy the ORDER BY ordering.
5176 : * The only new paths we need consider are an explicit full sort
5177 : * and incremental sort on the cheapest-total existing path.
5178 : *
5179 : * input_rel: contains the source-data Paths
5180 : * target: the output tlist the result Paths must emit
5181 : * limit_tuples: estimated bound on the number of output tuples,
5182 : * or -1 if no LIMIT or couldn't estimate
5183 : *
5184 : * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5185 : * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5186 : */
5187 : static RelOptInfo *
5188 63276 : create_ordered_paths(PlannerInfo *root,
5189 : RelOptInfo *input_rel,
5190 : PathTarget *target,
5191 : bool target_parallel_safe,
5192 : double limit_tuples)
5193 : {
5194 63276 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5195 : RelOptInfo *ordered_rel;
5196 : ListCell *lc;
5197 :
5198 : /* For now, do all work in the (ORDERED, NULL) upperrel */
5199 63276 : ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5200 :
5201 : /*
5202 : * If the input relation is not parallel-safe, then the ordered relation
5203 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5204 : * target list is parallel-safe.
5205 : */
5206 63276 : if (input_rel->consider_parallel && target_parallel_safe)
5207 42486 : ordered_rel->consider_parallel = true;
5208 :
5209 : /*
5210 : * If the input rel belongs to a single FDW, so does the ordered_rel.
5211 : */
5212 63276 : ordered_rel->serverid = input_rel->serverid;
5213 63276 : ordered_rel->userid = input_rel->userid;
5214 63276 : ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5215 63276 : ordered_rel->fdwroutine = input_rel->fdwroutine;
5216 :
5217 158100 : foreach(lc, input_rel->pathlist)
5218 : {
5219 94824 : Path *input_path = (Path *) lfirst(lc);
5220 : Path *sorted_path;
5221 : bool is_sorted;
5222 : int presorted_keys;
5223 :
5224 94824 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5225 : input_path->pathkeys, &presorted_keys);
5226 :
5227 94824 : if (is_sorted)
5228 34114 : sorted_path = input_path;
5229 : else
5230 : {
5231 : /*
5232 : * Try at least sorting the cheapest path and also try
5233 : * incrementally sorting any path which is partially sorted
5234 : * already (no need to deal with paths which have presorted keys
5235 : * when incremental sort is disabled unless it's the cheapest
5236 : * input path).
5237 : */
5238 60710 : if (input_path != cheapest_input_path &&
5239 5518 : (presorted_keys == 0 || !enable_incremental_sort))
5240 1822 : continue;
5241 :
5242 : /*
5243 : * We've no need to consider both a sort and incremental sort.
5244 : * We'll just do a sort if there are no presorted keys and an
5245 : * incremental sort when there are presorted keys.
5246 : */
5247 58888 : if (presorted_keys == 0 || !enable_incremental_sort)
5248 54760 : sorted_path = (Path *) create_sort_path(root,
5249 : ordered_rel,
5250 : input_path,
5251 : root->sort_pathkeys,
5252 : limit_tuples);
5253 : else
5254 4128 : sorted_path = (Path *) create_incremental_sort_path(root,
5255 : ordered_rel,
5256 : input_path,
5257 : root->sort_pathkeys,
5258 : presorted_keys,
5259 : limit_tuples);
5260 : }
5261 :
5262 : /*
5263 : * If the pathtarget of the result path has different expressions from
5264 : * the target to be applied, a projection step is needed.
5265 : */
5266 93002 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5267 306 : sorted_path = apply_projection_to_path(root, ordered_rel,
5268 : sorted_path, target);
5269 :
5270 93002 : add_path(ordered_rel, sorted_path);
5271 : }
5272 :
5273 : /*
5274 : * generate_gather_paths() will have already generated a simple Gather
5275 : * path for the best parallel path, if any, and the loop above will have
5276 : * considered sorting it. Similarly, generate_gather_paths() will also
5277 : * have generated order-preserving Gather Merge plans which can be used
5278 : * without sorting if they happen to match the sort_pathkeys, and the loop
5279 : * above will have handled those as well. However, there's one more
5280 : * possibility: it may make sense to sort the cheapest partial path or
5281 : * incrementally sort any partial path that is partially sorted according
5282 : * to the required output order and then use Gather Merge.
5283 : */
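 :     /*
 :      * Illustrative plan shape (editorial note, not part of the original
 :      * source; table "big_table" is hypothetical): for a parallel-safe
 :      *     SELECT * FROM big_table ORDER BY a;
 :      * this can yield
 :      *     Gather Merge -> Sort (a) -> Parallel Seq Scan on big_table
 :      * where each worker sorts its share of the rows and the leader merges
 :      * the already-sorted streams.
 :      */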
5284 63276 : if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5285 42348 : input_rel->partial_pathlist != NIL)
5286 : {
5287 : Path *cheapest_partial_path;
5288 :
5289 2174 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
5290 :
5291 4554 : foreach(lc, input_rel->partial_pathlist)
5292 : {
5293 2380 : Path *input_path = (Path *) lfirst(lc);
5294 : Path *sorted_path;
5295 : bool is_sorted;
5296 : int presorted_keys;
5297 : double total_groups;
5298 :
5299 2380 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5300 : input_path->pathkeys,
5301 : &presorted_keys);
5302 :
5303 2380 : if (is_sorted)
5304 182 : continue;
5305 :
5306 : /*
5307 : * Try at least sorting the cheapest path and also try
5308 : * incrementally sorting any path which is partially sorted
5309 : * already (no need to deal with paths which have presorted keys
5310 : * when incremental sort is disabled unless it's the cheapest
5311 : * partial path).
5312 : */
5313 2198 : if (input_path != cheapest_partial_path &&
5314 42 : (presorted_keys == 0 || !enable_incremental_sort))
5315 0 : continue;
5316 :
5317 : /*
5318 : * We've no need to consider both a sort and incremental sort.
5319 : * We'll just do a sort if there are no presorted keys and an
5320 : * incremental sort when there are presorted keys.
5321 : */
5322 2198 : if (presorted_keys == 0 || !enable_incremental_sort)
5323 2138 : sorted_path = (Path *) create_sort_path(root,
5324 : ordered_rel,
5325 : input_path,
5326 : root->sort_pathkeys,
5327 : limit_tuples);
5328 : else
5329 60 : sorted_path = (Path *) create_incremental_sort_path(root,
5330 : ordered_rel,
5331 : input_path,
5332 : root->sort_pathkeys,
5333 : presorted_keys,
5334 : limit_tuples);
5335 2198 : total_groups = compute_gather_rows(sorted_path);
5336 : sorted_path = (Path *)
5337 2198 : create_gather_merge_path(root, ordered_rel,
5338 : sorted_path,
5339 : sorted_path->pathtarget,
5340 : root->sort_pathkeys, NULL,
5341 : &total_groups);
5342 :
5343 : /*
5344 : * If the pathtarget of the result path has different expressions
5345 : * from the target to be applied, a projection step is needed.
5346 : */
5347 2198 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5348 6 : sorted_path = apply_projection_to_path(root, ordered_rel,
5349 : sorted_path, target);
5350 :
5351 2198 : add_path(ordered_rel, sorted_path);
5352 : }
5353 : }
5354 :
5355 : /*
5356 : * If there is an FDW that's responsible for all baserels of the query,
5357 : * let it consider adding ForeignPaths.
5358 : */
5359 63276 : if (ordered_rel->fdwroutine &&
5360 380 : ordered_rel->fdwroutine->GetForeignUpperPaths)
5361 366 : ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5362 : input_rel, ordered_rel,
5363 : NULL);
5364 :
5365 : /* Let extensions possibly add some more paths */
5366 63276 : if (create_upper_paths_hook)
5367 0 : (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5368 : input_rel, ordered_rel, NULL);
5369 :
5370 : /*
5371 : * No need to bother with set_cheapest here; grouping_planner does not
5372 : * need us to do it.
5373 : */
5374 : Assert(ordered_rel->pathlist != NIL);
5375 :
5376 63276 : return ordered_rel;
5377 : }
5378 :
5379 :
5380 : /*
5381 : * make_group_input_target
5382 : * Generate appropriate PathTarget for initial input to grouping nodes.
5383 : *
5384 : * If there is grouping or aggregation, the scan/join subplan cannot emit
5385 : * the query's final targetlist; for example, it certainly can't emit any
5386 : * aggregate function calls. This routine generates the correct target
5387 : * for the scan/join subplan.
5388 : *
5389 : * The query target list passed from the parser already contains entries
5390 : * for all ORDER BY and GROUP BY expressions, but it will not have entries
5391 : * for variables used only in HAVING clauses; so we need to add those
5392 : * variables to the subplan target list. Also, we flatten all expressions
5393 : * except GROUP BY items into their component variables; other expressions
5394 : * will be computed by the upper plan nodes rather than by the subplan.
5395 : * For example, given a query like
5396 : * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5397 : * we want to pass this targetlist to the subplan:
5398 : * a+b,c,d
5399 : * where the a+b target will be used by the Sort/Group steps, and the
5400 : * other targets will be used for computing the final results.
5401 : *
5402 : * 'final_target' is the query's final target list (in PathTarget form)
5403 : *
5404 : * The result is the PathTarget to be computed by the Paths returned from
5405 : * query_planner().
5406 : */
5407 : static PathTarget *
5408 39814 : make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5409 : {
5410 39814 : Query *parse = root->parse;
5411 : PathTarget *input_target;
5412 : List *non_group_cols;
5413 : List *non_group_vars;
5414 : int i;
5415 : ListCell *lc;
5416 :
5417 : /*
5418 : * We must build a target containing all grouping columns, plus any other
5419 : * Vars mentioned in the query's targetlist and HAVING qual.
5420 : */
5421 39814 : input_target = create_empty_pathtarget();
5422 39814 : non_group_cols = NIL;
5423 :
5424 39814 : i = 0;
5425 95714 : foreach(lc, final_target->exprs)
5426 : {
5427 55900 : Expr *expr = (Expr *) lfirst(lc);
5428 55900 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
5429 :
5430 64532 : if (sgref && root->processed_groupClause &&
5431 8632 : get_sortgroupref_clause_noerr(sgref,
5432 : root->processed_groupClause) != NULL)
5433 : {
5434 : /*
5435 : * It's a grouping column, so add it to the input target as-is.
5436 : *
5437 : * Note that the target is logically below the grouping step. So
5438 : * with grouping sets we need to remove the RT index of the
5439 : * with grouping sets we need to remove the RT index of the
5440 : * grouping step, if any, from the target expression.
5441 6896 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5442 : {
5443 : Assert(root->group_rtindex > 0);
5444 : expr = (Expr *)
5445 1752 : remove_nulling_relids((Node *) expr,
5446 1752 : bms_make_singleton(root->group_rtindex),
5447 : NULL);
5448 : }
5449 6896 : add_column_to_pathtarget(input_target, expr, sgref);
5450 : }
5451 : else
5452 : {
5453 : /*
5454 : * Non-grouping column, so just remember the expression for later
5455 : * call to pull_var_clause.
5456 : */
5457 49004 : non_group_cols = lappend(non_group_cols, expr);
5458 : }
5459 :
5460 55900 : i++;
5461 : }
5462 :
5463 : /*
5464 : * If there's a HAVING clause, we'll need the Vars it uses, too.
5465 : */
5466 39814 : if (parse->havingQual)
5467 1172 : non_group_cols = lappend(non_group_cols, parse->havingQual);
5468 :
5469 : /*
5470 : * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5471 : * add them to the input target if not already present. (A Var used
5472 : * directly as a GROUP BY item will be present already.) Note this
5473 : * includes Vars used in resjunk items, so we are covering the needs of
5474 : * ORDER BY and window specifications. Vars used within Aggrefs and
5475 : * WindowFuncs will be pulled out here, too.
5476 : *
5477 : * Note that the target is logically below the grouping step. So with
5478 : * grouping sets we need to remove the RT index of the grouping step,
5479 : * if any, from the non-group Vars.
5480 : */
5481 39814 : non_group_vars = pull_var_clause((Node *) non_group_cols,
5482 : PVC_RECURSE_AGGREGATES |
5483 : PVC_RECURSE_WINDOWFUNCS |
5484 : PVC_INCLUDE_PLACEHOLDERS);
5485 39814 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5486 : {
5487 : Assert(root->group_rtindex > 0);
5488 : non_group_vars = (List *)
5489 806 : remove_nulling_relids((Node *) non_group_vars,
5490 806 : bms_make_singleton(root->group_rtindex),
5491 : NULL);
5492 : }
5493 39814 : add_new_columns_to_pathtarget(input_target, non_group_vars);
5494 :
5495 : /* clean up cruft */
5496 39814 : list_free(non_group_vars);
5497 39814 : list_free(non_group_cols);
5498 :
5499 : /* XXX this causes some redundant cost calculation ... */
5500 39814 : return set_pathtarget_cost_width(root, input_target);
5501 : }
5502 :
5503 : /*
5504 : * make_partial_grouping_target
5505 : * Generate appropriate PathTarget for output of partial aggregate
5506 : * (or partial grouping, if there are no aggregates) nodes.
5507 : *
5508 : * A partial aggregation node needs to emit all the same aggregates that
5509 : * a regular aggregation node would, plus any aggregates used in HAVING;
5510 : * except that the Aggref nodes should be marked as partial aggregates.
5511 : *
5512 : * In addition, we'd better emit any Vars and PlaceHolderVars that are
5513 : * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5514 : * these would be Vars that are grouped by or used in grouping expressions.)
5515 : *
5516 : * grouping_target is the tlist to be emitted by the topmost aggregation step.
5517 : * havingQual represents the HAVING clause.
5518 : */
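 : /*
 : * Illustrative example (editorial note, not part of the original source;
 : * table "t" is hypothetical): for a parallelized
 : *     SELECT a, sum(b) FROM t GROUP BY a;
 : * the partial target is roughly "a, PARTIAL sum(b)": each worker emits
 : * per-group transition states, and the Finalize Aggregate step above the
 : * Gather combines them into the final sum(b) values.
 : */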
5519 : static PathTarget *
5520 2190 : make_partial_grouping_target(PlannerInfo *root,
5521 : PathTarget *grouping_target,
5522 : Node *havingQual)
5523 : {
5524 : PathTarget *partial_target;
5525 : List *non_group_cols;
5526 : List *non_group_exprs;
5527 : int i;
5528 : ListCell *lc;
5529 :
5530 2190 : partial_target = create_empty_pathtarget();
5531 2190 : non_group_cols = NIL;
5532 :
5533 2190 : i = 0;
5534 7798 : foreach(lc, grouping_target->exprs)
5535 : {
5536 5608 : Expr *expr = (Expr *) lfirst(lc);
5537 5608 : Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5538 :
5539 9422 : if (sgref && root->processed_groupClause &&
5540 3814 : get_sortgroupref_clause_noerr(sgref,
5541 : root->processed_groupClause) != NULL)
5542 : {
5543 : /*
5544 : * It's a grouping column, so add it to the partial_target as-is.
5545 : * (This allows the upper agg step to repeat the grouping calcs.)
5546 : */
5547 1900 : add_column_to_pathtarget(partial_target, expr, sgref);
5548 : }
5549 : else
5550 : {
5551 : /*
5552 : * Non-grouping column, so just remember the expression for later
5553 : * call to pull_var_clause.
5554 : */
5555 3708 : non_group_cols = lappend(non_group_cols, expr);
5556 : }
5557 :
5558 5608 : i++;
5559 : }
5560 :
5561 : /*
5562 : * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5563 : */
5564 2190 : if (havingQual)
5565 824 : non_group_cols = lappend(non_group_cols, havingQual);
5566 :
5567 : /*
5568 : * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5569 : * non-group cols (plus HAVING), and add them to the partial_target if not
5570 : * already present. (An expression used directly as a GROUP BY item will
5571 : * be present already.) Note this includes Vars used in resjunk items, so
5572 : * we are covering the needs of ORDER BY and window specifications.
5573 : */
5574 2190 : non_group_exprs = pull_var_clause((Node *) non_group_cols,
5575 : PVC_INCLUDE_AGGREGATES |
5576 : PVC_RECURSE_WINDOWFUNCS |
5577 : PVC_INCLUDE_PLACEHOLDERS);
5578 :
5579 2190 : add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5580 :
5581 : /*
5582 : * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5583 : * are at the top level of the target list, so we can just scan the list
5584 : * rather than recursing through the expression trees.
5585 : */
5586 8370 : foreach(lc, partial_target->exprs)
5587 : {
5588 6180 : Aggref *aggref = (Aggref *) lfirst(lc);
5589 :
5590 6180 : if (IsA(aggref, Aggref))
5591 : {
5592 : Aggref *newaggref;
5593 :
5594 : /*
5595 : * We shouldn't need to copy the substructure of the Aggref node,
5596 : * but flat-copy the node itself to avoid damaging other trees.
5597 : */
5598 4250 : newaggref = makeNode(Aggref);
5599 4250 : memcpy(newaggref, aggref, sizeof(Aggref));
5600 :
5601 : /* For now, assume serialization is required */
5602 4250 : mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5603 :
5604 4250 : lfirst(lc) = newaggref;
5605 : }
5606 : }
5607 :
5608 : /* clean up cruft */
5609 2190 : list_free(non_group_exprs);
5610 2190 : list_free(non_group_cols);
5611 :
5612 : /* XXX this causes some redundant cost calculation ... */
5613 2190 : return set_pathtarget_cost_width(root, partial_target);
5614 : }
5615 :
5616 : /*
5617 : * mark_partial_aggref
5618 : * Adjust an Aggref to make it represent a partial-aggregation step.
5619 : *
5620 : * The Aggref node is modified in-place; caller must do any copying required.
5621 : */
5622 : void
5623 7066 : mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5624 : {
5625 : /* aggtranstype should be computed by this point */
5626 : Assert(OidIsValid(agg->aggtranstype));
5627 : /* ... but aggsplit should still be as the parser left it */
5628 : Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5629 :
5630 : /* Mark the Aggref with the intended partial-aggregation mode */
5631 7066 : agg->aggsplit = aggsplit;
5632 :
5633 : /*
5634 : * Adjust result type if needed. Normally, a partial aggregate returns
5635 : * the aggregate's transition type; but if that's INTERNAL and we're
5636 : * serializing, it returns BYTEA instead.
5637 : */
5638 7066 : if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5639 : {
5640 5658 : if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5641 242 : agg->aggtype = BYTEAOID;
5642 : else
5643 5416 : agg->aggtype = agg->aggtranstype;
5644 : }
5645 7066 : }
5646 :
5647 : /*
5648 : * postprocess_setop_tlist
5649 : * Fix up targetlist returned by plan_set_operations().
5650 : *
5651 : * We need to transpose sort key info from the orig_tlist into new_tlist.
5652 : * NOTE: this would not be good enough if we supported resjunk sort keys
5653 : * for results of set operations --- then, we'd need to project a whole
5654 : * new tlist to evaluate the resjunk columns. For now, just ereport if we
5655 : * find any resjunk columns in orig_tlist.
5656 : */
5657 : static List *
5658 5496 : postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5659 : {
5660 : ListCell *l;
5661 5496 : ListCell *orig_tlist_item = list_head(orig_tlist);
5662 :
5663 22106 : foreach(l, new_tlist)
5664 : {
5665 16610 : TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5666 : TargetEntry *orig_tle;
5667 :
5668 : /* ignore resjunk columns in setop result */
5669 16610 : if (new_tle->resjunk)
5670 0 : continue;
5671 :
5672 : Assert(orig_tlist_item != NULL);
5673 16610 : orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5674 16610 : orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5675 16610 : if (orig_tle->resjunk) /* should not happen */
5676 0 : elog(ERROR, "resjunk output columns are not implemented");
5677 : Assert(new_tle->resno == orig_tle->resno);
5678 16610 : new_tle->ressortgroupref = orig_tle->ressortgroupref;
5679 : }
5680 5496 : if (orig_tlist_item != NULL)
5681 0 : elog(ERROR, "resjunk output columns are not implemented");
5682 5496 : return new_tlist;
5683 : }
5684 :
5685 : /*
5686 : * optimize_window_clauses
5687 : * Call each WindowFunc's prosupport function to see if we're able to
5688 : * make any adjustments to any of the WindowClause's so that the executor
5689 : * can execute the window functions in a more optimal way.
5690 : *
5691 : * Currently we only allow adjustments to the WindowClause's frameOptions. We
5692 : * may allow more things to be done here in the future.
5693 : */
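 : /*
 : * Illustrative example (editorial note, not part of the original source):
 : * the result of row_number() does not depend on the window frame, so its
 : * support function can report a cheaper frame (e.g. a ROWS-based one) than
 : * the default; if that adjustment makes two WindowClauses identical, they
 : * are merged below so that a single WindowAgg can serve both.
 : */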
5694 : static void
5695 2336 : optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5696 : {
5697 2336 : List *windowClause = root->parse->windowClause;
5698 : ListCell *lc;
5699 :
5700 4882 : foreach(lc, windowClause)
5701 : {
5702 2546 : WindowClause *wc = lfirst_node(WindowClause, lc);
5703 : ListCell *lc2;
5704 2546 : int optimizedFrameOptions = 0;
5705 :
5706 : Assert(wc->winref <= wflists->maxWinRef);
5707 :
5708 : /* skip any WindowClauses that have no WindowFuncs */
5709 2546 : if (wflists->windowFuncs[wc->winref] == NIL)
5710 24 : continue;
5711 :
5712 3062 : foreach(lc2, wflists->windowFuncs[wc->winref])
5713 : {
5714 : SupportRequestOptimizeWindowClause req;
5715 : SupportRequestOptimizeWindowClause *res;
5716 2564 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5717 : Oid prosupport;
5718 :
5719 2564 : prosupport = get_func_support(wfunc->winfnoid);
5720 :
5721 : /* Check if there's a support function for 'wfunc' */
5722 2564 : if (!OidIsValid(prosupport))
5723 2024 : break; /* can't optimize this WindowClause */
5724 :
5725 760 : req.type = T_SupportRequestOptimizeWindowClause;
5726 760 : req.window_clause = wc;
5727 760 : req.window_func = wfunc;
5728 760 : req.frameOptions = wc->frameOptions;
5729 :
5730 : /* call the support function */
5731 : res = (SupportRequestOptimizeWindowClause *)
5732 760 : DatumGetPointer(OidFunctionCall1(prosupport,
5733 : PointerGetDatum(&req)));
5734 :
5735 : /*
5736 : * Skip to next WindowClause if the support function does not
5737 : * support this request type.
5738 : */
5739 760 : if (res == NULL)
5740 220 : break;
5741 :
5742 : /*
5743 : * Save these frameOptions for the first WindowFunc for this
5744 : * WindowClause.
5745 : */
5746 540 : if (foreach_current_index(lc2) == 0)
5747 516 : optimizedFrameOptions = res->frameOptions;
5748 :
5749 : /*
5750 : * On subsequent WindowFuncs, if the frameOptions are not the same
5751 : * then we're unable to optimize the frameOptions for this
5752 : * WindowClause.
5753 : */
5754 24 : else if (optimizedFrameOptions != res->frameOptions)
5755 0 : break; /* skip to the next WindowClause, if any */
5756 : }
5757 :
5758 : /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5759 2522 : if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5760 : {
5761 : ListCell *lc3;
5762 :
5763 : /* apply the new frame options */
5764 498 : wc->frameOptions = optimizedFrameOptions;
5765 :
5766 : /*
5767 : * We now check to see if changing the frameOptions has caused
5768 : * this WindowClause to be a duplicate of some other WindowClause.
5769 : * This can only happen if we have multiple WindowClauses, so
5770 : * don't bother if there's only 1.
5771 : */
5772 498 : if (list_length(windowClause) == 1)
5773 408 : continue;
5774 :
5775 : /*
5776 : * Do the duplicate check and reuse the existing WindowClause if
5777 : * we find a duplicate.
5778 : */
5779 228 : foreach(lc3, windowClause)
5780 : {
5781 174 : WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5782 :
5783 : /* skip over the WindowClause we're currently editing */
5784 174 : if (existing_wc == wc)
5785 54 : continue;
5786 :
5787 : /*
5788 : * Perform the same duplicate check that is done in
5789 : * transformWindowFuncCall.
5790 : */
5791 240 : if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5792 120 : equal(wc->orderClause, existing_wc->orderClause) &&
5793 120 : wc->frameOptions == existing_wc->frameOptions &&
5794 72 : equal(wc->startOffset, existing_wc->startOffset) &&
5795 36 : equal(wc->endOffset, existing_wc->endOffset))
5796 : {
5797 : ListCell *lc4;
5798 :
5799 : /*
5800 : * Now move each WindowFunc in 'wc' into 'existing_wc'.
5801 : * This requires adjusting each WindowFunc's winref and
5802 : * moving the WindowFuncs in 'wc' to the list of
5803 : * WindowFuncs in 'existing_wc'.
5804 : */
5805 78 : foreach(lc4, wflists->windowFuncs[wc->winref])
5806 : {
5807 42 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5808 :
5809 42 : wfunc->winref = existing_wc->winref;
5810 : }
5811 :
5812 : /* move list items */
5813 72 : wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5814 36 : wflists->windowFuncs[wc->winref]);
5815 36 : wflists->windowFuncs[wc->winref] = NIL;
5816 :
5817 : /*
5818 : * transformWindowFuncCall() should have made sure there
5819 : * are no other duplicates, so we needn't bother looking
5820 : * any further.
5821 : */
5822 36 : break;
5823 : }
5824 : }
5825 : }
5826 : }
5827 2336 : }
5828 :
5829 : /*
5830 : * select_active_windows
5831 : * Create a list of the "active" window clauses (ie, those referenced
5832 : * by non-deleted WindowFuncs) in the order they are to be executed.
5833 : */
5834 : static List *
5835 2336 : select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
5836 : {
5837 2336 : List *windowClause = root->parse->windowClause;
5838 2336 : List *result = NIL;
5839 : ListCell *lc;
5840 2336 : int nActive = 0;
5841 2336 : WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
5842 2336 : * list_length(windowClause));
5843 :
5844 : /* First, construct an array of the active windows */
5845 4882 : foreach(lc, windowClause)
5846 : {
5847 2546 : WindowClause *wc = lfirst_node(WindowClause, lc);
5848 :
5849 : /* It's only active if wflists shows some related WindowFuncs */
5850 : Assert(wc->winref <= wflists->maxWinRef);
5851 2546 : if (wflists->windowFuncs[wc->winref] == NIL)
5852 60 : continue;
5853 :
5854 2486 : actives[nActive].wc = wc; /* original clause */
5855 :
5856 : /*
5857 : * For sorting, we want the list of partition keys followed by the
5858 : * list of sort keys. But pathkeys construction will remove duplicates
5859 : * between the two, so we can as well (even though we can't detect all
5860 : * of the duplicates, since some may come from ECs - that might mean
5861 : * we miss optimization chances here). We must, however, ensure that
5862 : * the order of entries is preserved with respect to the ones we do
5863 : * keep.
5864 : *
5865 : * partitionClause and orderClause had their own duplicates removed in
5866 : * parse analysis, so we're only concerned here with removing
5867 : * orderClause entries that also appear in partitionClause.
5868 : */
5869 4972 : actives[nActive].uniqueOrder =
5870 2486 : list_concat_unique(list_copy(wc->partitionClause),
5871 2486 : wc->orderClause);
5872 2486 : nActive++;
5873 : }
5874 :
5875 : /*
5876 : * Sort active windows by their partitioning/ordering clauses, ignoring
5877 : * any framing clauses, so that the windows that need the same sorting are
5878 : * adjacent in the list. When we come to generate paths, this will avoid
5879 : * inserting additional Sort nodes.
5880 : *
5881 : * This is how we implement a specific requirement from the SQL standard,
5882 : * which says that when two or more windows are order-equivalent (i.e.
5883 : * have matching partition and order clauses, even if their names or
5884 : * framing clauses differ), then all peer rows must be presented in the
5885 : * same order in all of them. If we allowed multiple sort nodes for such
5886 : * cases, we'd risk having the peer rows end up in different orders in
5887 : * equivalent windows due to sort instability. (See General Rule 4 of
5888 : * <window clause> in SQL2008 - SQL2016.)
5889 : *
5890 : * Additionally, if the entire list of clauses of one window is a prefix
5891 : * of another, put first the window with stronger sorting requirements.
5892 : * This way we will first sort for stronger window, and won't have to sort
5893 : * again for the weaker one.
5894 : */
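 :     /*
 :      * Illustrative example (editorial note, not part of the original
 :      * source): windows defined as OVER (ORDER BY a) and OVER (ORDER BY a, b)
 :      * end up adjacent here, with the (a, b) window ordered first, so a
 :      * single sort on (a, b) also serves the (a)-only window.
 :      */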
5895 2336 : qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5896 :
5897 : /* build ordered list of the original WindowClause nodes */
5898 4822 : for (int i = 0; i < nActive; i++)
5899 2486 : result = lappend(result, actives[i].wc);
5900 :
5901 2336 : pfree(actives);
5902 :
5903 2336 : return result;
5904 : }
5905 :
5906 : /*
5907 : * common_prefix_cmp
5908 : * QSort comparison function for WindowClauseSortData
5909 : *
5910 : * Sort the windows by the required sorting clauses. First, compare the sort
5911 : * clauses themselves. Second, if one window's clauses are a prefix of another
5912 : * one's clauses, put the window with more sort clauses first.
5913 : *
5914 : * We purposefully sort by the highest tleSortGroupRef first. Since
5915 : * tleSortGroupRefs are assigned for the query's DISTINCT and ORDER BY first
5916 : * and because here we sort the lowest tleSortGroupRefs last, if a
5917 : * WindowClause is sharing a tleSortGroupRef with the query's DISTINCT or
5918 : * ORDER BY clause, this makes it more likely that the final WindowAgg will
5919 : * provide presorted input for the query's DISTINCT or ORDER BY clause, thus
5920 : * reducing the total number of sorts required for the query.
5921 : */
5922 : static int
5923 162 : common_prefix_cmp(const void *a, const void *b)
5924 : {
5925 162 : const WindowClauseSortData *wcsa = a;
5926 162 : const WindowClauseSortData *wcsb = b;
5927 : ListCell *item_a;
5928 : ListCell *item_b;
5929 :
5930 276 : forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5931 : {
5932 216 : SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
5933 216 : SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
5934 :
5935 216 : if (sca->tleSortGroupRef > scb->tleSortGroupRef)
5936 102 : return -1;
5937 204 : else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
5938 66 : return 1;
5939 138 : else if (sca->sortop > scb->sortop)
5940 0 : return -1;
5941 138 : else if (sca->sortop < scb->sortop)
5942 24 : return 1;
5943 114 : else if (sca->nulls_first && !scb->nulls_first)
5944 0 : return -1;
5945 114 : else if (!sca->nulls_first && scb->nulls_first)
5946 0 : return 1;
5947 : /* no need to compare eqop, since it is fully determined by sortop */
5948 : }
5949 :
5950 60 : if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
5951 6 : return -1;
5952 54 : else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
5953 12 : return 1;
5954 :
5955 42 : return 0;
5956 : }
5957 :
5958 : /*
5959 : * make_window_input_target
5960 : * Generate appropriate PathTarget for initial input to WindowAgg nodes.
5961 : *
5962 : * When the query has window functions, this function computes the desired
5963 : * target to be computed by the node just below the first WindowAgg.
5964 : * This tlist must contain all values needed to evaluate the window functions,
5965 : * compute the final target list, and perform any required final sort step.
5966 : * If multiple WindowAggs are needed, each intermediate one adds its window
5967 : * function results onto this base tlist; only the topmost WindowAgg computes
5968 : * the actual desired target list.
5969 : *
5970 : * This function is much like make_group_input_target, though not quite enough
5971 : * like it to share code. As in that function, we flatten most expressions
5972 : * into their component variables. But we do not want to flatten window
5973 : * PARTITION BY/ORDER BY clauses, since that might result in multiple
5974 : * evaluations of them, which would be bad (possibly even resulting in
5975 : * inconsistent answers, if they contain volatile functions).
5976 : * Also, we must not flatten GROUP BY clauses that were left unflattened by
5977 : * make_group_input_target, because we may no longer have access to the
5978 : * individual Vars in them.
5979 : *
5980 : * Another key difference from make_group_input_target is that we don't
5981 : * flatten Aggref expressions, since those are to be computed below the
5982 : * window functions and just referenced like Vars above that.
5983 : *
5984 : * 'final_target' is the query's final target list (in PathTarget form)
5985 : * 'activeWindows' is the list of active windows previously identified by
5986 : * select_active_windows.
5987 : *
5988 : * The result is the PathTarget to be computed by the plan node immediately
5989 : * below the first WindowAgg node.
5990 : */
5991 : static PathTarget *
5992 2336 : make_window_input_target(PlannerInfo *root,
5993 : PathTarget *final_target,
5994 : List *activeWindows)
5995 : {
5996 : PathTarget *input_target;
5997 : Bitmapset *sgrefs;
5998 : List *flattenable_cols;
5999 : List *flattenable_vars;
6000 : int i;
6001 : ListCell *lc;
6002 :
6003 : Assert(root->parse->hasWindowFuncs);
6004 :
6005 : /*
6006 : * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6007 : * into a bitmapset for convenient reference below.
6008 : */
6009 2336 : sgrefs = NULL;
6010 4822 : foreach(lc, activeWindows)
6011 : {
6012 2486 : WindowClause *wc = lfirst_node(WindowClause, lc);
6013 : ListCell *lc2;
6014 :
6015 3198 : foreach(lc2, wc->partitionClause)
6016 : {
6017 712 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6018 :
6019 712 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6020 : }
6021 4630 : foreach(lc2, wc->orderClause)
6022 : {
6023 2144 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6024 :
6025 2144 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6026 : }
6027 : }
6028 :
6029 : /* Add in sortgroupref numbers of GROUP BY clauses, too */
6030 2522 : foreach(lc, root->processed_groupClause)
6031 : {
6032 186 : SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6033 :
6034 186 : sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6035 : }
6036 :
6037 : /*
6038 : * Construct a target containing all the non-flattenable targetlist items,
6039 : * and save aside the others for a moment.
6040 : */
6041 2336 : input_target = create_empty_pathtarget();
6042 2336 : flattenable_cols = NIL;
6043 :
6044 2336 : i = 0;
6045 10114 : foreach(lc, final_target->exprs)
6046 : {
6047 7778 : Expr *expr = (Expr *) lfirst(lc);
6048 7778 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
6049 :
6050 : /*
6051 : * Don't want to deconstruct window clauses or GROUP BY items. (Note
6052 : * that such items can't contain window functions, so it's okay to
6053 : * compute them below the WindowAgg nodes.)
6054 : */
6055 7778 : if (sgref != 0 && bms_is_member(sgref, sgrefs))
6056 : {
6057 : /*
6058 : * Don't want to deconstruct this value, so add it to the input
6059 : * target as-is.
6060 : */
6061 2746 : add_column_to_pathtarget(input_target, expr, sgref);
6062 : }
6063 : else
6064 : {
6065 : /*
6066 : * Column is to be flattened, so just remember the expression for
6067 : * later call to pull_var_clause.
6068 : */
6069 5032 : flattenable_cols = lappend(flattenable_cols, expr);
6070 : }
6071 :
6072 7778 : i++;
6073 : }
6074 :
6075 : /*
6076 : * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6077 : * add them to the input target if not already present. (Some might be
6078 : * there already because they're used directly as window/group clauses.)
6079 : *
6080 : * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6081 : * Aggrefs are placed in the Agg node's tlist and not left to be computed
6082 : * at higher levels. On the other hand, we should recurse into
6083 : * WindowFuncs to make sure their input expressions are available.
6084 : */
6085 2336 : flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6086 : PVC_INCLUDE_AGGREGATES |
6087 : PVC_RECURSE_WINDOWFUNCS |
6088 : PVC_INCLUDE_PLACEHOLDERS);
6089 2336 : add_new_columns_to_pathtarget(input_target, flattenable_vars);
6090 :
6091 : /* clean up cruft */
6092 2336 : list_free(flattenable_vars);
6093 2336 : list_free(flattenable_cols);
6094 :
6095 : /* XXX this causes some redundant cost calculation ... */
6096 2336 : return set_pathtarget_cost_width(root, input_target);
6097 : }
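/*
 * A hypothetical example of the policy above: given a final target of
 * (a, b + 1, avg(c) OVER (PARTITION BY a)), the input target built here
 * keeps "a" whole (it carries the PARTITION BY sortgroupref), flattens
 * "b + 1" to just the Var "b", and pulls the Var "c" out of the
 * WindowFunc's argument, leaving the window function itself to be
 * computed above this target.
 */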
6098 :
6099 : /*
6100 : * make_pathkeys_for_window
6101 : * Create a pathkeys list describing the required input ordering
6102 : * for the given WindowClause.
6103 : *
6104 : * Modifies wc's partitionClause to remove any clauses which are deemed
6105 : * redundant by the pathkey logic.
6106 : *
6107 : * The required ordering is first the PARTITION keys, then the ORDER keys.
6108 : * In the future we might try to implement windowing using hashing, in which
6109 : * case the ordering could be relaxed, but for now we always sort.
6110 : */
6111 : static List *
6112 5020 : make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6113 : List *tlist)
6114 : {
6115 5020 : List *window_pathkeys = NIL;
6116 :
6117 : /* Throw error if can't sort */
6118 5020 : if (!grouping_is_sortable(wc->partitionClause))
6119 0 : ereport(ERROR,
6120 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6121 : errmsg("could not implement window PARTITION BY"),
6122 : errdetail("Window partitioning columns must be of sortable datatypes.")));
6123 5020 : if (!grouping_is_sortable(wc->orderClause))
6124 0 : ereport(ERROR,
6125 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6126 : errmsg("could not implement window ORDER BY"),
6127 : errdetail("Window ordering columns must be of sortable datatypes.")));
6128 :
6129 : /*
6130 : * First fetch the pathkeys for the PARTITION BY clause. We can safely
6131 : * remove any clauses from the wc->partitionClause for redundant pathkeys.
6132 : */
6133 5020 : if (wc->partitionClause != NIL)
6134 : {
6135 : bool sortable;
6136 :
6137 1198 : window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6138 : &wc->partitionClause,
6139 : tlist,
6140 : true,
6141 : false,
6142 : &sortable,
6143 : false);
6144 :
6145 : Assert(sortable);
6146 : }
6147 :
6148 : /*
6149 : * In principle, we could also consider removing redundant ORDER BY items,
6150 : * as doing so does not alter the result of peer row checks done by
6151 : * the executor. However, we must *not* remove the ordering column for
6152 : * RANGE OFFSET cases, as the executor needs that for in_range tests even
6153 : * if it's known to be equal to some partitioning column.
6154 : */
6155 5020 : if (wc->orderClause != NIL)
6156 : {
6157 : List *orderby_pathkeys;
6158 :
6159 4210 : orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6160 : wc->orderClause,
6161 : tlist);
6162 :
6163 : /* Okay, make the combined pathkeys */
6164 4210 : if (window_pathkeys != NIL)
6165 880 : window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6166 : else
6167 3330 : window_pathkeys = orderby_pathkeys;
6168 : }
6169 :
6170 5020 : return window_pathkeys;
6171 : }
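/*
 * For a hypothetical window specification such as
 * OVER (PARTITION BY a ORDER BY b), the list built here is the pathkeys
 * for (a, b): the PARTITION BY keys first, with the ORDER BY keys appended
 * via append_pathkeys(); a spec with only ORDER BY b yields just (b).
 */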
6172 :
6173 : /*
6174 : * make_sort_input_target
6175 : * Generate appropriate PathTarget for initial input to Sort step.
6176 : *
6177 : * If the query has ORDER BY, this function chooses the target to be computed
6178 : * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6179 : * project) steps. This might or might not be identical to the query's final
6180 : * output target.
6181 : *
6182 : * The main argument for keeping the sort-input tlist the same as the final
6183 : * is that we avoid a separate projection node (which will be needed if
6184 : * they're different, because Sort can't project). However, there are also
6185 : * advantages to postponing tlist evaluation till after the Sort: it ensures
6186 : * a consistent order of evaluation for any volatile functions in the tlist,
6187 : * and if there's also a LIMIT, we can stop the query without ever computing
6188 : * tlist functions for later rows, which is beneficial for both volatile and
6189 : * expensive functions.
6190 : *
6191 : * Our current policy is to postpone volatile expressions till after the sort
6192 : * unconditionally (assuming that that's possible, ie they are in plain tlist
6193 : * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6194 : * postpone set-returning expressions, because running them beforehand would
6195 : * bloat the sort dataset, and because it might cause unexpected output order
6196 : * if the sort isn't stable. However there's a constraint on that: all SRFs
6197 : * in the tlist should be evaluated at the same plan step, so that they can
6198 : * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6199 : * mustn't postpone any SRFs. (Note that in principle that policy should
6200 : * probably get applied to the group/window input targetlists too, but we
6201 : * have not done that historically.) Lastly, expensive expressions are
6202 : * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6203 : * partial evaluation of the query is possible (if neither is true, we expect
6204 : * to have to evaluate the expressions for every row anyway), or if there are
6205 : * any volatile or set-returning expressions (since once we've put in a
6206 : * projection at all, it won't cost any more to postpone more stuff).
6207 : *
6208 : * Another issue that could potentially be considered here is that
6209 : * evaluating tlist expressions could result in data that's either wider
6210 : * or narrower than the input Vars, thus changing the volume of data that
6211 : * has to go through the Sort. However, we usually have only a very bad
6212 : * idea of the output width of any expression more complex than a Var,
6213 : * so for now it seems too risky to try to optimize on that basis.
6214 : *
6215 : * Note that if we do produce a modified sort-input target, and then the
6216 : * query ends up not using an explicit Sort, no particular harm is done:
6217 : * we'll initially use the modified target for the preceding path nodes,
6218 : * but then change them to the final target with apply_projection_to_path.
6219 : * Moreover, in such a case the guarantees about evaluation order of
6220 : * volatile functions still hold, since the rows are sorted already.
6221 : *
6222 : * This function has some things in common with make_group_input_target and
6223 : * make_window_input_target, though the detailed rules for what to do are
6224 : * different. We never flatten/postpone any grouping or ordering columns;
6225 : * those are needed before the sort. If we do flatten a particular
6226 : * expression, we leave Aggref and WindowFunc nodes alone, since those were
6227 : * computed earlier.
6228 : *
6229 : * 'final_target' is the query's final target list (in PathTarget form)
6230 : * 'have_postponed_srfs' is an output argument, see below
6231 : *
6232 : * The result is the PathTarget to be computed by the plan node immediately
6233 : * below the Sort step (and the Distinct step, if any). This will be
6234 : * exactly final_target if we decide a projection step wouldn't be helpful.
6235 : *
6236 : * In addition, *have_postponed_srfs is set to true if we choose to postpone
6237 : * any set-returning functions to after the Sort.
6238 : */
6239 : static PathTarget *
6240 59726 : make_sort_input_target(PlannerInfo *root,
6241 : PathTarget *final_target,
6242 : bool *have_postponed_srfs)
6243 : {
6244 59726 : Query *parse = root->parse;
6245 : PathTarget *input_target;
6246 : int ncols;
6247 : bool *col_is_srf;
6248 : bool *postpone_col;
6249 : bool have_srf;
6250 : bool have_volatile;
6251 : bool have_expensive;
6252 : bool have_srf_sortcols;
6253 : bool postpone_srfs;
6254 : List *postponable_cols;
6255 : List *postponable_vars;
6256 : int i;
6257 : ListCell *lc;
6258 :
6259 : /* Shouldn't get here unless query has ORDER BY */
6260 : Assert(parse->sortClause);
6261 :
6262 59726 : *have_postponed_srfs = false; /* default result */
6263 :
6264 : /* Inspect tlist and collect per-column information */
6265 59726 : ncols = list_length(final_target->exprs);
6266 59726 : col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6267 59726 : postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6268 59726 : have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6269 :
6270 59726 : i = 0;
6271 357830 : foreach(lc, final_target->exprs)
6272 : {
6273 298104 : Expr *expr = (Expr *) lfirst(lc);
6274 :
6275 : /*
6276 : * If the column has a sortgroupref, assume it has to be evaluated
6277 : * before sorting. Generally such columns would be ORDER BY, GROUP
6278 : * BY, etc targets. One exception is columns that were removed from
6279 : * GROUP BY by remove_useless_groupby_columns() ... but those would
6280 : * only be Vars anyway. There don't seem to be any cases where it
6281 : * would be worth the trouble to double-check.
6282 : */
6283 298104 : if (get_pathtarget_sortgroupref(final_target, i) == 0)
6284 : {
6285 : /*
6286 : * Check for SRF or volatile functions. Check the SRF case first
6287 : * because we must know whether we have any postponed SRFs.
6288 : */
6289 212666 : if (parse->hasTargetSRFs &&
6290 216 : expression_returns_set((Node *) expr))
6291 : {
6292 : /* We'll decide below whether these are postponable */
6293 96 : col_is_srf[i] = true;
6294 96 : have_srf = true;
6295 : }
6296 212354 : else if (contain_volatile_functions((Node *) expr))
6297 : {
6298 : /* Unconditionally postpone */
6299 160 : postpone_col[i] = true;
6300 160 : have_volatile = true;
6301 : }
6302 : else
6303 : {
6304 : /*
6305 : * Else check the cost. XXX it's annoying to have to do this
6306 : * when set_pathtarget_cost_width() just did it. Refactor to
6307 : * allow sharing the work?
6308 : */
6309 : QualCost cost;
6310 :
6311 212194 : cost_qual_eval_node(&cost, (Node *) expr, root);
6312 :
6313 : /*
6314 : * We arbitrarily define "expensive" as "more than 10X
6315 : * cpu_operator_cost". Note this will take in any PL function
6316 : * with default cost.
6317 : */
6318 212194 : if (cost.per_tuple > 10 * cpu_operator_cost)
6319 : {
6320 15008 : postpone_col[i] = true;
6321 15008 : have_expensive = true;
6322 : }
6323 : }
6324 : }
6325 : else
6326 : {
6327 : /* For sortgroupref cols, just check if any contain SRFs */
6328 85654 : if (!have_srf_sortcols &&
6329 85964 : parse->hasTargetSRFs &&
6330 310 : expression_returns_set((Node *) expr))
6331 124 : have_srf_sortcols = true;
6332 : }
6333 :
6334 298104 : i++;
6335 : }
6336 :
6337 : /*
6338 : * We can postpone SRFs if we have some but none are in sortgroupref cols.
6339 : */
6340 59726 : postpone_srfs = (have_srf && !have_srf_sortcols);
6341 :
6342 : /*
6343 : * If we don't need a post-sort projection, just return final_target.
6344 : */
6345 59726 : if (!(postpone_srfs || have_volatile ||
6346 59510 : (have_expensive &&
6347 9012 : (parse->limitCount || root->tuple_fraction > 0))))
6348 59474 : return final_target;
6349 :
6350 : /*
6351 : * Report whether the post-sort projection will contain set-returning
6352 : * functions. This is important because it affects whether the Sort can
6353 : * rely on the query's LIMIT (if any) to bound the number of rows it needs
6354 : * to return.
6355 : */
6356 252 : *have_postponed_srfs = postpone_srfs;
6357 :
6358 : /*
6359 : * Construct the sort-input target, taking all non-postponable columns and
6360 : * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6361 : * the postponable ones.
6362 : */
6363 252 : input_target = create_empty_pathtarget();
6364 252 : postponable_cols = NIL;
6365 :
6366 252 : i = 0;
6367 2044 : foreach(lc, final_target->exprs)
6368 : {
6369 1792 : Expr *expr = (Expr *) lfirst(lc);
6370 :
6371 1792 : if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6372 310 : postponable_cols = lappend(postponable_cols, expr);
6373 : else
6374 1482 : add_column_to_pathtarget(input_target, expr,
6375 1482 : get_pathtarget_sortgroupref(final_target, i));
6376 :
6377 1792 : i++;
6378 : }
6379 :
6380 : /*
6381 : * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6382 : * postponable columns, and add them to the sort-input target if not
6383 : * already present. (Some might be there already.) We mustn't
6384 : * deconstruct Aggrefs or WindowFuncs here, since the projection node
6385 : * would be unable to recompute them.
6386 : */
6387 252 : postponable_vars = pull_var_clause((Node *) postponable_cols,
6388 : PVC_INCLUDE_AGGREGATES |
6389 : PVC_INCLUDE_WINDOWFUNCS |
6390 : PVC_INCLUDE_PLACEHOLDERS);
6391 252 : add_new_columns_to_pathtarget(input_target, postponable_vars);
6392 :
6393 : /* clean up cruft */
6394 252 : list_free(postponable_vars);
6395 252 : list_free(postponable_cols);
6396 :
6397 : /* XXX this represents even more redundant cost calculation ... */
6398 252 : return set_pathtarget_cost_width(root, input_target);
6399 : }
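/*
 * A hypothetical illustration of the policy above: for a query like
 * SELECT expensive_fn(x), y FROM tab ORDER BY y LIMIT 10, the expensive
 * (or volatile, or set-returning) column expensive_fn(x) carries no
 * sortgroupref, so it is left out of the sort-input target built here and
 * is computed by a projection above the Sort -- i.e. only for the ten
 * rows that survive the LIMIT.  The sort column "y" must of course still
 * be evaluated below the Sort.
 */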
6400 :
6401 : /*
6402 : * get_cheapest_fractional_path
6403 : * Find the cheapest path for retrieving a specified fraction of all
6404 : * the tuples expected to be returned by the given relation.
6405 : *
6406 : * We interpret tuple_fraction the same way as grouping_planner.
6407 : *
6408 : * We assume set_cheapest() has been run on the given rel.
6409 : */
6410 : Path *
6411 481694 : get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6412 : {
6413 481694 : Path *best_path = rel->cheapest_total_path;
6414 : ListCell *l;
6415 :
6416 : /* If all tuples will be retrieved, just return the cheapest-total path */
6417 481694 : if (tuple_fraction <= 0.0)
6418 476200 : return best_path;
6419 :
6420 : /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6421 5494 : if (tuple_fraction >= 1.0 && best_path->rows > 0)
6422 2082 : tuple_fraction /= best_path->rows;
6423 :
6424 13170 : foreach(l, rel->pathlist)
6425 : {
6426 7676 : Path *path = (Path *) lfirst(l);
6427 :
6428 9858 : if (path == rel->cheapest_total_path ||
6429 2182 : compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6430 7444 : continue;
6431 :
6432 232 : best_path = path;
6433 : }
6434 :
6435 5494 : return best_path;
6436 : }
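/*
 * A standalone sketch of the fractional-cost rule the comparison above
 * relies on: the cost of fetching a fraction f of a path's output is taken
 * as a linear interpolation between startup and total cost.  The struct
 * below is a stand-in for illustration only, not the real Path node.
 */
typedef struct
{
	double		startup_cost;
	double		total_cost;
} PathCostSketch;

static double
fractional_cost_sketch(const PathCostSketch *p, double f)
{
	/* a fraction outside (0,1) means "fetch everything": use total cost */
	if (f <= 0.0 || f >= 1.0)
		return p->total_cost;
	return p->startup_cost + f * (p->total_cost - p->startup_cost);
}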
6437 :
6438 : /*
6439 : * adjust_paths_for_srfs
6440 : * Fix up the Paths of the given upperrel to handle tSRFs properly.
6441 : *
6442 : * The executor can only handle set-returning functions that appear at the
6443 : * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6444 : * that are not at top level, we need to split up the evaluation into multiple
6445 : * plan levels in which each level satisfies this constraint. This function
6446 : * modifies each Path of an upperrel that (might) compute any SRFs in its
6447 : * output tlist to insert appropriate projection steps.
6448 : *
6449 : * The given targets and targets_contain_srfs lists are from
6450 : * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6451 : * target in targets.
6452 : */
6453 : static void
6454 9348 : adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
6455 : List *targets, List *targets_contain_srfs)
6456 : {
6457 : ListCell *lc;
6458 :
6459 : Assert(list_length(targets) == list_length(targets_contain_srfs));
6460 : Assert(!linitial_int(targets_contain_srfs));
6461 :
6462 : /* If no SRFs appear at this plan level, nothing to do */
6463 9348 : if (list_length(targets) == 1)
6464 608 : return;
6465 :
6466 : /*
6467 : * Stack SRF-evaluation nodes atop each path for the rel.
6468 : *
6469 : * In principle we should re-run set_cheapest() here to identify the
6470 : * cheapest path, but it seems unlikely that adding the same tlist eval
6471 : * costs to all the paths would change that, so we don't bother. Instead,
6472 : * just assume that the cheapest-startup and cheapest-total paths remain
6473 : * so. (There should be no parameterized paths anymore, so we needn't
6474 : * worry about updating cheapest_parameterized_paths.)
6475 : */
6476 17506 : foreach(lc, rel->pathlist)
6477 : {
6478 8766 : Path *subpath = (Path *) lfirst(lc);
6479 8766 : Path *newpath = subpath;
6480 : ListCell *lc1,
6481 : *lc2;
6482 :
6483 : Assert(subpath->param_info == NULL);
6484 27424 : forboth(lc1, targets, lc2, targets_contain_srfs)
6485 : {
6486 18658 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6487 18658 : bool contains_srfs = (bool) lfirst_int(lc2);
6488 :
6489 : /* If this level doesn't contain SRFs, do regular projection */
6490 18658 : if (contains_srfs)
6491 8826 : newpath = (Path *) create_set_projection_path(root,
6492 : rel,
6493 : newpath,
6494 : thistarget);
6495 : else
6496 9832 : newpath = (Path *) apply_projection_to_path(root,
6497 : rel,
6498 : newpath,
6499 : thistarget);
6500 : }
6501 8766 : lfirst(lc) = newpath;
6502 8766 : if (subpath == rel->cheapest_startup_path)
6503 352 : rel->cheapest_startup_path = newpath;
6504 8766 : if (subpath == rel->cheapest_total_path)
6505 352 : rel->cheapest_total_path = newpath;
6506 : }
6507 :
6508 : /* Likewise for partial paths, if any */
6509 8746 : foreach(lc, rel->partial_pathlist)
6510 : {
6511 6 : Path *subpath = (Path *) lfirst(lc);
6512 6 : Path *newpath = subpath;
6513 : ListCell *lc1,
6514 : *lc2;
6515 :
6516 : Assert(subpath->param_info == NULL);
6517 24 : forboth(lc1, targets, lc2, targets_contain_srfs)
6518 : {
6519 18 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6520 18 : bool contains_srfs = (bool) lfirst_int(lc2);
6521 :
6522 : /* If this level doesn't contain SRFs, do regular projection */
6523 18 : if (contains_srfs)
6524 6 : newpath = (Path *) create_set_projection_path(root,
6525 : rel,
6526 : newpath,
6527 : thistarget);
6528 : else
6529 : {
6530 : /* avoid apply_projection_to_path, in case of multiple refs */
6531 12 : newpath = (Path *) create_projection_path(root,
6532 : rel,
6533 : newpath,
6534 : thistarget);
6535 : }
6536 : }
6537 6 : lfirst(lc) = newpath;
6538 : }
6539 : }
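/*
 * A hypothetical example of the splitting handled above: a tlist entry like
 *
 *     generate_series(1, generate_series(1, 3))
 *
 * has a nested SRF, so split_pathtarget_at_srfs() hands us three targets:
 * the SRF-free input columns, then a level computing the inner
 * generate_series(), then a level computing the outer one.  The loops above
 * stack a ProjectSet (or plain projection) path for each level in turn.
 */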
6540 :
6541 : /*
6542 : * expression_planner
6543 : * Perform planner's transformations on a standalone expression.
6544 : *
6545 : * Various utility commands need to evaluate expressions that are not part
6546 : * of a plannable query. They can do so using the executor's regular
6547 : * expression-execution machinery, but first the expression has to be fed
6548 : * through here to transform it from parser output to something executable.
6549 : *
6550 : * Currently, we disallow sublinks in standalone expressions, so there's no
6551 : * real "planning" involved here. (That might not always be true though.)
6552 : * What we must do is run eval_const_expressions to ensure that any function
6553 : * calls are converted to positional notation and function default arguments
6554 : * get inserted. The fact that constant subexpressions get simplified is a
6555 : * side-effect that is useful when the expression will get evaluated more than
6556 : * once. Also, we must fix operator function IDs.
6557 : *
6558 : * This does not return any information about dependencies of the expression.
6559 : * Hence callers should use the results only for the duration of the current
6560 : * query. Callers that would like to cache the results for longer should use
6561 : * expression_planner_with_deps, probably via the plancache.
6562 : *
6563 : * Note: this must not make any damaging changes to the passed-in expression
6564 : * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6565 : * we first do an expression_tree_mutator-based walk, what is returned will
6566 : * be a new node tree.) The result is constructed in the current memory
6567 : * context; beware that this can leak a lot of additional stuff there, too.
6568 : */
6569 : Expr *
6570 235622 : expression_planner(Expr *expr)
6571 : {
6572 : Node *result;
6573 :
6574 : /*
6575 : * Convert named-argument function calls, insert default arguments and
6576 : * simplify constant subexprs
6577 : */
6578 235622 : result = eval_const_expressions(NULL, (Node *) expr);
6579 :
6580 : /* Fill in opfuncid values if missing */
6581 235604 : fix_opfuncids(result);
6582 :
6583 235604 : return (Expr *) result;
6584 : }
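/*
 * As a hypothetical illustration of what eval_const_expressions() does for
 * this caller: a call written as f(b => 2, a => 1), for some user function
 * f(a int, b int DEFAULT 0), is rewritten into positional form f(1, 2); a
 * call f(1) has the default supplied, giving f(1, 0); and a constant
 * subexpression such as 2 + 2 is folded to 4 as a side-effect.
 */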
6585 :
6586 : /*
6587 : * expression_planner_with_deps
6588 : * Perform planner's transformations on a standalone expression,
6589 : * returning expression dependency information along with the result.
6590 : *
6591 : * This is identical to expression_planner() except that it also returns
6592 : * information about possible dependencies of the expression, ie identities of
6593 : * objects whose definitions affect the result. As in a PlannedStmt, these
6594 : * are expressed as a list of relation Oids and a list of PlanInvalItems.
6595 : */
6596 : Expr *
6597 346 : expression_planner_with_deps(Expr *expr,
6598 : List **relationOids,
6599 : List **invalItems)
6600 : {
6601 : Node *result;
6602 : PlannerGlobal glob;
6603 : PlannerInfo root;
6604 :
6605 : /* Make up dummy planner state so we can use setrefs machinery */
6606 6574 : MemSet(&glob, 0, sizeof(glob));
6607 346 : glob.type = T_PlannerGlobal;
6608 346 : glob.relationOids = NIL;
6609 346 : glob.invalItems = NIL;
6610 :
6611 30448 : MemSet(&root, 0, sizeof(root));
6612 346 : root.type = T_PlannerInfo;
6613 346 : root.glob = &glob;
6614 :
6615 : /*
6616 : * Convert named-argument function calls, insert default arguments and
6617 : * simplify constant subexprs. Collect identities of inlined functions
6618 : * and elided domains, too.
6619 : */
6620 346 : result = eval_const_expressions(&root, (Node *) expr);
6621 :
6622 : /* Fill in opfuncid values if missing */
6623 346 : fix_opfuncids(result);
6624 :
6625 : /*
6626 : * Now walk the finished expression to find anything else we ought to
6627 : * record as an expression dependency.
6628 : */
6629 346 : (void) extract_query_dependencies_walker(result, &root);
6630 :
6631 346 : *relationOids = glob.relationOids;
6632 346 : *invalItems = glob.invalItems;
6633 :
6634 346 : return (Expr *) result;
6635 : }
6636 :
6637 :
6638 : /*
6639 : * plan_cluster_use_sort
6640 : * Use the planner to decide how CLUSTER should implement sorting
6641 : *
6642 : * tableOid is the OID of a table to be clustered on its index indexOid
6643 : * (which is already known to be a btree index). Decide whether it's
6644 : * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6645 : * Return true to use sorting, false to use an indexscan.
6646 : *
6647 : * Note: caller had better already hold some type of lock on the table.
6648 : */
6649 : bool
6650 188 : plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6651 : {
6652 : PlannerInfo *root;
6653 : Query *query;
6654 : PlannerGlobal *glob;
6655 : RangeTblEntry *rte;
6656 : RelOptInfo *rel;
6657 : IndexOptInfo *indexInfo;
6658 : QualCost indexExprCost;
6659 : Cost comparisonCost;
6660 : Path *seqScanPath;
6661 : Path seqScanAndSortPath;
6662 : IndexPath *indexScanPath;
6663 : ListCell *lc;
6664 :
6665 : /* We can short-circuit the cost comparison if indexscans are disabled */
6666 188 : if (!enable_indexscan)
6667 30 : return true; /* use sort */
6668 :
6669 : /* Set up mostly-dummy planner state */
6670 158 : query = makeNode(Query);
6671 158 : query->commandType = CMD_SELECT;
6672 :
6673 158 : glob = makeNode(PlannerGlobal);
6674 :
6675 158 : root = makeNode(PlannerInfo);
6676 158 : root->parse = query;
6677 158 : root->glob = glob;
6678 158 : root->query_level = 1;
6679 158 : root->planner_cxt = CurrentMemoryContext;
6680 158 : root->wt_param_id = -1;
6681 158 : root->join_domains = list_make1(makeNode(JoinDomain));
6682 :
6683 : /* Build a minimal RTE for the rel */
6684 158 : rte = makeNode(RangeTblEntry);
6685 158 : rte->rtekind = RTE_RELATION;
6686 158 : rte->relid = tableOid;
6687 158 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6688 158 : rte->rellockmode = AccessShareLock;
6689 158 : rte->lateral = false;
6690 158 : rte->inh = false;
6691 158 : rte->inFromCl = true;
6692 158 : query->rtable = list_make1(rte);
6693 158 : addRTEPermissionInfo(&query->rteperminfos, rte);
6694 :
6695 : /* Set up RTE/RelOptInfo arrays */
6696 158 : setup_simple_rel_arrays(root);
6697 :
6698 : /* Build RelOptInfo */
6699 158 : rel = build_simple_rel(root, 1, NULL);
6700 :
6701 : /* Locate IndexOptInfo for the target index */
6702 158 : indexInfo = NULL;
6703 196 : foreach(lc, rel->indexlist)
6704 : {
6705 196 : indexInfo = lfirst_node(IndexOptInfo, lc);
6706 196 : if (indexInfo->indexoid == indexOid)
6707 158 : break;
6708 : }
6709 :
6710 : /*
6711 : * It's possible that get_relation_info did not generate an IndexOptInfo
6712 : * for the desired index; this could happen if it's not yet reached its
6713 : * indcheckxmin usability horizon, or if it's a system index and we're
6714 : * ignoring system indexes. In such cases we should tell CLUSTER to not
6715 : * trust the index contents but use seqscan-and-sort.
6716 : */
6717 158 : if (lc == NULL) /* not in the list? */
6718 0 : return true; /* use sort */
6719 :
6720 : /*
6721 : * Rather than doing all the pushups that would be needed to use
6722 : * set_baserel_size_estimates, just do a quick hack for rows and width.
6723 : */
6724 158 : rel->rows = rel->tuples;
6725 158 : rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6726 :
6727 158 : root->total_table_pages = rel->pages;
6728 :
6729 : /*
6730 : * Determine eval cost of the index expressions, if any. We need to
6731 : * charge twice that amount for each tuple comparison that happens during
6732 : * the sort, since tuplesort.c will have to re-evaluate the index
6733 : * expressions each time. (XXX that's pretty inefficient...)
6734 : */
6735 158 : cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6736 158 : comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6737 :
6738 : /* Estimate the cost of seq scan + sort */
6739 158 : seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6740 158 : cost_sort(&seqScanAndSortPath, root, NIL,
6741 : seqScanPath->disabled_nodes,
6742 158 : seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6743 : comparisonCost, maintenance_work_mem, -1.0);
6744 :
6745 : /* Estimate the cost of index scan */
6746 158 : indexScanPath = create_index_path(root, indexInfo,
6747 : NIL, NIL, NIL, NIL,
6748 : ForwardScanDirection, false,
6749 : NULL, 1.0, false);
6750 :
6751 158 : return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6752 : }
6753 :
6754 : /*
6755 : * plan_create_index_workers
6756 : * Use the planner to decide how many parallel worker processes
6757 : * CREATE INDEX should request for use
6758 : *
6759 : * tableOid is the table on which the index is to be built. indexOid is the
6760 : * OID of an index to be created or reindexed (which must be an index with
6761 : * support for parallel builds - currently btree or BRIN).
6762 : *
6763 : * Return value is the number of parallel worker processes to request. It
6764 : * may be unsafe to proceed if this is 0. Note that this does not include the
6765 : * leader participating as a worker (value is always a number of parallel
6766 : * worker processes).
6767 : *
6768 : * Note: caller had better already hold some type of lock on the table and
6769 : * index.
6770 : */
6771 : int
6772 33492 : plan_create_index_workers(Oid tableOid, Oid indexOid)
6773 : {
6774 : PlannerInfo *root;
6775 : Query *query;
6776 : PlannerGlobal *glob;
6777 : RangeTblEntry *rte;
6778 : Relation heap;
6779 : Relation index;
6780 : RelOptInfo *rel;
6781 : int parallel_workers;
6782 : BlockNumber heap_blocks;
6783 : double reltuples;
6784 : double allvisfrac;
6785 :
6786 : /*
6787 : * We don't allow performing parallel operation in standalone backend or
6788 : * when parallelism is disabled.
6789 : */
6790 33492 : if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
6791 466 : return 0;
6792 :
6793 : /* Set up largely-dummy planner state */
6794 33026 : query = makeNode(Query);
6795 33026 : query->commandType = CMD_SELECT;
6796 :
6797 33026 : glob = makeNode(PlannerGlobal);
6798 :
6799 33026 : root = makeNode(PlannerInfo);
6800 33026 : root->parse = query;
6801 33026 : root->glob = glob;
6802 33026 : root->query_level = 1;
6803 33026 : root->planner_cxt = CurrentMemoryContext;
6804 33026 : root->wt_param_id = -1;
6805 33026 : root->join_domains = list_make1(makeNode(JoinDomain));
6806 :
6807 : /*
6808 : * Build a minimal RTE.
6809 : *
6810 : * Mark the RTE with inh = true. This is a kludge to prevent
6811 : * get_relation_info() from fetching index info; suppressing that is
6812 : * necessary because that function does not expect any index it examines
6813 : * to be currently undergoing REINDEX.
6814 : */
6815 33026 : rte = makeNode(RangeTblEntry);
6816 33026 : rte->rtekind = RTE_RELATION;
6817 33026 : rte->relid = tableOid;
6818 33026 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6819 33026 : rte->rellockmode = AccessShareLock;
6820 33026 : rte->lateral = false;
6821 33026 : rte->inh = true;
6822 33026 : rte->inFromCl = true;
6823 33026 : query->rtable = list_make1(rte);
6824 33026 : addRTEPermissionInfo(&query->rteperminfos, rte);
6825 :
6826 : /* Set up RTE/RelOptInfo arrays */
6827 33026 : setup_simple_rel_arrays(root);
6828 :
6829 : /* Build RelOptInfo */
6830 33026 : rel = build_simple_rel(root, 1, NULL);
6831 :
6832 : /* Rels are assumed already locked by the caller */
6833 33026 : heap = table_open(tableOid, NoLock);
6834 33026 : index = index_open(indexOid, NoLock);
6835 :
6836 : /*
6837 : * Determine if it's safe to proceed.
6838 : *
6839 : * Currently, parallel workers can't access the leader's temporary tables.
6840 : * Furthermore, any index predicate or index expressions must be parallel
6841 : * safe.
6842 : */
6843 33026 : if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
6844 31062 : !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
6845 30942 : !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
6846 : {
6847 2084 : parallel_workers = 0;
6848 2084 : goto done;
6849 : }
6850 :
6851 : /*
6852 : * If parallel_workers storage parameter is set for the table, accept that
6853 : * as the number of parallel worker processes to launch (though still cap
6854 : * at max_parallel_maintenance_workers). Note that we deliberately do not
6855 : * consider any other factor when parallel_workers is set. (e.g., memory
6856 : * use by workers.)
6857 : */
6858 30942 : if (rel->rel_parallel_workers != -1)
6859 : {
6860 14 : parallel_workers = Min(rel->rel_parallel_workers,
6861 : max_parallel_maintenance_workers);
6862 14 : goto done;
6863 : }
6864 :
6865 : /*
6866 : * Estimate heap relation size ourselves, since rel->pages cannot be
6867 : * trusted (heap RTE was marked as inheritance parent)
6868 : */
6869 30928 : estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
6870 :
6871 : /*
6872 : * Determine number of workers to scan the heap relation using generic
6873 : * model
6874 : */
6875 30928 : parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
6876 : max_parallel_maintenance_workers);
6877 :
6878 : /*
6879 : * Cap workers based on available maintenance_work_mem as needed.
6880 : *
6881 : * Note that each tuplesort participant receives an even share of the
6882 : * total maintenance_work_mem budget. Aim to leave participants
6883 : * (including the leader as a participant) with no less than 32MB of
6884 : * memory. This means that a maintenance_work_mem setting of 64MB (the
6885 : * default) is just barely enough to launch a single parallel worker to
6886 : * sort.
6887 : */
6888 31084 : while (parallel_workers > 0 &&
6889 314 : maintenance_work_mem / (parallel_workers + 1) < 32768L)
6890 156 : parallel_workers--;
6891 :
6892 30928 : done:
6893 33026 : index_close(index, NoLock);
6894 33026 : table_close(heap, NoLock);
6895 :
6896 33026 : return parallel_workers;
6897 : }
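/*
 * A minimal standalone sketch of the maintenance_work_mem cap applied
 * above.  The argument is in kilobytes, like the GUC itself; 32768 kB is
 * the 32MB floor per participant (workers plus the leader).
 */
static int
cap_workers_by_mem_sketch(int parallel_workers, int maintenance_work_mem_kb)
{
	while (parallel_workers > 0 &&
		   maintenance_work_mem_kb / (parallel_workers + 1) < 32768)
		parallel_workers--;
	return parallel_workers;
}

/*
 * E.g. with the 64MB default (65536 kB), 65536 / 2 = 32768, so exactly one
 * worker survives the cap; at 63MB (64512 kB) none would.
 */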
6898 :
6899 : /*
6900 : * add_paths_to_grouping_rel
6901 : *
6902 : * Add non-partial paths to grouping relation.
6903 : */
6904 : static void
6905 40672 : add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
6906 : RelOptInfo *grouped_rel,
6907 : RelOptInfo *partially_grouped_rel,
6908 : const AggClauseCosts *agg_costs,
6909 : grouping_sets_data *gd, double dNumGroups,
6910 : GroupPathExtraData *extra)
6911 : {
6912 40672 : Query *parse = root->parse;
6913 40672 : Path *cheapest_path = input_rel->cheapest_total_path;
6914 : ListCell *lc;
6915 40672 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6916 40672 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6917 40672 : List *havingQual = (List *) extra->havingQual;
6918 40672 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6919 :
6920 40672 : if (can_sort)
6921 : {
6922 : /*
6923 : * Use any available suitably-sorted path as input, and also consider
6924 : * sorting the cheapest-total path and incremental sort on any paths
6925 : * with presorted keys.
6926 : */
6927 83972 : foreach(lc, input_rel->pathlist)
6928 : {
6929 : ListCell *lc2;
6930 43306 : Path *path = (Path *) lfirst(lc);
6931 43306 : Path *path_save = path;
6932 43306 : List *pathkey_orderings = NIL;
6933 :
6934 : /* generate alternative group orderings that might be useful */
6935 43306 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
6936 :
6937 : Assert(list_length(pathkey_orderings) > 0);
6938 :
6939 86756 : foreach(lc2, pathkey_orderings)
6940 : {
6941 43450 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
6942 :
6943 : /* restore the path (we replace it in the loop) */
6944 43450 : path = path_save;
6945 :
6946 43450 : path = make_ordered_path(root,
6947 : grouped_rel,
6948 : path,
6949 : cheapest_path,
6950 : info->pathkeys,
6951 : -1.0);
6952 43450 : if (path == NULL)
6953 368 : continue;
6954 :
6955 : /* Now decide what to stick atop it */
6956 43082 : if (parse->groupingSets)
6957 : {
6958 914 : consider_groupingsets_paths(root, grouped_rel,
6959 : path, true, can_hash,
6960 : gd, agg_costs, dNumGroups);
6961 : }
6962 42168 : else if (parse->hasAggs)
6963 : {
6964 : /*
6965 : * We have aggregation, possibly with plain GROUP BY. Make
6966 : * an AggPath.
6967 : */
6968 41414 : add_path(grouped_rel, (Path *)
6969 41414 : create_agg_path(root,
6970 : grouped_rel,
6971 : path,
6972 41414 : grouped_rel->reltarget,
6973 41414 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
6974 : AGGSPLIT_SIMPLE,
6975 : info->clauses,
6976 : havingQual,
6977 : agg_costs,
6978 : dNumGroups));
6979 : }
6980 754 : else if (parse->groupClause)
6981 : {
6982 : /*
6983 : * We have GROUP BY without aggregation or grouping sets.
6984 : * Make a GroupPath.
6985 : */
6986 754 : add_path(grouped_rel, (Path *)
6987 754 : create_group_path(root,
6988 : grouped_rel,
6989 : path,
6990 : info->clauses,
6991 : havingQual,
6992 : dNumGroups));
6993 : }
6994 : else
6995 : {
6996 : /* Other cases should have been handled above */
6997 : Assert(false);
6998 : }
6999 : }
7000 : }
7001 :
7002 : /*
7003 : * Instead of operating directly on the input relation, we can
7004 : * consider finalizing a partially aggregated path.
7005 : */
7006 40666 : if (partially_grouped_rel != NULL)
7007 : {
7008 3974 : foreach(lc, partially_grouped_rel->pathlist)
7009 : {
7010 : ListCell *lc2;
7011 2402 : Path *path = (Path *) lfirst(lc);
7012 2402 : Path *path_save = path;
7013 2402 : List *pathkey_orderings = NIL;
7014 :
7015 : /* generate alternative group orderings that might be useful */
7016 2402 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7017 :
7018 : Assert(list_length(pathkey_orderings) > 0);
7019 :
7020 : /* process all potentially interesting grouping reorderings */
7021 4804 : foreach(lc2, pathkey_orderings)
7022 : {
7023 2402 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7024 :
7025 : /* restore the path (we replace it in the loop) */
7026 2402 : path = path_save;
7027 :
7028 2402 : path = make_ordered_path(root,
7029 : grouped_rel,
7030 : path,
7031 2402 : partially_grouped_rel->cheapest_total_path,
7032 : info->pathkeys,
7033 : -1.0);
7034 :
7035 2402 : if (path == NULL)
7036 108 : continue;
7037 :
7038 2294 : if (parse->hasAggs)
7039 2052 : add_path(grouped_rel, (Path *)
7040 2052 : create_agg_path(root,
7041 : grouped_rel,
7042 : path,
7043 2052 : grouped_rel->reltarget,
7044 2052 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7045 : AGGSPLIT_FINAL_DESERIAL,
7046 : info->clauses,
7047 : havingQual,
7048 : agg_final_costs,
7049 : dNumGroups));
7050 : else
7051 242 : add_path(grouped_rel, (Path *)
7052 242 : create_group_path(root,
7053 : grouped_rel,
7054 : path,
7055 : info->clauses,
7056 : havingQual,
7057 : dNumGroups));
7058 :
7059 : }
7060 : }
7061 : }
7062 : }
7063 :
7064 40672 : if (can_hash)
7065 : {
7066 4926 : if (parse->groupingSets)
7067 : {
7068 : /*
7069 : * Try for a hash-only groupingsets path over unsorted input.
7070 : */
7071 770 : consider_groupingsets_paths(root, grouped_rel,
7072 : cheapest_path, false, true,
7073 : gd, agg_costs, dNumGroups);
7074 : }
7075 : else
7076 : {
7077 : /*
7078 : * Generate a HashAgg Path. We just need an Agg over the
7079 : * cheapest-total input path, since input order won't matter.
7080 : */
7081 4156 : add_path(grouped_rel, (Path *)
7082 4156 : create_agg_path(root, grouped_rel,
7083 : cheapest_path,
7084 4156 : grouped_rel->reltarget,
7085 : AGG_HASHED,
7086 : AGGSPLIT_SIMPLE,
7087 : root->processed_groupClause,
7088 : havingQual,
7089 : agg_costs,
7090 : dNumGroups));
7091 : }
7092 :
7093 : /*
7094 : * Generate a Finalize HashAgg Path atop of the cheapest partially
7095 : * grouped path, assuming there is one
7096 : */
7097 4926 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7098 : {
7099 778 : Path *path = partially_grouped_rel->cheapest_total_path;
7100 :
7101 778 : add_path(grouped_rel, (Path *)
7102 778 : create_agg_path(root,
7103 : grouped_rel,
7104 : path,
7105 778 : grouped_rel->reltarget,
7106 : AGG_HASHED,
7107 : AGGSPLIT_FINAL_DESERIAL,
7108 : root->processed_groupClause,
7109 : havingQual,
7110 : agg_final_costs,
7111 : dNumGroups));
7112 : }
7113 : }
7114 :
7115 : /*
7116 : * When partitionwise aggregate is used, we might have fully aggregated
7117 : * paths in the partial pathlist, because add_paths_to_append_rel() will
7118 : * consider a path for grouped_rel consisting of a Parallel Append of
7119 : * non-partial paths from each child.
7120 : */
7121 40672 : if (grouped_rel->partial_pathlist != NIL)
7122 162 : gather_grouping_paths(root, grouped_rel);
7123 40672 : }
7124 :
7125 : /*
7126 : * create_partial_grouping_paths
7127 : *
7128 : * Create a new upper relation representing the result of partial aggregation
7129 : * and populate it with appropriate paths. Note that we don't finalize the
7130 : * lists of paths here, so the caller can add additional partial or non-partial
7131 : * paths and must afterward call gather_grouping_paths and set_cheapest on
7132 : * the returned upper relation.
7133 : *
7134 : * All paths for this new upper relation -- both partial and non-partial --
7135 : * have been partially aggregated but require a subsequent FinalizeAggregate
7136 : * step.
7137 : *
7138 : * NB: This function is allowed to return NULL if it determines that there is
7139 : * no real need to create a new RelOptInfo.
7140 : */
7141 : static RelOptInfo *
7142 37130 : create_partial_grouping_paths(PlannerInfo *root,
7143 : RelOptInfo *grouped_rel,
7144 : RelOptInfo *input_rel,
7145 : grouping_sets_data *gd,
7146 : GroupPathExtraData *extra,
7147 : bool force_rel_creation)
7148 : {
7149 37130 : Query *parse = root->parse;
7150 : RelOptInfo *partially_grouped_rel;
7151 37130 : AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7152 37130 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7153 37130 : Path *cheapest_partial_path = NULL;
7154 37130 : Path *cheapest_total_path = NULL;
7155 37130 : double dNumPartialGroups = 0;
7156 37130 : double dNumPartialPartialGroups = 0;
7157 : ListCell *lc;
7158 37130 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7159 37130 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7160 :
7161 : /*
7162 : * Consider whether we should generate partially aggregated non-partial
7163 : * paths. We can only do this if we have a non-partial path, and only if
7164 : * the parent of the input rel is performing partial partitionwise
7165 : * aggregation. (Note that extra->patype is the type of partitionwise
7166 : * aggregation being used at the parent level, not this level.)
7167 : */
7168 37130 : if (input_rel->pathlist != NIL &&
7169 37130 : extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7170 618 : cheapest_total_path = input_rel->cheapest_total_path;
7171 :
7172 : /*
7173 : * If parallelism is possible for grouped_rel, then we should consider
7174 : * generating partially-grouped partial paths. However, if the input rel
7175 : * has no partial paths, then we can't.
7176 : */
7177 37130 : if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7178 1774 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
7179 :
7180 : /*
7181 : * If we can't partially aggregate partial paths, and we can't partially
7182 : * aggregate non-partial paths, then don't bother creating the new
7183 : * RelOptInfo at all, unless the caller specified force_rel_creation.
7184 : */
7185 37130 : if (cheapest_total_path == NULL &&
7186 35038 : cheapest_partial_path == NULL &&
7187 35038 : !force_rel_creation)
7188 34940 : return NULL;
7189 :
7190 : /*
7191 : * Build a new upper relation to represent the result of partially
7192 : * aggregating the rows from the input relation.
7193 : */
7194 2190 : partially_grouped_rel = fetch_upper_rel(root,
7195 : UPPERREL_PARTIAL_GROUP_AGG,
7196 : grouped_rel->relids);
7197 2190 : partially_grouped_rel->consider_parallel =
7198 2190 : grouped_rel->consider_parallel;
7199 2190 : partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7200 2190 : partially_grouped_rel->serverid = grouped_rel->serverid;
7201 2190 : partially_grouped_rel->userid = grouped_rel->userid;
7202 2190 : partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7203 2190 : partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7204 :
7205 : /*
7206 : * Build target list for partial aggregate paths. These paths cannot just
7207 : * emit the same tlist as regular aggregate paths, because (1) we must
7208 : * include Vars and Aggrefs needed in HAVING, which might not appear in
7209 : * the result tlist, and (2) the Aggrefs must be set in partial mode.
7210 : */
7211 2190 : partially_grouped_rel->reltarget =
7212 2190 : make_partial_grouping_target(root, grouped_rel->reltarget,
7213 : extra->havingQual);
7214 :
7215 2190 : if (!extra->partial_costs_set)
7216 : {
7217 : /*
7218 : * Collect statistics about aggregates for estimating costs of
7219 : * performing aggregation in parallel.
7220 : */
7221 7704 : MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7222 7704 : MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7223 1284 : if (parse->hasAggs)
7224 : {
7225 : /* partial phase */
7226 1156 : get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7227 : agg_partial_costs);
7228 :
7229 : /* final phase */
7230 1156 : get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7231 : agg_final_costs);
7232 : }
7233 :
7234 1284 : extra->partial_costs_set = true;
7235 : }
7236 :
7237 : /* Estimate number of partial groups. */
7238 2190 : if (cheapest_total_path != NULL)
7239 : dNumPartialGroups =
7240 618 : get_number_of_groups(root,
7241 : cheapest_total_path->rows,
7242 : gd,
7243 : extra->targetList);
7244 2190 : if (cheapest_partial_path != NULL)
7245 : dNumPartialPartialGroups =
7246 1774 : get_number_of_groups(root,
7247 : cheapest_partial_path->rows,
7248 : gd,
7249 : extra->targetList);
7250 :
7251 2190 : if (can_sort && cheapest_total_path != NULL)
7252 : {
7253 : /* This should have been checked previously */
7254 : Assert(parse->hasAggs || parse->groupClause);
7255 :
7256 : /*
7257 : * Use any available suitably-sorted path as input, and also consider
7258 : * sorting the cheapest partial path.
7259 : */
7260 1236 : foreach(lc, input_rel->pathlist)
7261 : {
7262 : ListCell *lc2;
7263 618 : Path *path = (Path *) lfirst(lc);
7264 618 : Path *path_save = path;
7265 618 : List *pathkey_orderings = NIL;
7266 :
7267 : /* generate alternative group orderings that might be useful */
7268 618 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7269 :
7270 : Assert(list_length(pathkey_orderings) > 0);
7271 :
7272 : /* process all potentially interesting grouping reorderings */
7273 1236 : foreach(lc2, pathkey_orderings)
7274 : {
7275 618 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7276 :
7277 : /* restore the path (we replace it in the loop) */
7278 618 : path = path_save;
7279 :
7280 618 : path = make_ordered_path(root,
7281 : partially_grouped_rel,
7282 : path,
7283 : cheapest_total_path,
7284 : info->pathkeys,
7285 : -1.0);
7286 :
7287 618 : if (path == NULL)
7288 0 : continue;
7289 :
7290 618 : if (parse->hasAggs)
7291 546 : add_path(partially_grouped_rel, (Path *)
7292 546 : create_agg_path(root,
7293 : partially_grouped_rel,
7294 : path,
7295 546 : partially_grouped_rel->reltarget,
7296 546 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7297 : AGGSPLIT_INITIAL_SERIAL,
7298 : info->clauses,
7299 : NIL,
7300 : agg_partial_costs,
7301 : dNumPartialGroups));
7302 : else
7303 72 : add_path(partially_grouped_rel, (Path *)
7304 72 : create_group_path(root,
7305 : partially_grouped_rel,
7306 : path,
7307 : info->clauses,
7308 : NIL,
7309 : dNumPartialGroups));
7310 : }
7311 : }
7312 : }
7313 :
7314 2190 : if (can_sort && cheapest_partial_path != NULL)
7315 : {
7316 : /* Similar to above logic, but for partial paths. */
7317 3560 : foreach(lc, input_rel->partial_pathlist)
7318 : {
7319 : ListCell *lc2;
7320 1786 : Path *path = (Path *) lfirst(lc);
7321 1786 : Path *path_save = path;
7322 1786 : List *pathkey_orderings = NIL;
7323 :
7324 : /* generate alternative group orderings that might be useful */
7325 1786 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7326 :
7327 : Assert(list_length(pathkey_orderings) > 0);
7328 :
7329 : /* process all potentially interesting grouping reorderings */
7330 3572 : foreach(lc2, pathkey_orderings)
7331 : {
7332 1786 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7333 :
7334 :
7335 : /* restore the path (we replace it in the loop) */
7336 1786 : path = path_save;
7337 :
7338 1786 : path = make_ordered_path(root,
7339 : partially_grouped_rel,
7340 : path,
7341 : cheapest_partial_path,
7342 : info->pathkeys,
7343 : -1.0);
7344 :
7345 1786 : if (path == NULL)
7346 6 : continue;
7347 :
7348 1780 : if (parse->hasAggs)
7349 1664 : add_partial_path(partially_grouped_rel, (Path *)
7350 1664 : create_agg_path(root,
7351 : partially_grouped_rel,
7352 : path,
7353 1664 : partially_grouped_rel->reltarget,
7354 1664 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7355 : AGGSPLIT_INITIAL_SERIAL,
7356 : info->clauses,
7357 : NIL,
7358 : agg_partial_costs,
7359 : dNumPartialPartialGroups));
7360 : else
7361 116 : add_partial_path(partially_grouped_rel, (Path *)
7362 116 : create_group_path(root,
7363 : partially_grouped_rel,
7364 : path,
7365 : info->clauses,
7366 : NIL,
7367 : dNumPartialPartialGroups));
7368 : }
7369 : }
7370 : }
7371 :
7372 : /*
7373 : * Add a partially-grouped HashAgg Path where possible
7374 : */
7375 2190 : if (can_hash && cheapest_total_path != NULL)
7376 : {
7377 : /* Checked above */
7378 : Assert(parse->hasAggs || parse->groupClause);
7379 :
7380 618 : add_path(partially_grouped_rel, (Path *)
7381 618 : create_agg_path(root,
7382 : partially_grouped_rel,
7383 : cheapest_total_path,
7384 618 : partially_grouped_rel->reltarget,
7385 : AGG_HASHED,
7386 : AGGSPLIT_INITIAL_SERIAL,
7387 : root->processed_groupClause,
7388 : NIL,
7389 : agg_partial_costs,
7390 : dNumPartialGroups));
7391 : }
7392 :
7393 : /*
7394 : * Now add a partially-grouped HashAgg partial Path where possible
7395 : */
7396 2190 : if (can_hash && cheapest_partial_path != NULL)
7397 : {
7398 980 : add_partial_path(partially_grouped_rel, (Path *)
7399 980 : create_agg_path(root,
7400 : partially_grouped_rel,
7401 : cheapest_partial_path,
7402 980 : partially_grouped_rel->reltarget,
7403 : AGG_HASHED,
7404 : AGGSPLIT_INITIAL_SERIAL,
7405 : root->processed_groupClause,
7406 : NIL,
7407 : agg_partial_costs,
7408 : dNumPartialPartialGroups));
7409 : }
7410 :
7411 : /*
7412 : * If there is an FDW that's responsible for all baserels of the query,
7413 : * let it consider adding partially grouped ForeignPaths.
7414 : */
7415 2190 : if (partially_grouped_rel->fdwroutine &&
7416 6 : partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7417 : {
7418 6 : FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7419 :
7420 6 : fdwroutine->GetForeignUpperPaths(root,
7421 : UPPERREL_PARTIAL_GROUP_AGG,
7422 : input_rel, partially_grouped_rel,
7423 : extra);
7424 : }
7425 :
7426 2190 : return partially_grouped_rel;
7427 : }
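/*
 * As a hypothetical illustration of the split performed above: for
 * count(*) with GROUP BY, each path added here computes per-group partial
 * counts (AGGSPLIT_INITIAL_SERIAL); the Finalize Aggregate built later,
 * e.g. by add_paths_to_grouping_rel(), then combines those partial states
 * (AGGSPLIT_FINAL_DESERIAL) into the final per-group counts.
 */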
7428 :
7429 : /*
7430 : * make_ordered_path
7431 : * Return a path ordered by 'pathkeys' based on the given 'path'. May
7432 : * return NULL if it doesn't make sense to generate an ordered path in
7433 : * this case.
7434 : */
7435 : static Path *
7436 52778 : make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
7437 : Path *cheapest_path, List *pathkeys, double limit_tuples)
7438 : {
7439 : bool is_sorted;
7440 : int presorted_keys;
7441 :
7442 52778 : is_sorted = pathkeys_count_contained_in(pathkeys,
7443 : path->pathkeys,
7444 : &presorted_keys);
7445 :
7446 52778 : if (!is_sorted)
7447 : {
7448 : /*
7449 : * Try at least sorting the cheapest path and also try incrementally
7450 : * sorting any path which is partially sorted already (no need to deal
7451 : * with paths which have presorted keys when incremental sort is
7452 : * disabled unless it's the cheapest input path).
7453 : */
7454 12458 : if (path != cheapest_path &&
7455 1968 : (presorted_keys == 0 || !enable_incremental_sort))
7456 1018 : return NULL;
7457 :
7458 : /*
7459 : * We've no need to consider both a sort and incremental sort. We'll
7460 : * just do a sort if there are no presorted keys and an incremental
7461 : * sort when there are presorted keys.
7462 : */
7463 11440 : if (presorted_keys == 0 || !enable_incremental_sort)
7464 10322 : path = (Path *) create_sort_path(root,
7465 : rel,
7466 : path,
7467 : pathkeys,
7468 : limit_tuples);
7469 : else
7470 1118 : path = (Path *) create_incremental_sort_path(root,
7471 : rel,
7472 : path,
7473 : pathkeys,
7474 : presorted_keys,
7475 : limit_tuples);
7476 : }
7477 :
7478 51760 : return path;
7479 : }
7480 :
7481 : /*
7482 : * Generate Gather and Gather Merge paths for a grouping relation or partial
7483 : * grouping relation.
7484 : *
7485 : * generate_useful_gather_paths does most of the work, but we also consider a
7486 : * special case: we could try sorting the data by the group_pathkeys and then
7487 : * applying Gather Merge.
7488 : *
7489 : * NB: This function shouldn't be used for anything other than a grouped or
7490 : * partially grouped relation not only because of the fact that it explicitly
7491 : * references group_pathkeys but we pass "true" as the third argument to
7492 : * generate_useful_gather_paths().
7493 : */
7494 : static void
7495 1636 : gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7496 : {
7497 : ListCell *lc;
7498 : Path *cheapest_partial_path;
7499 : List *groupby_pathkeys;
7500 :
7501 : /*
7502 : * This occurs after any partial aggregation has taken place, so trim off
7503 : * any pathkeys added for ORDER BY / DISTINCT aggregates.
7504 : */
7505 1636 : if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7506 18 : groupby_pathkeys = list_copy_head(root->group_pathkeys,
7507 : root->num_groupby_pathkeys);
7508 : else
7509 1618 : groupby_pathkeys = root->group_pathkeys;
7510 :
7511 : /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7512 1636 : generate_useful_gather_paths(root, rel, true);
7513 :
7514 1636 : cheapest_partial_path = linitial(rel->partial_pathlist);
7515 :
7516 : /* XXX Shouldn't this also consider the group-key-reordering? */
7517 3872 : foreach(lc, rel->partial_pathlist)
7518 : {
7519 2236 : Path *path = (Path *) lfirst(lc);
7520 : bool is_sorted;
7521 : int presorted_keys;
7522 : double total_groups;
7523 :
7524 2236 : is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7525 : path->pathkeys,
7526 : &presorted_keys);
7527 :
7528 2236 : if (is_sorted)
7529 1462 : continue;
7530 :
7531 : /*
7532 : * Try at least sorting the cheapest path and also try incrementally
7533 : * sorting any path which is partially sorted already (no need to deal
7534 : * with paths which have presorted keys when incremental sort is
7535 : * disabled unless it's the cheapest input path).
7536 : */
7537 774 : if (path != cheapest_partial_path &&
7538 0 : (presorted_keys == 0 || !enable_incremental_sort))
7539 0 : continue;
7540 :
7541 : /*
7542 : * We've no need to consider both a sort and incremental sort. We'll
7543 : * just do a sort if there are no presorted keys and an incremental
7544 : * sort when there are presorted keys.
7545 : */
7546 774 : if (presorted_keys == 0 || !enable_incremental_sort)
7547 774 : path = (Path *) create_sort_path(root, rel, path,
7548 : groupby_pathkeys,
7549 : -1.0);
7550 : else
7551 0 : path = (Path *) create_incremental_sort_path(root,
7552 : rel,
7553 : path,
7554 : groupby_pathkeys,
7555 : presorted_keys,
7556 : -1.0);
7557 774 : total_groups = compute_gather_rows(path);
7558 : path = (Path *)
7559 774 : create_gather_merge_path(root,
7560 : rel,
7561 : path,
7562 774 : rel->reltarget,
7563 : groupby_pathkeys,
7564 : NULL,
7565 : &total_groups);
7566 :
7567 774 : add_path(rel, path);
7568 : }
7569 1636 : }
7570 :
7571 : /*
7572 : * can_partial_agg
7573 : *
7574 : * Determines whether or not partial grouping and/or aggregation is possible.
7575 : * Returns true when possible, false otherwise.
7576 : */
7577 : static bool
7578 39796 : can_partial_agg(PlannerInfo *root)
7579 : {
7580 39796 : Query *parse = root->parse;
7581 :
7582 39796 : if (!parse->hasAggs && parse->groupClause == NIL)
7583 : {
7584 : /*
7585 : * We don't know how to do parallel aggregation unless we have either
7586 : * some aggregates or a grouping clause.
7587 : */
7588 0 : return false;
7589 : }
7590 39796 : else if (parse->groupingSets)
7591 : {
7592 : /* We don't know how to do grouping sets in parallel. */
7593 848 : return false;
7594 : }
7595 38948 : else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7596 : {
7597 : /* Insufficient support for partial mode. */
7598 3240 : return false;
7599 : }
7600 :
7601 : /* Everything looks good. */
7602 35708 : return true;
7603 : }
7604 :
7605 : /*
7606 : * apply_scanjoin_target_to_paths
7607 : *
7608 : * Adjust the final scan/join relation, and recursively all of its children,
7609 : * to generate the final scan/join target. It would be more correct to model
7610 : * this as a separate planning step with a new RelOptInfo at the toplevel and
7611 : * for each child relation, but doing it this way is noticeably cheaper.
7612 : * Maybe that problem can be solved at some point, but for now we do this.
7613 : *
7614 : * If tlist_same_exprs is true, then the scan/join target to be applied has
7615 : * the same expressions as the existing reltarget, so we need only insert the
7616 : * appropriate sortgroupref information. By avoiding the creation of
7617 : * projection paths we save effort both immediately and at plan creation time.
7618 : */
7619 : static void
7620 525050 : apply_scanjoin_target_to_paths(PlannerInfo *root,
7621 : RelOptInfo *rel,
7622 : List *scanjoin_targets,
7623 : List *scanjoin_targets_contain_srfs,
7624 : bool scanjoin_target_parallel_safe,
7625 : bool tlist_same_exprs)
7626 : {
7627 525050 : bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7628 : PathTarget *scanjoin_target;
7629 : ListCell *lc;
7630 :
7631 : /* This recurses, so be paranoid. */
7632 525050 : check_stack_depth();
7633 :
7634 : /*
7635 : * If the rel is partitioned, we want to drop its existing paths and
7636 : * generate new ones. This function would still be correct if we kept the
7637 : * existing paths: we'd modify them to generate the correct target above
7638 : * the partitioning Append, and then they'd compete on cost with paths
7639 : * generating the target below the Append. However, in our current cost
7640 : * model the latter way always has the same or cheaper cost, so modifying
7641 : * the existing paths would just be useless work. Moreover, when the cost
7642 : * is the same, varying roundoff errors might sometimes allow an existing
7643 : * path to be picked, resulting in undesirable cross-platform plan
7644 : * variations. So we drop old paths and thereby force the work to be done
7645 : * below the Append, except in the case of a non-parallel-safe target.
7646 : *
7647 : * Some care is needed, because we have to allow
7648 : * generate_useful_gather_paths to see the old partial paths in the next
7649 : * stanza. Hence, zap the main pathlist here, then allow
7650 : * generate_useful_gather_paths to add path(s) to the main list, and
7651 : * finally zap the partial pathlist.
7652 : */
7653 525050 : if (rel_is_partitioned)
7654 12436 : rel->pathlist = NIL;
7655 :
7656 : /*
7657 : * If the scan/join target is not parallel-safe, partial paths cannot
7658 : * generate it.
7659 : */
7660 525050 : if (!scanjoin_target_parallel_safe)
7661 : {
7662 : /*
7663 : * Since we can't generate the final scan/join target in parallel
7664 : * workers, this is our last opportunity to use any partial paths that
7665 : * exist; so build Gather path(s) that use them and emit whatever the
7666 : * current reltarget is. We don't do this in the case where the
7667 : * target is parallel-safe, since we will be able to generate superior
7668 : * paths by doing it after the final scan/join target has been
7669 : * applied.
7670 : */
7671 79984 : generate_useful_gather_paths(root, rel, false);
7672 :
7673 : /* Can't use parallel query above this level. */
7674 79984 : rel->partial_pathlist = NIL;
7675 79984 : rel->consider_parallel = false;
7676 : }
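/*
 * Hedged illustration (hypothetical): if the target computes something
 * parallel-restricted, say a PARALLEL RESTRICTED user function f(a), the
 * Gather paths added just above still emit the old reltarget; the projection
 * computing f(a) is then applied above the Gather by the per-path loop below.
 */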
7677 :
7678 : /* Finish dropping old paths for a partitioned rel, per comment above */
7679 525050 : if (rel_is_partitioned)
7680 12436 : rel->partial_pathlist = NIL;
7681 :
7682 : /* Extract SRF-free scan/join target. */
7683 525050 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7684 :
7685 : /*
7686 : * Apply the SRF-free scan/join target to each existing path.
7687 : *
7688 : * If the tlist exprs are the same, we can just inject the sortgroupref
7689 : * information into the existing pathtargets. Otherwise, replace each
7690 : * path with a projection path that generates the SRF-free scan/join
7691 : * target. This can't change the ordering of paths within rel->pathlist,
7692 : * so we just modify the list in place.
7693 : */
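/*
 * Hedged illustration (hypothetical queries): for "SELECT a, b FROM t GROUP
 * BY a" the scan/join target has the same exprs {a, b} as the existing
 * reltarget, so copying the sortgroupref labels is enough.  For
 * "SELECT a+1, b FROM t" the exprs differ, so each path is wrapped in a
 * ProjectionPath that computes {a+1, b}.
 */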
7694 1079650 : foreach(lc, rel->pathlist)
7695 : {
7696 554600 : Path *subpath = (Path *) lfirst(lc);
7697 :
7698 : /* Shouldn't have any parameterized paths anymore */
7699 : Assert(subpath->param_info == NULL);
7700 :
7701 554600 : if (tlist_same_exprs)
7702 188020 : subpath->pathtarget->sortgrouprefs =
7703 188020 : scanjoin_target->sortgrouprefs;
7704 : else
7705 : {
7706 : Path *newpath;
7707 :
7708 366580 : newpath = (Path *) create_projection_path(root, rel, subpath,
7709 : scanjoin_target);
7710 366580 : lfirst(lc) = newpath;
7711 : }
7712 : }
7713 :
7714 : /* Likewise adjust the targets for any partial paths. */
7715 544382 : foreach(lc, rel->partial_pathlist)
7716 : {
7717 19332 : Path *subpath = (Path *) lfirst(lc);
7718 :
7719 : /* Shouldn't have any parameterized paths anymore */
7720 : Assert(subpath->param_info == NULL);
7721 :
7722 19332 : if (tlist_same_exprs)
7723 15800 : subpath->pathtarget->sortgrouprefs =
7724 15800 : scanjoin_target->sortgrouprefs;
7725 : else
7726 : {
7727 : Path *newpath;
7728 :
7729 3532 : newpath = (Path *) create_projection_path(root, rel, subpath,
7730 : scanjoin_target);
7731 3532 : lfirst(lc) = newpath;
7732 : }
7733 : }
7734 :
7735 : /*
7736 : * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7737 : * atop each existing path. (Note that this function doesn't look at the
7738 : * cheapest-path fields, which is a good thing because they're bogus right
7739 : * now.)
7740 : */
7741 525050 : if (root->parse->hasTargetSRFs)
7742 8740 : adjust_paths_for_srfs(root, rel,
7743 : scanjoin_targets,
7744 : scanjoin_targets_contain_srfs);
7745 :
7746 : /*
7747 : * Update the rel's target to be the final (with SRFs) scan/join target.
7748 : * This now matches the actual output of all the paths, and we might get
7749 : * confused in createplan.c if they don't agree. We must do this now so
7750 : * that any append paths made in the next part will use the correct
7751 : * pathtarget (cf. create_append_path).
7752 : *
7753 : * Note that this is also necessary if GetForeignUpperPaths() gets called
7754 : * on the final scan/join relation or on any of its children, since the
7755 : * FDW might look at the rel's target to create ForeignPaths.
7756 : */
7757 525050 : rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7758 :
7759 : /*
7760 : * If the relation is partitioned, recursively apply the scan/join target
7761 : * to all partitions, and generate brand-new Append paths in which the
7762 : * scan/join target is computed below the Append rather than above it.
7763 : * Since Append is not projection-capable, that might save a separate
7764 : * Result node, and it also is important for partitionwise aggregate.
7765 : */
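/*
 * Hedged illustration (hypothetical): with the target pushed below the
 * Append, a partitioned scan can come out as
 *
 *     Append
 *       ->  Seq Scan on t_p1        (each child emits a+1, b directly)
 *       ->  Seq Scan on t_p2
 *
 * rather than a projection sitting above the Append; the per-child targets
 * are also what partitionwise aggregation relies on.
 */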
7766 525050 : if (rel_is_partitioned)
7767 : {
7768 12436 : List *live_children = NIL;
7769 : int i;
7770 :
7771 : /* Adjust each partition. */
7772 12436 : i = -1;
7773 35014 : while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7774 : {
7775 22578 : RelOptInfo *child_rel = rel->part_rels[i];
7776 : AppendRelInfo **appinfos;
7777 : int nappinfos;
7778 22578 : List *child_scanjoin_targets = NIL;
7779 :
7780 : Assert(child_rel != NULL);
7781 :
7782 : /* Dummy children can be ignored. */
7783 22578 : if (IS_DUMMY_REL(child_rel))
7784 42 : continue;
7785 :
7786 : /* Translate scan/join targets for this child. */
7787 22536 : appinfos = find_appinfos_by_relids(root, child_rel->relids,
7788 : &nappinfos);
7789 45072 : foreach(lc, scanjoin_targets)
7790 : {
7791 22536 : PathTarget *target = lfirst_node(PathTarget, lc);
7792 :
7793 22536 : target = copy_pathtarget(target);
7794 22536 : target->exprs = (List *)
7795 22536 : adjust_appendrel_attrs(root,
7796 22536 : (Node *) target->exprs,
7797 : nappinfos, appinfos);
7798 22536 : child_scanjoin_targets = lappend(child_scanjoin_targets,
7799 : target);
7800 : }
7801 22536 : pfree(appinfos);
7802 :
7803 : /* Recursion does the real work. */
7804 22536 : apply_scanjoin_target_to_paths(root, child_rel,
7805 : child_scanjoin_targets,
7806 : scanjoin_targets_contain_srfs,
7807 : scanjoin_target_parallel_safe,
7808 : tlist_same_exprs);
7809 :
7810 : /* Save non-dummy children for Append paths. */
7811 22536 : if (!IS_DUMMY_REL(child_rel))
7812 22536 : live_children = lappend(live_children, child_rel);
7813 : }
7814 :
7815 : /* Build new paths for this relation by appending child paths. */
7816 12436 : add_paths_to_append_rel(root, rel, live_children);
7817 : }
7818 :
7819 : /*
7820 : * Consider generating Gather or Gather Merge paths. We must only do this
7821 : * if the relation is parallel safe, and we don't do it for child rels to
7822 : * avoid creating multiple Gather nodes within the same plan. We must do
7823 : * this after all paths have been generated and before set_cheapest, since
7824 : * one of the generated paths may turn out to be the cheapest one.
7825 : */
7826 525050 : if (rel->consider_parallel && !IS_OTHER_REL(rel))
7827 154942 : generate_useful_gather_paths(root, rel, false);
7828 :
7829 : /*
7830 : * Reassess which paths are the cheapest, now that we've potentially added
7831 : * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7832 : * this relation.
7833 : */
7834 525050 : set_cheapest(rel);
7835 525050 : }
7836 :
7837 : /*
7838 : * create_partitionwise_grouping_paths
7839 : *
7840 : * If the partition keys of the input relation are part of the GROUP BY clause, all
7841 : * the rows belonging to a given group come from a single partition. This
7842 : * allows aggregation/grouping over a partitioned relation to be broken down
7843 : * into aggregation/grouping on each partition. This should be no worse, and
7844 : * often better, than the normal approach.
7845 : *
7846 : * However, if the GROUP BY clause does not contain all the partition keys,
7847 : * rows from a given group may be spread across multiple partitions. In that
7848 : * case, we perform partial aggregation for each group, append the results,
7849 : * and then finalize aggregation. This is less certain to win than the
7850 : * previous case. It may win if the PartialAggregate stage greatly reduces
7851 : * the number of groups, because fewer rows will pass through the Append node.
7852 : * It may lose if we have lots of small groups.
7853 : */
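/*
 * Hedged illustration (hypothetical table "t" partitioned by (a)): grouping
 * by the partition key allows full partitionwise aggregation,
 *
 *     "SELECT a, count(*) FROM t GROUP BY a"
 *         Append
 *           ->  HashAggregate on t_p1
 *           ->  HashAggregate on t_p2
 *
 * while grouping by a non-partition column only allows the partial form,
 *
 *     "SELECT b, count(*) FROM t GROUP BY b"
 *         Finalize HashAggregate
 *           ->  Append
 *                 ->  Partial HashAggregate on t_p1
 *                 ->  Partial HashAggregate on t_p2
 */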
7854 : static void
7855 562 : create_partitionwise_grouping_paths(PlannerInfo *root,
7856 : RelOptInfo *input_rel,
7857 : RelOptInfo *grouped_rel,
7858 : RelOptInfo *partially_grouped_rel,
7859 : const AggClauseCosts *agg_costs,
7860 : grouping_sets_data *gd,
7861 : PartitionwiseAggregateType patype,
7862 : GroupPathExtraData *extra)
7863 : {
7864 562 : List *grouped_live_children = NIL;
7865 562 : List *partially_grouped_live_children = NIL;
7866 562 : PathTarget *target = grouped_rel->reltarget;
7867 562 : bool partial_grouping_valid = true;
7868 : int i;
7869 :
7870 : Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
7871 : Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
7872 : partially_grouped_rel != NULL);
7873 :
7874 : /* Add paths for partitionwise aggregation/grouping. */
7875 562 : i = -1;
7876 2056 : while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7877 : {
7878 1494 : RelOptInfo *child_input_rel = input_rel->part_rels[i];
7879 : PathTarget *child_target;
7880 : AppendRelInfo **appinfos;
7881 : int nappinfos;
7882 : GroupPathExtraData child_extra;
7883 : RelOptInfo *child_grouped_rel;
7884 : RelOptInfo *child_partially_grouped_rel;
7885 :
7886 : Assert(child_input_rel != NULL);
7887 :
7888 : /* Dummy children can be ignored. */
7889 1494 : if (IS_DUMMY_REL(child_input_rel))
7890 0 : continue;
7891 :
7892 1494 : child_target = copy_pathtarget(target);
7893 :
7894 : /*
7895 : * Copy the given "extra" structure as is and then override the
7896 : * members specific to this child.
7897 : */
7898 1494 : memcpy(&child_extra, extra, sizeof(child_extra));
7899 :
7900 1494 : appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7901 : &nappinfos);
7902 :
7903 1494 : child_target->exprs = (List *)
7904 1494 : adjust_appendrel_attrs(root,
7905 1494 : (Node *) target->exprs,
7906 : nappinfos, appinfos);
7907 :
7908 : /* Translate havingQual and targetList. */
7909 1494 : child_extra.havingQual = (Node *)
7910 : adjust_appendrel_attrs(root,
7911 : extra->havingQual,
7912 : nappinfos, appinfos);
7913 1494 : child_extra.targetList = (List *)
7914 1494 : adjust_appendrel_attrs(root,
7915 1494 : (Node *) extra->targetList,
7916 : nappinfos, appinfos);
7917 :
7918 : /*
7919 : * extra->patype holds the value computed for our parent rel, while
7920 : * patype is the value for this relation.  Since this relation is the
7921 : * child's parent, the child's value must be our patype.
7922 : */
7923 1494 : child_extra.patype = patype;
7924 :
7925 : /*
7926 : * Create grouping relation to hold fully aggregated grouping and/or
7927 : * aggregation paths for the child.
7928 : */
7929 1494 : child_grouped_rel = make_grouping_rel(root, child_input_rel,
7930 : child_target,
7931 1494 : extra->target_parallel_safe,
7932 : child_extra.havingQual);
7933 :
7934 : /* Create grouping paths for this child relation. */
7935 1494 : create_ordinary_grouping_paths(root, child_input_rel,
7936 : child_grouped_rel,
7937 : agg_costs, gd, &child_extra,
7938 : &child_partially_grouped_rel);
7939 :
7940 1494 : if (child_partially_grouped_rel)
7941 : {
7942 : partially_grouped_live_children =
7943 906 : lappend(partially_grouped_live_children,
7944 : child_partially_grouped_rel);
7945 : }
7946 : else
7947 588 : partial_grouping_valid = false;
7948 :
7949 1494 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
7950 : {
7951 876 : set_cheapest(child_grouped_rel);
7952 876 : grouped_live_children = lappend(grouped_live_children,
7953 : child_grouped_rel);
7954 : }
7955 :
7956 1494 : pfree(appinfos);
7957 : }
7958 :
7959 : /*
7960 : * Try to create append paths for partially grouped children. For full
7961 : * partitionwise aggregation, we might have paths in the partial_pathlist
7962 : * if parallel aggregation is possible. For partial partitionwise
7963 : * aggregation, we may have paths in both pathlist and partial_pathlist.
7964 : *
7965 : * NB: We must have a partially grouped path for every child in order to
7966 : * generate a partially grouped path for this relation.
7967 : */
7968 562 : if (partially_grouped_rel && partial_grouping_valid)
7969 : {
7970 : Assert(partially_grouped_live_children != NIL);
7971 :
7972 350 : add_paths_to_append_rel(root, partially_grouped_rel,
7973 : partially_grouped_live_children);
7974 :
7975 : /*
7976 : * We need to call set_cheapest, since the finalization step will use the
7977 : * cheapest path from the rel.
7978 : */
7979 350 : if (partially_grouped_rel->pathlist)
7980 350 : set_cheapest(partially_grouped_rel);
7981 : }
7982 :
7983 : /* If possible, create append paths for fully grouped children. */
7984 562 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
7985 : {
7986 : Assert(grouped_live_children != NIL);
7987 :
7988 320 : add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
7989 : }
7990 562 : }
7991 :
7992 : /*
7993 : * group_by_has_partkey
7994 : *
7995 : * Returns true if all the partition keys of the given relation are part of
7996 : * the GROUP BY clauses, including having matching collation, false otherwise.
7997 : */
7998 : static bool
7999 556 : group_by_has_partkey(RelOptInfo *input_rel,
8000 : List *targetList,
8001 : List *groupClause)
8002 : {
8003 556 : List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8004 556 : int cnt = 0;
8005 : int partnatts;
8006 :
8007 : /* Input relation should be partitioned. */
8008 : Assert(input_rel->part_scheme);
8009 :
8010 : /* Rule this out early if no partition keys are present. */
8011 556 : if (!input_rel->partexprs)
8012 0 : return false;
8013 :
8014 556 : partnatts = input_rel->part_scheme->partnatts;
8015 :
8016 912 : for (cnt = 0; cnt < partnatts; cnt++)
8017 : {
8018 592 : List *partexprs = input_rel->partexprs[cnt];
8019 : ListCell *lc;
8020 592 : bool found = false;
8021 :
8022 810 : foreach(lc, partexprs)
8023 : {
8024 : ListCell *lg;
8025 586 : Expr *partexpr = lfirst(lc);
8026 586 : Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8027 :
8028 924 : foreach(lg, groupexprs)
8029 : {
8030 706 : Expr *groupexpr = lfirst(lg);
8031 706 : Oid groupcoll = exprCollation((Node *) groupexpr);
8032 :
8033 : /*
8034 : * Note: we can assume there is at most one RelabelType node;
8035 : * eval_const_expressions() will have collapsed any stack of them
8036 : * down to one.
8037 : */
8038 706 : if (IsA(groupexpr, RelabelType))
8039 24 : groupexpr = ((RelabelType *) groupexpr)->arg;
8040 :
8041 706 : if (equal(groupexpr, partexpr))
8042 : {
8043 : /*
8044 : * Reject a match if the grouping collation does not match
8045 : * the partitioning collation.
8046 : */
8047 368 : if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8048 : partcoll != groupcoll)
8049 12 : return false;
8050 :
8051 356 : found = true;
8052 356 : break;
8053 : }
8054 : }
8055 :
8056 574 : if (found)
8057 356 : break;
8058 : }
8059 :
8060 : /*
8061 : * If none of this partition key's expressions match any of the
8062 : * GROUP BY expressions, return false.
8063 : */
8064 580 : if (!found)
8065 224 : return false;
8066 : }
8067 :
8068 320 : return true;
8069 : }
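/*
 * A minimal, self-contained sketch of the shape of the test above: every
 * partition key must match some GROUP BY expression.  Plain integers stand
 * in for expression trees, the names are hypothetical, and collation
 * checking is omitted.
 */
#ifdef NOT_USED
#include <stdbool.h>
#include <stdio.h>

static bool
all_partkeys_grouped(const int *partkeys, int npart,
					 const int *groupkeys, int ngroup)
{
	for (int i = 0; i < npart; i++)
	{
		bool		found = false;

		for (int j = 0; j < ngroup; j++)
		{
			if (groupkeys[j] == partkeys[i])
			{
				found = true;
				break;
			}
		}
		/* some partition key is not grouped on: no full partitionwise agg */
		if (!found)
			return false;
	}
	return true;
}

int
main(void)
{
	int			partkeys[] = {1, 2};
	int			group_full[] = {2, 1, 3};	/* covers both keys */
	int			group_part[] = {1, 3};		/* key 2 missing */

	printf("%d %d\n",
		   all_partkeys_grouped(partkeys, 2, group_full, 3),
		   all_partkeys_grouped(partkeys, 2, group_part, 2));	/* prints 1 0 */
	return 0;
}
#endif							/* NOT_USED */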
8070 :
8071 : /*
8072 : * generate_setop_child_grouplist
8073 : * Build a SortGroupClause list defining the sort/grouping properties
8074 : * of the child of a set operation.
8075 : *
8076 : * This is similar to generate_setop_grouplist() but differs as the setop
8077 : * child query's targetlist entries may already have a tleSortGroupRef
8078 : * assigned for other purposes, such as GROUP BYs. Here we keep the
8079 : * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8080 : * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8081 : * any of the columns in the targetlist don't match to the setop's colTypes
8082 : * then we return an empty list. This may leave some TLEs with unreferenced
8083 : * ressortgroupref markings, but that's harmless.
8084 : */
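/*
 * Hedged illustration (hypothetical): for "SELECT a, b FROM t1 UNION SELECT
 * a, b FROM t2", each child's targetlist gets the setop's SortGroupClauses
 * with tleSortGroupRef pointing at that child's own a and b entries.  If a
 * child column's type differs from the corresponding setop colType, NIL is
 * returned and the caller cannot push the setop's grouping requirements down
 * to that child.
 */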
8085 : static List *
8086 11528 : generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
8087 : {
8088 11528 : List *grouplist = copyObject(op->groupClauses);
8089 : ListCell *lg;
8090 : ListCell *lt;
8091 : ListCell *ct;
8092 :
8093 11528 : lg = list_head(grouplist);
8094 11528 : ct = list_head(op->colTypes);
8095 46282 : foreach(lt, targetlist)
8096 : {
8097 35176 : TargetEntry *tle = (TargetEntry *) lfirst(lt);
8098 : SortGroupClause *sgc;
8099 : Oid coltype;
8100 :
8101 : /* Resjunk columns could have sortgrouprefs; leave these alone. */
8102 35176 : if (tle->resjunk)
8103 0 : continue;
8104 :
8105 : /*
8106 : * We expect every non-resjunk target to have a SortGroupClause and
8107 : * colTypes.
8108 : */
8109 : Assert(lg != NULL);
8110 : Assert(ct != NULL);
8111 35176 : sgc = (SortGroupClause *) lfirst(lg);
8112 35176 : coltype = lfirst_oid(ct);
8113 :
8114 : /* reject if target type isn't the same as the setop target type */
8115 35176 : if (coltype != exprType((Node *) tle->expr))
8116 422 : return NIL;
8117 :
8118 34754 : lg = lnext(grouplist, lg);
8119 34754 : ct = lnext(op->colTypes, ct);
8120 :
8121 : /* assign a tleSortGroupRef, or reuse the existing one */
8122 34754 : sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8123 : }
8124 :
8125 : Assert(lg == NULL);
8126 : Assert(ct == NULL);
8127 :
8128 11106 : return grouplist;
8129 : }
|