Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * planner.c
4 : * The query optimizer external interface.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/optimizer/plan/planner.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include <limits.h>
19 : #include <math.h>
20 :
21 : #include "access/genam.h"
22 : #include "access/parallel.h"
23 : #include "access/sysattr.h"
24 : #include "access/table.h"
25 : #include "catalog/pg_aggregate.h"
26 : #include "catalog/pg_inherits.h"
27 : #include "catalog/pg_proc.h"
28 : #include "catalog/pg_type.h"
29 : #include "executor/executor.h"
30 : #include "foreign/fdwapi.h"
31 : #include "jit/jit.h"
32 : #include "lib/bipartite_match.h"
33 : #include "lib/knapsack.h"
34 : #include "miscadmin.h"
35 : #include "nodes/makefuncs.h"
36 : #include "nodes/nodeFuncs.h"
37 : #ifdef OPTIMIZER_DEBUG
38 : #include "nodes/print.h"
39 : #endif
40 : #include "nodes/supportnodes.h"
41 : #include "optimizer/appendinfo.h"
42 : #include "optimizer/clauses.h"
43 : #include "optimizer/cost.h"
44 : #include "optimizer/optimizer.h"
45 : #include "optimizer/paramassign.h"
46 : #include "optimizer/pathnode.h"
47 : #include "optimizer/paths.h"
48 : #include "optimizer/plancat.h"
49 : #include "optimizer/planmain.h"
50 : #include "optimizer/planner.h"
51 : #include "optimizer/prep.h"
52 : #include "optimizer/subselect.h"
53 : #include "optimizer/tlist.h"
54 : #include "parser/analyze.h"
55 : #include "parser/parse_agg.h"
56 : #include "parser/parse_clause.h"
57 : #include "parser/parse_relation.h"
58 : #include "parser/parsetree.h"
59 : #include "partitioning/partdesc.h"
60 : #include "rewrite/rewriteManip.h"
61 : #include "utils/backend_status.h"
62 : #include "utils/lsyscache.h"
63 : #include "utils/rel.h"
64 : #include "utils/selfuncs.h"
65 :
66 : /* GUC parameters */
67 : double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
68 : int debug_parallel_query = DEBUG_PARALLEL_OFF;
69 : bool parallel_leader_participation = true;
70 : bool enable_distinct_reordering = true;
71 :
72 : /* Hook for plugins to get control in planner() */
73 : planner_hook_type planner_hook = NULL;
74 :
75 : /* Hook for plugins to get control when grouping_planner() plans upper rels */
76 : create_upper_paths_hook_type create_upper_paths_hook = NULL;
77 :
78 :
79 : /* Expression kind codes for preprocess_expression */
80 : #define EXPRKIND_QUAL 0
81 : #define EXPRKIND_TARGET 1
82 : #define EXPRKIND_RTFUNC 2
83 : #define EXPRKIND_RTFUNC_LATERAL 3
84 : #define EXPRKIND_VALUES 4
85 : #define EXPRKIND_VALUES_LATERAL 5
86 : #define EXPRKIND_LIMIT 6
87 : #define EXPRKIND_APPINFO 7
88 : #define EXPRKIND_PHV 8
89 : #define EXPRKIND_TABLESAMPLE 9
90 : #define EXPRKIND_ARBITER_ELEM 10
91 : #define EXPRKIND_TABLEFUNC 11
92 : #define EXPRKIND_TABLEFUNC_LATERAL 12
93 : #define EXPRKIND_GROUPEXPR 13
94 :
95 : /*
96 : * Data specific to grouping sets
97 : */
98 : typedef struct
99 : {
100 : List *rollups;
101 : List *hash_sets_idx;
102 : double dNumHashGroups;
103 : bool any_hashable;
104 : Bitmapset *unsortable_refs;
105 : Bitmapset *unhashable_refs;
106 : List *unsortable_sets;
107 : int *tleref_to_colnum_map;
108 : } grouping_sets_data;
109 :
110 : /*
111 : * Temporary structure used during WindowClause reordering so that
112 : * WindowClauses can be sorted on their partitioning/ordering prefix.
113 : */
114 : typedef struct
115 : {
116 : WindowClause *wc;
117 : List *uniqueOrder; /* A List of unique ordering/partitioning
118 : * clauses per Window */
119 : } WindowClauseSortData;
120 :
121 : /* Passthrough data for standard_qp_callback */
122 : typedef struct
123 : {
124 : List *activeWindows; /* active windows, if any */
125 : grouping_sets_data *gset_data; /* grouping sets data, if any */
126 : SetOperationStmt *setop; /* parent set operation or NULL if not a
127 : * subquery belonging to a set operation */
128 : } standard_qp_extra;
129 :
130 : /* Local functions */
131 : static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
132 : static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
133 : static void grouping_planner(PlannerInfo *root, double tuple_fraction,
134 : SetOperationStmt *setops);
135 : static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
136 : static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
137 : int *tleref_to_colnum_map);
138 : static void preprocess_rowmarks(PlannerInfo *root);
139 : static double preprocess_limit(PlannerInfo *root,
140 : double tuple_fraction,
141 : int64 *offset_est, int64 *count_est);
142 : static List *preprocess_groupclause(PlannerInfo *root, List *force);
143 : static List *extract_rollup_sets(List *groupingSets);
144 : static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
145 : static void standard_qp_callback(PlannerInfo *root, void *extra);
146 : static double get_number_of_groups(PlannerInfo *root,
147 : double path_rows,
148 : grouping_sets_data *gd,
149 : List *target_list);
150 : static RelOptInfo *create_grouping_paths(PlannerInfo *root,
151 : RelOptInfo *input_rel,
152 : PathTarget *target,
153 : bool target_parallel_safe,
154 : grouping_sets_data *gd);
155 : static bool is_degenerate_grouping(PlannerInfo *root);
156 : static void create_degenerate_grouping_paths(PlannerInfo *root,
157 : RelOptInfo *input_rel,
158 : RelOptInfo *grouped_rel);
159 : static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
160 : PathTarget *target, bool target_parallel_safe,
161 : Node *havingQual);
162 : static void create_ordinary_grouping_paths(PlannerInfo *root,
163 : RelOptInfo *input_rel,
164 : RelOptInfo *grouped_rel,
165 : const AggClauseCosts *agg_costs,
166 : grouping_sets_data *gd,
167 : GroupPathExtraData *extra,
168 : RelOptInfo **partially_grouped_rel_p);
169 : static void consider_groupingsets_paths(PlannerInfo *root,
170 : RelOptInfo *grouped_rel,
171 : Path *path,
172 : bool is_sorted,
173 : bool can_hash,
174 : grouping_sets_data *gd,
175 : const AggClauseCosts *agg_costs,
176 : double dNumGroups);
177 : static RelOptInfo *create_window_paths(PlannerInfo *root,
178 : RelOptInfo *input_rel,
179 : PathTarget *input_target,
180 : PathTarget *output_target,
181 : bool output_target_parallel_safe,
182 : WindowFuncLists *wflists,
183 : List *activeWindows);
184 : static void create_one_window_path(PlannerInfo *root,
185 : RelOptInfo *window_rel,
186 : Path *path,
187 : PathTarget *input_target,
188 : PathTarget *output_target,
189 : WindowFuncLists *wflists,
190 : List *activeWindows);
191 : static RelOptInfo *create_distinct_paths(PlannerInfo *root,
192 : RelOptInfo *input_rel,
193 : PathTarget *target);
194 : static void create_partial_distinct_paths(PlannerInfo *root,
195 : RelOptInfo *input_rel,
196 : RelOptInfo *final_distinct_rel,
197 : PathTarget *target);
198 : static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
199 : RelOptInfo *input_rel,
200 : RelOptInfo *distinct_rel);
201 : static List *get_useful_pathkeys_for_distinct(PlannerInfo *root,
202 : List *needed_pathkeys,
203 : List *path_pathkeys);
204 : static RelOptInfo *create_ordered_paths(PlannerInfo *root,
205 : RelOptInfo *input_rel,
206 : PathTarget *target,
207 : bool target_parallel_safe,
208 : double limit_tuples);
209 : static PathTarget *make_group_input_target(PlannerInfo *root,
210 : PathTarget *final_target);
211 : static PathTarget *make_partial_grouping_target(PlannerInfo *root,
212 : PathTarget *grouping_target,
213 : Node *havingQual);
214 : static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
215 : static void optimize_window_clauses(PlannerInfo *root,
216 : WindowFuncLists *wflists);
217 : static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
218 : static void name_active_windows(List *activeWindows);
219 : static PathTarget *make_window_input_target(PlannerInfo *root,
220 : PathTarget *final_target,
221 : List *activeWindows);
222 : static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
223 : List *tlist);
224 : static PathTarget *make_sort_input_target(PlannerInfo *root,
225 : PathTarget *final_target,
226 : bool *have_postponed_srfs);
227 : static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
228 : List *targets, List *targets_contain_srfs);
229 : static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
230 : RelOptInfo *grouped_rel,
231 : RelOptInfo *partially_grouped_rel,
232 : const AggClauseCosts *agg_costs,
233 : grouping_sets_data *gd,
234 : double dNumGroups,
235 : GroupPathExtraData *extra);
236 : static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
237 : RelOptInfo *grouped_rel,
238 : RelOptInfo *input_rel,
239 : grouping_sets_data *gd,
240 : GroupPathExtraData *extra,
241 : bool force_rel_creation);
242 : static Path *make_ordered_path(PlannerInfo *root,
243 : RelOptInfo *rel,
244 : Path *path,
245 : Path *cheapest_path,
246 : List *pathkeys,
247 : double limit_tuples);
248 : static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
249 : static bool can_partial_agg(PlannerInfo *root);
250 : static void apply_scanjoin_target_to_paths(PlannerInfo *root,
251 : RelOptInfo *rel,
252 : List *scanjoin_targets,
253 : List *scanjoin_targets_contain_srfs,
254 : bool scanjoin_target_parallel_safe,
255 : bool tlist_same_exprs);
256 : static void create_partitionwise_grouping_paths(PlannerInfo *root,
257 : RelOptInfo *input_rel,
258 : RelOptInfo *grouped_rel,
259 : RelOptInfo *partially_grouped_rel,
260 : const AggClauseCosts *agg_costs,
261 : grouping_sets_data *gd,
262 : PartitionwiseAggregateType patype,
263 : GroupPathExtraData *extra);
264 : static bool group_by_has_partkey(RelOptInfo *input_rel,
265 : List *targetList,
266 : List *groupClause);
267 : static int common_prefix_cmp(const void *a, const void *b);
268 : static List *generate_setop_child_grouplist(SetOperationStmt *op,
269 : List *targetlist);
270 :
271 :
272 : /*****************************************************************************
273 : *
274 : * Query optimizer entry point
275 : *
276 : * To support loadable plugins that monitor or modify planner behavior,
277 : * we provide a hook variable that lets a plugin get control before and
278 : * after the standard planning process. The plugin would normally call
279 : * standard_planner().
280 : *
281 : * Note to plugin authors: standard_planner() scribbles on its Query input,
282 : * so you'd better copy that data structure if you want to plan more than once.
283 : *
284 : *****************************************************************************/
285 : PlannedStmt *
286 453680 : planner(Query *parse, const char *query_string, int cursorOptions,
287 : ParamListInfo boundParams)
288 : {
289 : PlannedStmt *result;
290 :
291 453680 : if (planner_hook)
292 94440 : result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
293 : else
294 359240 : result = standard_planner(parse, query_string, cursorOptions, boundParams);
295 :
296 449346 : pgstat_report_plan_id(result->planId, false);
297 :
298 449346 : return result;
299 : }
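The hook dispatch above is easiest to see from the extension side. A minimal
sketch of the conventional chaining pattern (the extension function name
my_planner is hypothetical; planner_hook, standard_planner(), and _PG_init()
are the real extension points):

#include "postgres.h"

#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

static PlannedStmt *
my_planner(Query *parse, const char *query_string,
           int cursorOptions, ParamListInfo boundParams)
{
    /*
     * Inspect (or copyObject) *parse here if needed; as noted above,
     * standard_planner() scribbles on its Query input.
     */
    if (prev_planner_hook)
        return prev_planner_hook(parse, query_string,
                                 cursorOptions, boundParams);
    return standard_planner(parse, query_string,
                            cursorOptions, boundParams);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = my_planner;
}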
300 :
301 : PlannedStmt *
302 453680 : standard_planner(Query *parse, const char *query_string, int cursorOptions,
303 : ParamListInfo boundParams)
304 : {
305 : PlannedStmt *result;
306 : PlannerGlobal *glob;
307 : double tuple_fraction;
308 : PlannerInfo *root;
309 : RelOptInfo *final_rel;
310 : Path *best_path;
311 : Plan *top_plan;
312 : ListCell *lp,
313 : *lr;
314 :
315 : /*
316 : * Set up global state for this planner invocation. This data is needed
317 : * across all levels of sub-Query that might exist in the given command,
318 : * so we keep it in a separate struct that's linked to by each per-Query
319 : * PlannerInfo.
320 : */
321 453680 : glob = makeNode(PlannerGlobal);
322 :
323 453680 : glob->boundParams = boundParams;
324 453680 : glob->subplans = NIL;
325 453680 : glob->subpaths = NIL;
326 453680 : glob->subroots = NIL;
327 453680 : glob->rewindPlanIDs = NULL;
328 453680 : glob->finalrtable = NIL;
329 453680 : glob->allRelids = NULL;
330 453680 : glob->prunableRelids = NULL;
331 453680 : glob->finalrteperminfos = NIL;
332 453680 : glob->finalrowmarks = NIL;
333 453680 : glob->resultRelations = NIL;
334 453680 : glob->appendRelations = NIL;
335 453680 : glob->partPruneInfos = NIL;
336 453680 : glob->relationOids = NIL;
337 453680 : glob->invalItems = NIL;
338 453680 : glob->paramExecTypes = NIL;
339 453680 : glob->lastPHId = 0;
340 453680 : glob->lastRowMarkId = 0;
341 453680 : glob->lastPlanNodeId = 0;
342 453680 : glob->transientPlan = false;
343 453680 : glob->dependsOnRole = false;
344 453680 : glob->partition_directory = NULL;
345 453680 : glob->rel_notnullatts_hash = NULL;
346 :
347 : /*
348 : * Assess whether it's feasible to use parallel mode for this query. We
349 : * can't do this in a standalone backend, or if the command will try to
350 : * modify any data, or if this is a cursor operation, or if GUCs are set
351 : * to values that don't permit parallelism, or if parallel-unsafe
352 : * functions are present in the query tree.
353 : *
354 : * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
355 : * MATERIALIZED VIEW to use parallel plans, but this is safe only because
356 : * the command is writing into a completely new table which workers won't
357 : * be able to see. If the workers could see the table, the fact that
358 : * group locking would cause them to ignore the leader's heavyweight GIN
359 : * page locks would make this unsafe. We'll have to fix that somehow if
360 : * we want to allow parallel inserts in general; updates and deletes have
361 : * additional problems especially around combo CIDs.)
362 : *
363 : * For now, we don't try to use parallel mode if we're running inside a
364 : * parallel worker. We might eventually be able to relax this
365 : * restriction, but for now it seems best not to have parallel workers
366 : * trying to create their own parallel workers.
367 : */
368 453680 : if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
369 425154 : IsUnderPostmaster &&
370 425154 : parse->commandType == CMD_SELECT &&
371 345078 : !parse->hasModifyingCTE &&
372 344936 : max_parallel_workers_per_gather > 0 &&
373 344332 : !IsParallelWorker())
374 : {
375 : /* all the cheap tests pass, so scan the query tree */
376 344284 : glob->maxParallelHazard = max_parallel_hazard(parse);
377 344284 : glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
378 : }
379 : else
380 : {
381 : /* skip the query tree scan, just assume it's unsafe */
382 109396 : glob->maxParallelHazard = PROPARALLEL_UNSAFE;
383 109396 : glob->parallelModeOK = false;
384 : }
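For example, a plain SELECT arriving from exec_simple_query() has
CURSOR_OPT_PARALLEL_OK set and, if the cheap tests all pass, pays for one
max_parallel_hazard() walk over its tree; the same SELECT issued via DECLARE
CURSOR (which does not pass CURSOR_OPT_PARALLEL_OK), or any data-modifying
command, takes the else branch and is assumed parallel-unsafe without any
tree scan.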
385 :
386 : /*
387 : * glob->parallelModeNeeded is normally set to false here and changed to
388 : * true during plan creation if a Gather or Gather Merge plan is actually
389 : * created (cf. create_gather_plan, create_gather_merge_plan).
390 : *
391 : * However, if debug_parallel_query = on or debug_parallel_query =
392 : * regress, then we impose parallel mode whenever it's safe to do so, even
393 : * if the final plan doesn't use parallelism. It's not safe to do so if
394 : * the query contains anything parallel-unsafe; parallelModeOK will be
395 : * false in that case (and note that parallelModeOK can't change after
396 : * this point). Otherwise, everything in the query is either parallel-safe or
397 : * parallel-restricted, and in either case it should be OK to impose
398 : * parallel-mode restrictions. If that ends up breaking something, then
399 : * either some function the user included in the query is incorrectly
400 : * labeled as parallel-safe or parallel-restricted when in reality it's
401 : * parallel-unsafe, or else the query planner itself has a bug.
402 : */
403 749268 : glob->parallelModeNeeded = glob->parallelModeOK &&
404 295588 : (debug_parallel_query != DEBUG_PARALLEL_OFF);
405 :
406 : /* Determine what fraction of the plan is likely to be scanned */
407 453680 : if (cursorOptions & CURSOR_OPT_FAST_PLAN)
408 : {
409 : /*
410 : * We have no real idea how many tuples the user will ultimately FETCH
411 : * from a cursor, but it is often the case that not all of them are
412 : * wanted, or that a fast-start plan is preferable so that some of the
413 : * tuples can be processed sooner. Use a GUC parameter to decide
414 : * what fraction to optimize for.
415 : */
416 4676 : tuple_fraction = cursor_tuple_fraction;
417 :
418 : /*
419 : * We document cursor_tuple_fraction as simply being a fraction, which
420 : * means the edge cases 0 and 1 have to be treated specially here. We
421 : * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
422 : */
423 4676 : if (tuple_fraction >= 1.0)
424 0 : tuple_fraction = 0.0;
425 4676 : else if (tuple_fraction <= 0.0)
426 0 : tuple_fraction = 1e-10;
427 : }
428 : else
429 : {
430 : /* Default assumption is we need all the tuples */
431 449004 : tuple_fraction = 0.0;
432 : }
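As a worked example: with the default cursor_tuple_fraction of 0.1, a
CURSOR_OPT_FAST_PLAN query is planned for fetching roughly the first 10% of
its result; SET cursor_tuple_fraction = 1.0 is converted to 0.0 here
(optimize for all the tuples), and a setting of 0 becomes the tiny nonzero
1e-10, keeping the value a usable fraction.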
433 :
434 : /* primary planning entry point (may recurse for subqueries) */
435 453680 : root = subquery_planner(glob, parse, NULL, false, tuple_fraction, NULL);
436 :
437 : /* Select best Path and turn it into a Plan */
438 449742 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
439 449742 : best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
440 :
441 449742 : top_plan = create_plan(root, best_path);
442 :
443 : /*
444 : * If creating a plan for a scrollable cursor, make sure it can run
445 : * backwards on demand. Add a Material node at the top if needed.
446 : */
447 449346 : if (cursorOptions & CURSOR_OPT_SCROLL)
448 : {
449 266 : if (!ExecSupportsBackwardScan(top_plan))
450 32 : top_plan = materialize_finished_plan(top_plan);
451 : }
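For instance, a SCROLL cursor whose plan ends in a HashAggregate cannot run
backwards (ExecSupportsBackwardScan() returns false for Agg nodes), so a
Material node is added to buffer rows for FETCH BACKWARD; a plan ending in
an ordinary index scan already supports backward scan and needs no buffer.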
452 :
453 : /*
454 : * Optionally add a Gather node for testing purposes, provided this is
455 : * actually a safe thing to do.
456 : *
457 : * We can add Gather even when top_plan has parallel-safe initPlans, but
458 : * then we have to move the initPlans to the Gather node because of
459 : * SS_finalize_plan's limitations. That would cause cosmetic breakage of
460 : * regression tests when debug_parallel_query = regress, because initPlans
461 : * that would normally appear on the top_plan move to the Gather, causing
462 : * them to disappear from EXPLAIN output. That doesn't seem worth kluging
463 : * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
464 : */
465 449346 : if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
466 194 : top_plan->parallel_safe &&
467 128 : (top_plan->initPlan == NIL ||
468 0 : debug_parallel_query != DEBUG_PARALLEL_REGRESS))
469 : {
470 128 : Gather *gather = makeNode(Gather);
471 : Cost initplan_cost;
472 : bool unsafe_initplans;
473 :
474 128 : gather->plan.targetlist = top_plan->targetlist;
475 128 : gather->plan.qual = NIL;
476 128 : gather->plan.lefttree = top_plan;
477 128 : gather->plan.righttree = NULL;
478 128 : gather->num_workers = 1;
479 128 : gather->single_copy = true;
480 128 : gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
481 :
482 : /* Transfer any initPlans to the new top node */
483 128 : gather->plan.initPlan = top_plan->initPlan;
484 128 : top_plan->initPlan = NIL;
485 :
486 : /*
487 : * Since this Gather has no parallel-aware descendants to signal to,
488 : * we don't need a rescan Param.
489 : */
490 128 : gather->rescan_param = -1;
491 :
492 : /*
493 : * Ideally we'd use cost_gather here, but setting up dummy path data
494 : * to satisfy it doesn't seem much cleaner than knowing what it does.
495 : */
496 128 : gather->plan.startup_cost = top_plan->startup_cost +
497 : parallel_setup_cost;
498 128 : gather->plan.total_cost = top_plan->total_cost +
499 128 : parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
500 128 : gather->plan.plan_rows = top_plan->plan_rows;
501 128 : gather->plan.plan_width = top_plan->plan_width;
502 128 : gather->plan.parallel_aware = false;
503 128 : gather->plan.parallel_safe = false;
504 :
505 : /*
506 : * Delete the initplans' cost from top_plan. We needn't add it to the
507 : * Gather node, since the above coding already included it there.
508 : */
509 128 : SS_compute_initplan_cost(gather->plan.initPlan,
510 : &initplan_cost, &unsafe_initplans);
511 128 : top_plan->startup_cost -= initplan_cost;
512 128 : top_plan->total_cost -= initplan_cost;
513 :
514 : /* use parallel mode for parallel plans. */
515 128 : root->glob->parallelModeNeeded = true;
516 :
517 128 : top_plan = &gather->plan;
518 : }
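To make the cost arithmetic concrete, using the default parallel_setup_cost
= 1000 and parallel_tuple_cost = 0.1: a top_plan with startup_cost 10,
total_cost 110, and plan_rows 1000 yields a Gather with startup_cost 1010
and total_cost 110 + 1000 + 0.1 * 1000 = 1210; the
SS_compute_initplan_cost() adjustment then strips any initplan cost from
top_plan only, since the Gather's figures already include it.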
519 :
520 : /*
521 : * If any Params were generated, run through the plan tree and compute
522 : * each plan node's extParam/allParam sets. Ideally we'd merge this into
523 : * set_plan_references' tree traversal, but for now it has to be separate
524 : * because we need to visit subplans before, not after, the main plan.
525 : */
526 449346 : if (glob->paramExecTypes != NIL)
527 : {
528 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
529 199064 : forboth(lp, glob->subplans, lr, glob->subroots)
530 : {
531 45952 : Plan *subplan = (Plan *) lfirst(lp);
532 45952 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
533 :
534 45952 : SS_finalize_plan(subroot, subplan);
535 : }
536 153112 : SS_finalize_plan(root, top_plan);
537 : }
538 :
539 : /* final cleanup of the plan */
540 : Assert(glob->finalrtable == NIL);
541 : Assert(glob->finalrteperminfos == NIL);
542 : Assert(glob->finalrowmarks == NIL);
543 : Assert(glob->resultRelations == NIL);
544 : Assert(glob->appendRelations == NIL);
545 449346 : top_plan = set_plan_references(root, top_plan);
546 : /* ... and the subplans (both regular subplans and initplans) */
547 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
548 495298 : forboth(lp, glob->subplans, lr, glob->subroots)
549 : {
550 45952 : Plan *subplan = (Plan *) lfirst(lp);
551 45952 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
552 :
553 45952 : lfirst(lp) = set_plan_references(subroot, subplan);
554 : }
555 :
556 : /* build the PlannedStmt result */
557 449346 : result = makeNode(PlannedStmt);
558 :
559 449346 : result->commandType = parse->commandType;
560 449346 : result->queryId = parse->queryId;
561 449346 : result->hasReturning = (parse->returningList != NIL);
562 449346 : result->hasModifyingCTE = parse->hasModifyingCTE;
563 449346 : result->canSetTag = parse->canSetTag;
564 449346 : result->transientPlan = glob->transientPlan;
565 449346 : result->dependsOnRole = glob->dependsOnRole;
566 449346 : result->parallelModeNeeded = glob->parallelModeNeeded;
567 449346 : result->planTree = top_plan;
568 449346 : result->partPruneInfos = glob->partPruneInfos;
569 449346 : result->rtable = glob->finalrtable;
570 898692 : result->unprunableRelids = bms_difference(glob->allRelids,
571 449346 : glob->prunableRelids);
572 449346 : result->permInfos = glob->finalrteperminfos;
573 449346 : result->resultRelations = glob->resultRelations;
574 449346 : result->appendRelations = glob->appendRelations;
575 449346 : result->subplans = glob->subplans;
576 449346 : result->rewindPlanIDs = glob->rewindPlanIDs;
577 449346 : result->rowMarks = glob->finalrowmarks;
578 449346 : result->relationOids = glob->relationOids;
579 449346 : result->invalItems = glob->invalItems;
580 449346 : result->paramExecTypes = glob->paramExecTypes;
581 : /* utilityStmt should be null, but we might as well copy it */
582 449346 : result->utilityStmt = parse->utilityStmt;
583 449346 : result->stmt_location = parse->stmt_location;
584 449346 : result->stmt_len = parse->stmt_len;
585 449346 : result->cached_plan_type = PLAN_CACHE_NONE;
586 :
587 449346 : result->jitFlags = PGJIT_NONE;
588 449346 : if (jit_enabled && jit_above_cost >= 0 &&
589 448520 : top_plan->total_cost > jit_above_cost)
590 : {
591 950 : result->jitFlags |= PGJIT_PERFORM;
592 :
593 : /*
594 : * Decide how much effort should be put into generating better code.
595 : */
596 950 : if (jit_optimize_above_cost >= 0 &&
597 950 : top_plan->total_cost > jit_optimize_above_cost)
598 432 : result->jitFlags |= PGJIT_OPT3;
599 950 : if (jit_inline_above_cost >= 0 &&
600 950 : top_plan->total_cost > jit_inline_above_cost)
601 432 : result->jitFlags |= PGJIT_INLINE;
602 :
603 : /*
604 : * Decide which operations should be JITed.
605 : */
606 950 : if (jit_expressions)
607 950 : result->jitFlags |= PGJIT_EXPR;
608 950 : if (jit_tuple_deforming)
609 950 : result->jitFlags |= PGJIT_DEFORM;
610 : }
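With the stock settings (jit_above_cost = 100000, jit_optimize_above_cost =
500000, jit_inline_above_cost = 500000, jit_expressions and
jit_tuple_deforming both on), a plan with total_cost 600000 therefore ends
up with PGJIT_PERFORM | PGJIT_OPT3 | PGJIT_INLINE | PGJIT_EXPR |
PGJIT_DEFORM, while one costing 50000 keeps PGJIT_NONE.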
611 :
612 449346 : if (glob->partition_directory != NULL)
613 11570 : DestroyPartitionDirectory(glob->partition_directory);
614 :
615 449346 : return result;
616 : }
617 :
618 :
619 : /*--------------------
620 : * subquery_planner
621 : * Invokes the planner on a subquery. We recurse to here for each
622 : * sub-SELECT found in the query tree.
623 : *
624 : * glob is the global state for the current planner run.
625 : * parse is the querytree produced by the parser & rewriter.
626 : * parent_root is the immediate parent Query's info (NULL at the top level).
627 : * hasRecursion is true if this is a recursive WITH query.
628 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
629 : * tuple_fraction is interpreted as explained for grouping_planner, below.
630 : * setops is used for set operation subqueries to provide the subquery with
631 : * the context in which it's being used so that Paths correctly sorted for the
632 : * set operation can be generated. NULL when not planning a set operation
633 : * child, or when a child of a set op that isn't interested in sorted input.
634 : *
635 : * Basically, this routine does the work that should only be done once
636 : * per Query object. It then calls grouping_planner. At one time,
637 : * grouping_planner could be invoked recursively on the same Query object;
638 : * that's not currently true, but we keep the separation between the two
639 : * routines anyway, in case we need it again someday.
640 : *
641 : * subquery_planner will be called recursively to handle sub-Query nodes
642 : * found within the query's expressions and rangetable.
643 : *
644 : * Returns the PlannerInfo struct ("root") that contains all data generated
645 : * while planning the subquery. In particular, the Path(s) attached to
646 : * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
647 : * cheapest way(s) to implement the query. The top level will select the
648 : * best Path and pass it through createplan.c to produce a finished Plan.
649 : *--------------------
650 : */
651 : PlannerInfo *
652 527244 : subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root,
653 : bool hasRecursion, double tuple_fraction,
654 : SetOperationStmt *setops)
655 : {
656 : PlannerInfo *root;
657 : List *newWithCheckOptions;
658 : List *newHaving;
659 : bool hasOuterJoins;
660 : bool hasResultRTEs;
661 : RelOptInfo *final_rel;
662 : ListCell *l;
663 :
664 : /* Create a PlannerInfo data structure for this subquery */
665 527244 : root = makeNode(PlannerInfo);
666 527244 : root->parse = parse;
667 527244 : root->glob = glob;
668 527244 : root->query_level = parent_root ? parent_root->query_level + 1 : 1;
669 527244 : root->parent_root = parent_root;
670 527244 : root->plan_params = NIL;
671 527244 : root->outer_params = NULL;
672 527244 : root->planner_cxt = CurrentMemoryContext;
673 527244 : root->init_plans = NIL;
674 527244 : root->cte_plan_ids = NIL;
675 527244 : root->multiexpr_params = NIL;
676 527244 : root->join_domains = NIL;
677 527244 : root->eq_classes = NIL;
678 527244 : root->ec_merging_done = false;
679 527244 : root->last_rinfo_serial = 0;
680 527244 : root->all_result_relids =
681 527244 : parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
682 527244 : root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
683 527244 : root->append_rel_list = NIL;
684 527244 : root->row_identity_vars = NIL;
685 527244 : root->rowMarks = NIL;
686 527244 : memset(root->upper_rels, 0, sizeof(root->upper_rels));
687 527244 : memset(root->upper_targets, 0, sizeof(root->upper_targets));
688 527244 : root->processed_groupClause = NIL;
689 527244 : root->processed_distinctClause = NIL;
690 527244 : root->processed_tlist = NIL;
691 527244 : root->update_colnos = NIL;
692 527244 : root->grouping_map = NULL;
693 527244 : root->minmax_aggs = NIL;
694 527244 : root->qual_security_level = 0;
695 527244 : root->hasPseudoConstantQuals = false;
696 527244 : root->hasAlternativeSubPlans = false;
697 527244 : root->placeholdersFrozen = false;
698 527244 : root->hasRecursion = hasRecursion;
699 527244 : if (hasRecursion)
700 1018 : root->wt_param_id = assign_special_exec_param(root);
701 : else
702 526226 : root->wt_param_id = -1;
703 527244 : root->non_recursive_path = NULL;
704 527244 : root->partColsUpdated = false;
705 :
706 : /*
707 : * Create the top-level join domain. This won't have valid contents until
708 : * deconstruct_jointree fills it in, but the node needs to exist before
709 : * that so we can build EquivalenceClasses referencing it.
710 : */
711 527244 : root->join_domains = list_make1(makeNode(JoinDomain));
712 :
713 : /*
714 : * If there is a WITH list, process each WITH query and either convert it
715 : * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
716 : */
717 527244 : if (parse->cteList)
718 2910 : SS_process_ctes(root);
719 :
720 : /*
721 : * If it's a MERGE command, transform the joinlist as appropriate.
722 : */
723 527238 : transform_MERGE_to_join(parse);
724 :
725 : /*
726 : * Scan the rangetable for relation RTEs and retrieve the necessary
727 : * catalog information for each relation. Using this information, clear
728 : * the inh flag for any relation that has no children, collect not-null
729 : * attribute numbers for any relation that has column not-null
730 : * constraints, and expand virtual generated columns for any relation that
731 : * contains them. Note that this step does not descend into sublinks and
732 : * subqueries; if we pull up any sublinks or subqueries below, their
733 : * relation RTEs are processed just before pulling them up.
734 : */
735 527238 : parse = root->parse = preprocess_relation_rtes(root);
736 :
737 : /*
738 : * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
739 : * that we don't need so many special cases to deal with that situation.
740 : */
741 527238 : replace_empty_jointree(parse);
742 :
743 : /*
744 : * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
745 : * to transform them into joins. Note that this step does not descend
746 : * into subqueries; if we pull up any subqueries below, their SubLinks are
747 : * processed just before pulling them up.
748 : */
749 527238 : if (parse->hasSubLinks)
750 37982 : pull_up_sublinks(root);
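For example, WHERE EXISTS (SELECT 1 FROM b WHERE b.x = a.x) is converted
here into a semijoin between a and b, and WHERE y IN (SELECT z FROM c)
likewise becomes a join, letting the join-order search treat these
relations like any others instead of executing a subplan once per outer row.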
751 :
752 : /*
753 : * Scan the rangetable for function RTEs, do const-simplification on them,
754 : * and then inline them if possible (producing subqueries that might get
755 : * pulled up next). Recursion issues here are handled in the same way as
756 : * for SubLinks.
757 : */
758 527238 : preprocess_function_rtes(root);
759 :
760 : /*
761 : * Check to see if any subqueries in the jointree can be merged into this
762 : * query.
763 : */
764 527232 : pull_up_subqueries(root);
765 :
766 : /*
767 : * If this is a simple UNION ALL query, flatten it into an appendrel. We
768 : * do this now because it requires applying pull_up_subqueries to the leaf
769 : * queries of the UNION ALL, which weren't touched above because they
770 : * weren't referenced by the jointree (they will be after we do this).
771 : */
772 527226 : if (parse->setOperations)
773 6630 : flatten_simple_union_all(root);
774 :
775 : /*
776 : * Survey the rangetable to see what kinds of entries are present. We can
777 : * skip some later processing if relevant SQL features are not used; for
778 : * example if there are no JOIN RTEs we can avoid the expense of doing
779 : * flatten_join_alias_vars(). This must be done after we have finished
780 : * adding rangetable entries, of course. (Note: actually, processing of
781 : * inherited or partitioned rels can cause RTEs for their child tables to
782 : * get added later; but those must all be RTE_RELATION entries, so they
783 : * don't invalidate the conclusions drawn here.)
784 : */
785 527226 : root->hasJoinRTEs = false;
786 527226 : root->hasLateralRTEs = false;
787 527226 : root->group_rtindex = 0;
788 527226 : hasOuterJoins = false;
789 527226 : hasResultRTEs = false;
790 1426632 : foreach(l, parse->rtable)
791 : {
792 899406 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
793 :
794 899406 : switch (rte->rtekind)
795 : {
796 88828 : case RTE_JOIN:
797 88828 : root->hasJoinRTEs = true;
798 88828 : if (IS_OUTER_JOIN(rte->jointype))
799 49410 : hasOuterJoins = true;
800 88828 : break;
801 200696 : case RTE_RESULT:
802 200696 : hasResultRTEs = true;
803 200696 : break;
804 4468 : case RTE_GROUP:
805 : Assert(parse->hasGroupRTE);
806 4468 : root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
807 4468 : break;
808 605414 : default:
809 : /* No work here for other RTE types */
810 605414 : break;
811 : }
812 :
813 899406 : if (rte->lateral)
814 10584 : root->hasLateralRTEs = true;
815 :
816 : /*
817 : * We can also determine the maximum security level required for any
818 : * securityQuals now. Addition of inheritance-child RTEs won't affect
819 : * this, because child tables don't have their own securityQuals; see
820 : * expand_single_inheritance_child().
821 : */
822 899406 : if (rte->securityQuals)
823 2508 : root->qual_security_level = Max(root->qual_security_level,
824 : list_length(rte->securityQuals));
825 : }
826 :
827 : /*
828 : * If we have now verified that the query target relation is
829 : * non-inheriting, mark it as a leaf target.
830 : */
831 527226 : if (parse->resultRelation)
832 : {
833 87068 : RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
834 :
835 87068 : if (!rte->inh)
836 84222 : root->leaf_result_relids =
837 84222 : bms_make_singleton(parse->resultRelation);
838 : }
839 :
840 : /*
841 : * Preprocess RowMark information. We need to do this after subquery
842 : * pullup, so that all base relations are present.
843 : */
844 527226 : preprocess_rowmarks(root);
845 :
846 : /*
847 : * Set hasHavingQual to remember if HAVING clause is present. Needed
848 : * because preprocess_expression will reduce a constant-true condition to
849 : * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
850 : */
851 527226 : root->hasHavingQual = (parse->havingQual != NULL);
852 :
853 : /*
854 : * Do expression preprocessing on targetlist and quals, as well as other
855 : * random expressions in the querytree. Note that we do not need to
856 : * handle sort/group expressions explicitly, because they are actually
857 : * part of the targetlist.
858 : */
859 523372 : parse->targetList = (List *)
860 527226 : preprocess_expression(root, (Node *) parse->targetList,
861 : EXPRKIND_TARGET);
862 :
863 523372 : newWithCheckOptions = NIL;
864 525860 : foreach(l, parse->withCheckOptions)
865 : {
866 2488 : WithCheckOption *wco = lfirst_node(WithCheckOption, l);
867 :
868 2488 : wco->qual = preprocess_expression(root, wco->qual,
869 : EXPRKIND_QUAL);
870 2488 : if (wco->qual != NULL)
871 2088 : newWithCheckOptions = lappend(newWithCheckOptions, wco);
872 : }
873 523372 : parse->withCheckOptions = newWithCheckOptions;
874 :
875 523372 : parse->returningList = (List *)
876 523372 : preprocess_expression(root, (Node *) parse->returningList,
877 : EXPRKIND_TARGET);
878 :
879 523372 : preprocess_qual_conditions(root, (Node *) parse->jointree);
880 :
881 523372 : parse->havingQual = preprocess_expression(root, parse->havingQual,
882 : EXPRKIND_QUAL);
883 :
884 525990 : foreach(l, parse->windowClause)
885 : {
886 2618 : WindowClause *wc = lfirst_node(WindowClause, l);
887 :
888 : /* partitionClause/orderClause are sort/group expressions */
889 2618 : wc->startOffset = preprocess_expression(root, wc->startOffset,
890 : EXPRKIND_LIMIT);
891 2618 : wc->endOffset = preprocess_expression(root, wc->endOffset,
892 : EXPRKIND_LIMIT);
893 : }
894 :
895 523372 : parse->limitOffset = preprocess_expression(root, parse->limitOffset,
896 : EXPRKIND_LIMIT);
897 523372 : parse->limitCount = preprocess_expression(root, parse->limitCount,
898 : EXPRKIND_LIMIT);
899 :
900 523372 : if (parse->onConflict)
901 : {
902 3624 : parse->onConflict->arbiterElems = (List *)
903 1812 : preprocess_expression(root,
904 1812 : (Node *) parse->onConflict->arbiterElems,
905 : EXPRKIND_ARBITER_ELEM);
906 3624 : parse->onConflict->arbiterWhere =
907 1812 : preprocess_expression(root,
908 1812 : parse->onConflict->arbiterWhere,
909 : EXPRKIND_QUAL);
910 3624 : parse->onConflict->onConflictSet = (List *)
911 1812 : preprocess_expression(root,
912 1812 : (Node *) parse->onConflict->onConflictSet,
913 : EXPRKIND_TARGET);
914 1812 : parse->onConflict->onConflictWhere =
915 1812 : preprocess_expression(root,
916 1812 : parse->onConflict->onConflictWhere,
917 : EXPRKIND_QUAL);
918 : /* exclRelTlist contains only Vars, so no preprocessing needed */
919 : }
920 :
921 526216 : foreach(l, parse->mergeActionList)
922 : {
923 2844 : MergeAction *action = (MergeAction *) lfirst(l);
924 :
925 2844 : action->targetList = (List *)
926 2844 : preprocess_expression(root,
927 2844 : (Node *) action->targetList,
928 : EXPRKIND_TARGET);
929 2844 : action->qual =
930 2844 : preprocess_expression(root,
931 : (Node *) action->qual,
932 : EXPRKIND_QUAL);
933 : }
934 :
935 523372 : parse->mergeJoinCondition =
936 523372 : preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
937 :
938 523372 : root->append_rel_list = (List *)
939 523372 : preprocess_expression(root, (Node *) root->append_rel_list,
940 : EXPRKIND_APPINFO);
941 :
942 : /* Also need to preprocess expressions within RTEs */
943 1418646 : foreach(l, parse->rtable)
944 : {
945 895274 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
946 : int kind;
947 : ListCell *lcsq;
948 :
949 895274 : if (rte->rtekind == RTE_RELATION)
950 : {
951 470240 : if (rte->tablesample)
952 228 : rte->tablesample = (TableSampleClause *)
953 228 : preprocess_expression(root,
954 228 : (Node *) rte->tablesample,
955 : EXPRKIND_TABLESAMPLE);
956 : }
957 425034 : else if (rte->rtekind == RTE_SUBQUERY)
958 : {
959 : /*
960 : * We don't want to do all preprocessing yet on the subquery's
961 : * expressions, since that will happen when we plan it. But if it
962 : * contains any join aliases of our level, those have to get
963 : * expanded now, because planning of the subquery won't do it.
964 : * That's only possible if the subquery is LATERAL.
965 : */
966 68732 : if (rte->lateral && root->hasJoinRTEs)
967 1286 : rte->subquery = (Query *)
968 1286 : flatten_join_alias_vars(root, root->parse,
969 1286 : (Node *) rte->subquery);
970 : }
971 356302 : else if (rte->rtekind == RTE_FUNCTION)
972 : {
973 : /* Preprocess the function expression(s) fully */
974 51614 : kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
975 51614 : rte->functions = (List *)
976 51614 : preprocess_expression(root, (Node *) rte->functions, kind);
977 : }
978 304688 : else if (rte->rtekind == RTE_TABLEFUNC)
979 : {
980 : /* Preprocess the function expression(s) fully */
981 626 : kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
982 626 : rte->tablefunc = (TableFunc *)
983 626 : preprocess_expression(root, (Node *) rte->tablefunc, kind);
984 : }
985 304062 : else if (rte->rtekind == RTE_VALUES)
986 : {
987 : /* Preprocess the values lists fully */
988 8268 : kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
989 8268 : rte->values_lists = (List *)
990 8268 : preprocess_expression(root, (Node *) rte->values_lists, kind);
991 : }
992 295794 : else if (rte->rtekind == RTE_GROUP)
993 : {
994 : /* Preprocess the groupexprs list fully */
995 4468 : rte->groupexprs = (List *)
996 4468 : preprocess_expression(root, (Node *) rte->groupexprs,
997 : EXPRKIND_GROUPEXPR);
998 : }
999 :
1000 : /*
1001 : * Process each element of the securityQuals list as if it were a
1002 : * separate qual expression (as indeed it is). We need to do it this
1003 : * way to get proper canonicalization of AND/OR structure. Note that
1004 : * this converts each element into an implicit-AND sublist.
1005 : */
1006 898140 : foreach(lcsq, rte->securityQuals)
1007 : {
1008 2866 : lfirst(lcsq) = preprocess_expression(root,
1009 2866 : (Node *) lfirst(lcsq),
1010 : EXPRKIND_QUAL);
1011 : }
1012 : }
1013 :
1014 : /*
1015 : * Now that we are done preprocessing expressions, and in particular done
1016 : * flattening join alias variables, get rid of the joinaliasvars lists.
1017 : * They no longer match what expressions in the rest of the tree look
1018 : * like, because we have not preprocessed expressions in those lists (and
1019 : * do not want to; for example, expanding a SubLink there would result in
1020 : * a useless unreferenced subplan). Leaving them in place simply creates
1021 : * a hazard for later scans of the tree. We could try to prevent that by
1022 : * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1023 : * but that doesn't sound very reliable.
1024 : */
1025 523372 : if (root->hasJoinRTEs)
1026 : {
1027 306798 : foreach(l, parse->rtable)
1028 : {
1029 253120 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1030 :
1031 253120 : rte->joinaliasvars = NIL;
1032 : }
1033 : }
1034 :
1035 : /*
1036 : * Replace any Vars in the subquery's targetlist and havingQual that
1037 : * reference GROUP outputs with the underlying grouping expressions.
1038 : *
1039 : * Note that we need to perform this replacement after we've preprocessed
1040 : * the grouping expressions. This is to ensure that there is only one
1041 : * instance of SubPlan for each SubLink contained within the grouping
1042 : * expressions.
1043 : */
1044 523372 : if (parse->hasGroupRTE)
1045 : {
1046 4468 : parse->targetList = (List *)
1047 4468 : flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1048 4468 : parse->havingQual =
1049 4468 : flatten_group_exprs(root, root->parse, parse->havingQual);
1050 : }
1051 :
1052 : /* Constant-folding might have removed all set-returning functions */
1053 523372 : if (parse->hasTargetSRFs)
1054 12048 : parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1055 :
1056 : /*
1057 : * In some cases we may want to transfer a HAVING clause into WHERE. We
1058 : * cannot do so if the HAVING clause contains aggregates (obviously) or
1059 : * volatile functions (since a HAVING clause is supposed to be executed
1060 : * only once per group). We also can't do this if there are any nonempty
1061 : * grouping sets and the clause references any columns that are nullable
1062 : * by the grouping sets; moving such a clause into WHERE would potentially
1063 : * change the results. (If there are only empty grouping sets, then the
1064 : * HAVING clause must be degenerate as discussed below.)
1065 : *
1066 : * Also, it may be that the clause is so expensive to execute that we're
1067 : * better off doing it only once per group, despite the loss of
1068 : * selectivity. This is hard to estimate short of doing the entire
1069 : * planning process twice, so we use a heuristic: clauses containing
1070 : * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1071 : * clause into WHERE, in hopes of eliminating tuples before aggregation
1072 : * instead of after.
1073 : *
1074 : * If the query has explicit grouping then we can simply move such a
1075 : * clause into WHERE; any group that fails the clause will not be in the
1076 : * output because none of its tuples will reach the grouping or
1077 : * aggregation stage. Otherwise we must have a degenerate (variable-free)
1078 : * HAVING clause, which we put in WHERE so that query_planner() can use it
1079 : * in a gating Result node, but also keep in HAVING to ensure that we
1080 : * don't emit a bogus aggregated row. (This could be done better, but it
1081 : * seems not worth optimizing.)
1082 : *
1083 : * Note that a HAVING clause may contain expressions that are not fully
1084 : * preprocessed. This can happen if these expressions are part of
1085 : * grouping items. In such cases, they are replaced with GROUP Vars in
1086 : * the parser and then replaced back after we've finished expression
1087 : * preprocessing on havingQual. This is not an issue if the clause
1088 : * remains in HAVING, because these expressions will be matched to lower
1089 : * target items in setrefs.c. However, if the clause is moved or copied
1090 : * into WHERE, we need to ensure that these expressions are fully
1091 : * preprocessed.
1092 : *
1093 : * Note that both havingQual and parse->jointree->quals are in
1094 : * implicitly-ANDed-list form at this point, even though they are declared
1095 : * as Node *.
1096 : */
1097 523372 : newHaving = NIL;
1098 524528 : foreach(l, (List *) parse->havingQual)
1099 : {
1100 1156 : Node *havingclause = (Node *) lfirst(l);
1101 :
1102 1492 : if (contain_agg_clause(havingclause) ||
1103 672 : contain_volatile_functions(havingclause) ||
1104 336 : contain_subplans(havingclause) ||
1105 420 : (parse->groupClause && parse->groupingSets &&
1106 84 : bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1107 : {
1108 : /* keep it in HAVING */
1109 892 : newHaving = lappend(newHaving, havingclause);
1110 : }
1111 264 : else if (parse->groupClause)
1112 : {
1113 : Node *whereclause;
1114 :
1115 : /* Preprocess the HAVING clause fully */
1116 246 : whereclause = preprocess_expression(root, havingclause,
1117 : EXPRKIND_QUAL);
1118 : /* ... and move it to WHERE */
1119 246 : parse->jointree->quals = (Node *)
1120 246 : list_concat((List *) parse->jointree->quals,
1121 : (List *) whereclause);
1122 : }
1123 : else
1124 : {
1125 : Node *whereclause;
1126 :
1127 : /* Preprocess the HAVING clause fully */
1128 18 : whereclause = preprocess_expression(root, copyObject(havingclause),
1129 : EXPRKIND_QUAL);
1130 : /* ... and put a copy in WHERE */
1131 36 : parse->jointree->quals = (Node *)
1132 18 : list_concat((List *) parse->jointree->quals,
1133 : (List *) whereclause);
1134 : /* ... and also keep it in HAVING */
1135 18 : newHaving = lappend(newHaving, havingclause);
1136 : }
1137 : }
1138 523372 : parse->havingQual = (Node *) newHaving;
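Concretely: in SELECT g, sum(x) FROM t GROUP BY g HAVING g = 1 AND sum(x) >
10, the clause g = 1 contains no aggregate, volatile function, or subplan,
so it is moved into WHERE and filters rows before aggregation, while
sum(x) > 10 must stay in HAVING; a variable-free HAVING in an ungrouped
query (the degenerate case) is copied into WHERE and also kept in HAVING,
as described above.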
1139 :
1140 : /*
1141 : * If we have any outer joins, try to reduce them to plain inner joins.
1142 : * This step is most easily done after we've done expression
1143 : * preprocessing.
1144 : */
1145 523372 : if (hasOuterJoins)
1146 34596 : reduce_outer_joins(root);
1147 :
1148 : /*
1149 : * If we have any RTE_RESULT relations, see if they can be deleted from
1150 : * the jointree. We also rely on this processing to flatten single-child
1151 : * FromExprs underneath outer joins. This step is most effectively done
1152 : * after we've done expression preprocessing and outer join reduction.
1153 : */
1154 523372 : if (hasResultRTEs || hasOuterJoins)
1155 230080 : remove_useless_result_rtes(root);
1156 :
1157 : /*
1158 : * Do the main planning.
1159 : */
1160 523372 : grouping_planner(root, tuple_fraction, setops);
1161 :
1162 : /*
1163 : * Capture the set of outer-level param IDs we have access to, for use in
1164 : * extParam/allParam calculations later.
1165 : */
1166 523300 : SS_identify_outer_params(root);
1167 :
1168 : /*
1169 : * If any initPlans were created in this query level, adjust the surviving
1170 : * Paths' costs and parallel-safety flags to account for them. The
1171 : * initPlans won't actually get attached to the plan tree till
1172 : * create_plan() runs, but we must include their effects now.
1173 : */
1174 523300 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1175 523300 : SS_charge_for_initplans(root, final_rel);
1176 :
1177 : /*
1178 : * Make sure we've identified the cheapest Path for the final rel. (By
1179 : * doing this here not in grouping_planner, we include initPlan costs in
1180 : * the decision, though it's unlikely that will change anything.)
1181 : */
1182 523300 : set_cheapest(final_rel);
1183 :
1184 523300 : return root;
1185 : }
1186 :
1187 : /*
1188 : * preprocess_expression
1189 : * Do subquery_planner's preprocessing work for an expression,
1190 : * which can be a targetlist, a WHERE clause (including JOIN/ON
1191 : * conditions), a HAVING clause, or a few other things.
1192 : */
1193 : static Node *
1194 4391340 : preprocess_expression(PlannerInfo *root, Node *expr, int kind)
1195 : {
1196 : /*
1197 : * Fall out quickly if expression is empty. This occurs often enough to
1198 : * be worth checking. Note that null->null is the correct conversion for
1199 : * implicit-AND result format, too.
1200 : */
1201 4391340 : if (expr == NULL)
1202 3468866 : return NULL;
1203 :
1204 : /*
1205 : * If the query has any join RTEs, replace join alias variables with
1206 : * base-relation variables. We must do this first, since any expressions
1207 : * we may extract from the joinaliasvars lists have not been preprocessed.
1208 : * For example, if we did this after sublink processing, sublinks expanded
1209 : * out from join aliases would not get processed. But we can skip this in
1210 : * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1211 : * they can't contain any Vars of the current query level.
1212 : */
1213 922474 : if (root->hasJoinRTEs &&
1214 387630 : !(kind == EXPRKIND_RTFUNC ||
1215 193642 : kind == EXPRKIND_VALUES ||
1216 : kind == EXPRKIND_TABLESAMPLE ||
1217 : kind == EXPRKIND_TABLEFUNC))
1218 193624 : expr = flatten_join_alias_vars(root, root->parse, expr);
1219 :
1220 : /*
1221 : * Simplify constant expressions. For function RTEs, this was already
1222 : * done by preprocess_function_rtes. (But note we must do it again for
1223 : * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1224 : * un-simplified subexpressions inserted by flattening of subqueries or
1225 : * join alias variables.)
1226 : *
1227 : * Note: an essential effect of this is to convert named-argument function
1228 : * calls to positional notation and insert the current actual values of
1229 : * any default arguments for functions. To ensure that happens, we *must*
1230 : * process all expressions here. Previous PG versions sometimes skipped
1231 : * const-simplification if it didn't seem worth the trouble, but we can't
1232 : * do that anymore.
1233 : *
1234 : * Note: this also flattens nested AND and OR expressions into N-argument
1235 : * form. All processing of a qual expression after this point must be
1236 : * careful to maintain AND/OR flatness --- that is, do not generate a tree
1237 : * with AND directly under AND, nor OR directly under OR.
1238 : */
1239 922474 : if (kind != EXPRKIND_RTFUNC)
1240 879234 : expr = eval_const_expressions(root, expr);
1241 :
1242 : /*
1243 : * If it's a qual or havingQual, canonicalize it.
1244 : */
1245 918620 : if (kind == EXPRKIND_QUAL)
1246 : {
1247 329808 : expr = (Node *) canonicalize_qual((Expr *) expr, false);
1248 :
1249 : #ifdef OPTIMIZER_DEBUG
1250 : printf("After canonicalize_qual()\n");
1251 : pprint(expr);
1252 : #endif
1253 : }
1254 :
1255 : /*
1256 : * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1257 : * hashfuncid of any that might execute more quickly by using hash lookups
1258 : * instead of a linear search.
1259 : */
1260 918620 : if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1261 : {
1262 840224 : convert_saop_to_hashed_saop(expr);
1263 : }
1264 :
1265 : /* Expand SubLinks to SubPlans */
1266 918620 : if (root->parse->hasSubLinks)
1267 109400 : expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1268 :
1269 : /*
1270 : * XXX do not insert anything here unless you have grokked the comments in
1271 : * SS_replace_correlation_vars ...
1272 : */
1273 :
1274 : /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1275 918620 : if (root->query_level > 1)
1276 161262 : expr = SS_replace_correlation_vars(root, expr);
1277 :
1278 : /*
1279 : * If it's a qual or havingQual, convert it to implicit-AND format. (We
1280 : * don't want to do this before eval_const_expressions, since the latter
1281 : * would be unable to simplify a top-level AND correctly. Also,
1282 : * SS_process_sublinks expects explicit-AND format.)
1283 : */
1284 918620 : if (kind == EXPRKIND_QUAL)
1285 329808 : expr = (Node *) make_ands_implicit((Expr *) expr);
1286 :
1287 918620 : return expr;
1288 : }
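Tracing one qual through this pipeline: WHERE a = 1 + 2 AND b IN
(1,2,3,4,5,6,7,8,9) is const-simplified by eval_const_expressions() to
a = 3 with the AND flattened; convert_saop_to_hashed_saop() sets a
hashfuncid on the nine-element ScalarArrayOpExpr (assuming the element type
has hash support), so execution probes a hash table instead of scanning the
array; and make_ands_implicit() finally returns the two clauses as an
implicit-AND list.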
1289 :
1290 : /*
1291 : * preprocess_qual_conditions
1292 : * Recursively scan the query's jointree and do subquery_planner's
1293 : * preprocessing work on each qual condition found therein.
1294 : */
1295 : static void
1296 1299032 : preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1297 : {
1298 1299032 : if (jtnode == NULL)
1299 0 : return;
1300 1299032 : if (IsA(jtnode, RangeTblRef))
1301 : {
1302 : /* nothing to do here */
1303 : }
1304 634798 : else if (IsA(jtnode, FromExpr))
1305 : {
1306 538160 : FromExpr *f = (FromExpr *) jtnode;
1307 : ListCell *l;
1308 :
1309 1120544 : foreach(l, f->fromlist)
1310 582384 : preprocess_qual_conditions(root, lfirst(l));
1311 :
1312 538160 : f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1313 : }
1314 96638 : else if (IsA(jtnode, JoinExpr))
1315 : {
1316 96638 : JoinExpr *j = (JoinExpr *) jtnode;
1317 :
1318 96638 : preprocess_qual_conditions(root, j->larg);
1319 96638 : preprocess_qual_conditions(root, j->rarg);
1320 :
1321 96638 : j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1322 : }
1323 : else
1324 0 : elog(ERROR, "unrecognized node type: %d",
1325 : (int) nodeTag(jtnode));
1326 : }
1327 :
1328 : /*
1329 : * preprocess_phv_expression
1330 : * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1331 : *
1332 : * If a LATERAL subquery references an output of another subquery, and that
1333 : * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1334 : * join, then we'll push the PlaceHolderVar expression down into the subquery
1335 : * and later pull it back up during find_lateral_references, which runs after
1336 : * subquery_planner has preprocessed all the expressions that were in the
1337 : * current query level to start with. So we need to preprocess it then.
1338 : */
1339 : Expr *
1340 90 : preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1341 : {
1342 90 : return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1343 : }
1344 :
1345 : /*--------------------
1346 : * grouping_planner
1347 : * Perform planning steps related to grouping, aggregation, etc.
1348 : *
1349 : * This function adds all required top-level processing to the scan/join
1350 : * Path(s) produced by query_planner.
1351 : *
1352 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
1353 : * tuple_fraction is interpreted as follows:
1354 : * 0: expect all tuples to be retrieved (normal case)
1355 : * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1356 : * from the plan to be retrieved
1357 : * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1358 : * expected to be retrieved (ie, a LIMIT specification).
1359 : * setops is used for set operation subqueries to provide the subquery with
1360 : * the context in which it's being used so that Paths correctly sorted for the
1361 : * set operation can be generated. NULL when not planning a set operation
1362 : * child, or when planning a child of a set op that isn't interested in sorted input.
1363 : *
1364 : * Returns nothing; the useful output is in the Paths we attach to the
1365 : * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1366 : * root->processed_tlist contains the final processed targetlist.
1367 : *
1368 : * Note that we have not done set_cheapest() on the final rel; it's convenient
1369 : * to leave this to the caller.
1370 : *--------------------
1371 : */
1372 : static void
1373 523372 : grouping_planner(PlannerInfo *root, double tuple_fraction,
1374 : SetOperationStmt *setops)
1375 : {
1376 523372 : Query *parse = root->parse;
1377 523372 : int64 offset_est = 0;
1378 523372 : int64 count_est = 0;
1379 523372 : double limit_tuples = -1.0;
1380 523372 : bool have_postponed_srfs = false;
1381 : PathTarget *final_target;
1382 : List *final_targets;
1383 : List *final_targets_contain_srfs;
1384 : bool final_target_parallel_safe;
1385 : RelOptInfo *current_rel;
1386 : RelOptInfo *final_rel;
1387 : FinalPathExtraData extra;
1388 : ListCell *lc;
1389 :
1390 : /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1391 523372 : if (parse->limitCount || parse->limitOffset)
1392 : {
1393 5008 : tuple_fraction = preprocess_limit(root, tuple_fraction,
1394 : &offset_est, &count_est);
1395 :
1396 : /*
1397 : * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1398 : * estimate the effects of using a bounded sort.
1399 : */
1400 5008 : if (count_est > 0 && offset_est >= 0)
1401 4474 : limit_tuples = (double) count_est + (double) offset_est;
1402 : }
1403 :
1404 : /* Make tuple_fraction accessible to lower-level routines */
1405 523372 : root->tuple_fraction = tuple_fraction;
1406 :
1407 523372 : if (parse->setOperations)
1408 : {
1409 : /*
1410 : * Construct Paths for set operations. The results will not need any
1411 : * work except perhaps a top-level sort and/or LIMIT. Note that any
1412 : * special work for recursive unions is the responsibility of
1413 : * plan_set_operations.
1414 : */
1415 6180 : current_rel = plan_set_operations(root);
1416 :
1417 : /*
1418 : * We should not need to call preprocess_targetlist, since we must be
1419 : * in a SELECT query node. Instead, use the processed_tlist returned
1420 : * by plan_set_operations (since this tells whether it returned any
1421 : * resjunk columns!), and transfer any sort key information from the
1422 : * original tlist.
1423 : */
1424 : Assert(parse->commandType == CMD_SELECT);
1425 :
1426 : /* for safety, copy processed_tlist instead of modifying in-place */
1427 6174 : root->processed_tlist =
1428 6174 : postprocess_setop_tlist(copyObject(root->processed_tlist),
1429 : parse->targetList);
1430 :
1431 : /* Also extract the PathTarget form of the setop result tlist */
1432 6174 : final_target = current_rel->cheapest_total_path->pathtarget;
1433 :
1434 : /* And check whether it's parallel safe */
1435 : final_target_parallel_safe =
1436 6174 : is_parallel_safe(root, (Node *) final_target->exprs);
1437 :
1438 : /* The setop result tlist couldn't contain any SRFs */
1439 : Assert(!parse->hasTargetSRFs);
1440 6174 : final_targets = final_targets_contain_srfs = NIL;
1441 :
1442 : /*
1443 : * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1444 : * checked already, but let's make sure).
1445 : */
1446 6174 : if (parse->rowMarks)
1447 0 : ereport(ERROR,
1448 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1449 : /*------
1450 : translator: %s is a SQL row locking clause such as FOR UPDATE */
1451 : errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1452 : LCS_asString(linitial_node(RowMarkClause,
1453 : parse->rowMarks)->strength))));
1454 :
1455 : /*
1456 : * Calculate pathkeys that represent result ordering requirements
1457 : */
1458 : Assert(parse->distinctClause == NIL);
1459 6174 : root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1460 : parse->sortClause,
1461 : root->processed_tlist);
1462 : }
1463 : else
1464 : {
1465 : /* No set operations, do regular planning */
1466 : PathTarget *sort_input_target;
1467 : List *sort_input_targets;
1468 : List *sort_input_targets_contain_srfs;
1469 : bool sort_input_target_parallel_safe;
1470 : PathTarget *grouping_target;
1471 : List *grouping_targets;
1472 : List *grouping_targets_contain_srfs;
1473 : bool grouping_target_parallel_safe;
1474 : PathTarget *scanjoin_target;
1475 : List *scanjoin_targets;
1476 : List *scanjoin_targets_contain_srfs;
1477 : bool scanjoin_target_parallel_safe;
1478 : bool scanjoin_target_same_exprs;
1479 : bool have_grouping;
1480 517192 : WindowFuncLists *wflists = NULL;
1481 517192 : List *activeWindows = NIL;
1482 517192 : grouping_sets_data *gset_data = NULL;
1483 : standard_qp_extra qp_extra;
1484 :
1485 : /* A recursive query should always have setOperations */
1486 : Assert(!root->hasRecursion);
1487 :
1488 : /* Preprocess grouping sets and GROUP BY clause, if any */
1489 517192 : if (parse->groupingSets)
1490 : {
1491 878 : gset_data = preprocess_grouping_sets(root);
1492 : }
1493 516314 : else if (parse->groupClause)
1494 : {
1495 : /* Preprocess regular GROUP BY clause, if any */
1496 3632 : root->processed_groupClause = preprocess_groupclause(root, NIL);
1497 : }
1498 :
1499 : /*
1500 : * Preprocess targetlist. Note that much of the remaining planning
1501 : * work will be done with the PathTarget representation of tlists, but
1502 : * we must also maintain the full representation of the final tlist so
1503 : * that we can transfer its decoration (resnames etc) to the topmost
1504 : * tlist of the finished Plan. This is kept in processed_tlist.
1505 : */
1506 517186 : preprocess_targetlist(root);
1507 :
1508 : /*
1509 : * Mark all the aggregates with resolved aggtranstypes, and detect
1510 : * aggregates that are duplicates or can share transition state. We
1511 : * must do this before slicing and dicing the tlist into various
1512 : * pathtargets, else some copies of the Aggref nodes might escape
1513 : * being marked.
1514 : */
1515 517186 : if (parse->hasAggs)
1516 : {
1517 38468 : preprocess_aggrefs(root, (Node *) root->processed_tlist);
1518 38468 : preprocess_aggrefs(root, (Node *) parse->havingQual);
1519 : }
1520 :
1521 : /*
1522 : * Locate any window functions in the tlist. (We don't need to look
1523 : * anywhere else, since expressions used in ORDER BY will be in there
1524 : * too.) Note that they could all have been eliminated by constant
1525 : * folding, in which case we don't need to do any more work.
1526 : */
1527 517186 : if (parse->hasWindowFuncs)
1528 : {
1529 2384 : wflists = find_window_functions((Node *) root->processed_tlist,
1530 2384 : list_length(parse->windowClause));
1531 2384 : if (wflists->numWindowFuncs > 0)
1532 : {
1533 : /*
1534 : * See if any modifications can be made to each WindowClause
1535 : * to allow the executor to execute the WindowFuncs more
1536 : * quickly.
1537 : */
1538 2378 : optimize_window_clauses(root, wflists);
1539 :
1540 : /* Extract the list of windows actually in use. */
1541 2378 : activeWindows = select_active_windows(root, wflists);
1542 :
1543 : /* Make sure they all have names, for EXPLAIN's use. */
1544 2378 : name_active_windows(activeWindows);
1545 : }
1546 : else
1547 6 : parse->hasWindowFuncs = false;
1548 : }
1549 :
1550 : /*
1551 : * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1552 : * adding logic between here and the query_planner() call. Anything
1553 : * that is needed in MIN/MAX-optimizable cases will have to be
1554 : * duplicated in planagg.c.
1555 : */
1556 517186 : if (parse->hasAggs)
1557 38468 : preprocess_minmax_aggregates(root);
1558 :
1559 : /*
1560 : * Figure out whether there's a hard limit on the number of rows that
1561 : * query_planner's result subplan needs to return. Even if we know a
1562 : * hard limit overall, it doesn't apply if the query has any
1563 : * grouping/aggregation operations, or SRFs in the tlist.
1564 : */
1565 517186 : if (parse->groupClause ||
1566 512724 : parse->groupingSets ||
1567 512682 : parse->distinctClause ||
1568 510002 : parse->hasAggs ||
1569 475426 : parse->hasWindowFuncs ||
1570 473186 : parse->hasTargetSRFs ||
1571 461606 : root->hasHavingQual)
1572 55598 : root->limit_tuples = -1.0;
1573 : else
1574 461588 : root->limit_tuples = limit_tuples;
1575 :
1576 : /* Set up data needed by standard_qp_callback */
1577 517186 : qp_extra.activeWindows = activeWindows;
1578 517186 : qp_extra.gset_data = gset_data;
1579 :
1580 : /*
1581 : * If we're a subquery for a set operation, store the SetOperationStmt
1582 : * in qp_extra.
1583 : */
1584 517186 : qp_extra.setop = setops;
1585 :
1586 : /*
1587 : * Generate the best unsorted and presorted paths for the scan/join
1588 : * portion of this Query, ie the processing represented by the
1589 : * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1590 : * We also generate (in standard_qp_callback) pathkey representations
1591 : * of the query's sort clause, distinct clause, etc.
1592 : */
1593 517186 : current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1594 :
1595 : /*
1596 : * Convert the query's result tlist into PathTarget format.
1597 : *
1598 : * Note: this cannot be done before query_planner() has performed
1599 : * appendrel expansion, because that might add resjunk entries to
1600 : * root->processed_tlist. Waiting till afterwards is also helpful
1601 : * because the target width estimates can use per-Var width numbers
1602 : * that were obtained within query_planner().
1603 : */
1604 517132 : final_target = create_pathtarget(root, root->processed_tlist);
1605 : final_target_parallel_safe =
1606 517132 : is_parallel_safe(root, (Node *) final_target->exprs);
1607 :
1608 : /*
1609 : * If ORDER BY was given, consider whether we should use a post-sort
1610 : * projection, and compute the adjusted target for preceding steps if
1611 : * so.
1612 : */
1613 517132 : if (parse->sortClause)
1614 : {
1615 72222 : sort_input_target = make_sort_input_target(root,
1616 : final_target,
1617 : &have_postponed_srfs);
1618 : sort_input_target_parallel_safe =
1619 72222 : is_parallel_safe(root, (Node *) sort_input_target->exprs);
1620 : }
1621 : else
1622 : {
1623 444910 : sort_input_target = final_target;
1624 444910 : sort_input_target_parallel_safe = final_target_parallel_safe;
1625 : }
1626 :
1627 : /*
1628 : * If we have window functions to deal with, the output from any
1629 : * grouping step needs to be what the window functions want;
1630 : * otherwise, it should be sort_input_target.
1631 : */
1632 517132 : if (activeWindows)
1633 : {
1634 2378 : grouping_target = make_window_input_target(root,
1635 : final_target,
1636 : activeWindows);
1637 : grouping_target_parallel_safe =
1638 2378 : is_parallel_safe(root, (Node *) grouping_target->exprs);
1639 : }
1640 : else
1641 : {
1642 514754 : grouping_target = sort_input_target;
1643 514754 : grouping_target_parallel_safe = sort_input_target_parallel_safe;
1644 : }
1645 :
1646 : /*
1647 : * If we have grouping or aggregation to do, the topmost scan/join
1648 : * plan node must emit what the grouping step wants; otherwise, it
1649 : * should emit grouping_target.
1650 : */
1651 512670 : have_grouping = (parse->groupClause || parse->groupingSets ||
1652 1029802 : parse->hasAggs || root->hasHavingQual);
1653 517132 : if (have_grouping)
1654 : {
1655 39136 : scanjoin_target = make_group_input_target(root, final_target);
1656 : scanjoin_target_parallel_safe =
1657 39136 : is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1658 : }
1659 : else
1660 : {
1661 477996 : scanjoin_target = grouping_target;
1662 477996 : scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1663 : }
1664 :
1665 : /*
1666 : * If there are any SRFs in the targetlist, we must separate each of
1667 : * these PathTargets into SRF-computing and SRF-free targets. Replace
1668 : * each of the named targets with a SRF-free version, and remember the
1669 : * list of additional projection steps we need to add afterwards.
1670 : */
1671 517132 : if (parse->hasTargetSRFs)
1672 : {
1673 : /* final_target doesn't recompute any SRFs in sort_input_target */
1674 12048 : split_pathtarget_at_srfs(root, final_target, sort_input_target,
1675 : &final_targets,
1676 : &final_targets_contain_srfs);
1677 12048 : final_target = linitial_node(PathTarget, final_targets);
1678 : Assert(!linitial_int(final_targets_contain_srfs));
1679 : /* likewise for sort_input_target vs. grouping_target */
1680 12048 : split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1681 : &sort_input_targets,
1682 : &sort_input_targets_contain_srfs);
1683 12048 : sort_input_target = linitial_node(PathTarget, sort_input_targets);
1684 : Assert(!linitial_int(sort_input_targets_contain_srfs));
1685 : /* likewise for grouping_target vs. scanjoin_target */
1686 12048 : split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1687 : &grouping_targets,
1688 : &grouping_targets_contain_srfs);
1689 12048 : grouping_target = linitial_node(PathTarget, grouping_targets);
1690 : Assert(!linitial_int(grouping_targets_contain_srfs));
1691 : /* scanjoin_target will not have any SRFs precomputed for it */
1692 12048 : split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1693 : &scanjoin_targets,
1694 : &scanjoin_targets_contain_srfs);
1695 12048 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1696 : Assert(!linitial_int(scanjoin_targets_contain_srfs));
1697 : }
1698 : else
1699 : {
1700 : /* initialize lists; for most of these, dummy values are OK */
1701 505084 : final_targets = final_targets_contain_srfs = NIL;
1702 505084 : sort_input_targets = sort_input_targets_contain_srfs = NIL;
1703 505084 : grouping_targets = grouping_targets_contain_srfs = NIL;
1704 505084 : scanjoin_targets = list_make1(scanjoin_target);
1705 505084 : scanjoin_targets_contain_srfs = NIL;
1706 : }
1707 :
1708 : /* Apply scan/join target. */
1709 517132 : scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1710 517132 : && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1711 517132 : apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1712 : scanjoin_targets_contain_srfs,
1713 : scanjoin_target_parallel_safe,
1714 : scanjoin_target_same_exprs);
1715 :
1716 : /*
1717 : * Save the various upper-rel PathTargets we just computed into
1718 : * root->upper_targets[]. The core code doesn't use this, but it
1719 : * provides a convenient place for extensions to get at the info. For
1720 : * consistency, we save all the intermediate targets, even though some
1721 : * of the corresponding upperrels might not be needed for this query.
1722 : */
1723 517132 : root->upper_targets[UPPERREL_FINAL] = final_target;
1724 517132 : root->upper_targets[UPPERREL_ORDERED] = final_target;
1725 517132 : root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1726 517132 : root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1727 517132 : root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1728 517132 : root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1729 :
1730 : /*
1731 : * If we have grouping and/or aggregation, consider ways to implement
1732 : * that. We build a new upperrel representing the output of this
1733 : * phase.
1734 : */
1735 517132 : if (have_grouping)
1736 : {
1737 39136 : current_rel = create_grouping_paths(root,
1738 : current_rel,
1739 : grouping_target,
1740 : grouping_target_parallel_safe,
1741 : gset_data);
1742 : /* Fix things up if grouping_target contains SRFs */
1743 39130 : if (parse->hasTargetSRFs)
1744 426 : adjust_paths_for_srfs(root, current_rel,
1745 : grouping_targets,
1746 : grouping_targets_contain_srfs);
1747 : }
1748 :
1749 : /*
1750 : * If we have window functions, consider ways to implement those. We
1751 : * build a new upperrel representing the output of this phase.
1752 : */
1753 517126 : if (activeWindows)
1754 : {
1755 2378 : current_rel = create_window_paths(root,
1756 : current_rel,
1757 : grouping_target,
1758 : sort_input_target,
1759 : sort_input_target_parallel_safe,
1760 : wflists,
1761 : activeWindows);
1762 : /* Fix things up if sort_input_target contains SRFs */
1763 2378 : if (parse->hasTargetSRFs)
1764 12 : adjust_paths_for_srfs(root, current_rel,
1765 : sort_input_targets,
1766 : sort_input_targets_contain_srfs);
1767 : }
1768 :
1769 : /*
1770 : * If there is a DISTINCT clause, consider ways to implement that. We
1771 : * build a new upperrel representing the output of this phase.
1772 : */
1773 517126 : if (parse->distinctClause)
1774 : {
1775 2714 : current_rel = create_distinct_paths(root,
1776 : current_rel,
1777 : sort_input_target);
1778 : }
1779 : } /* end of if (setOperations) */
1780 :
1781 : /*
1782 : * If ORDER BY was given, consider ways to implement that, and generate a
1783 : * new upperrel containing only paths that emit the correct ordering and
1784 : * project the correct final_target. We can apply the original
1785 : * limit_tuples limit in sort costing here, but only if there are no
1786 : * postponed SRFs.
1787 : */
1788 523300 : if (parse->sortClause)
1789 : {
1790 76172 : current_rel = create_ordered_paths(root,
1791 : current_rel,
1792 : final_target,
1793 : final_target_parallel_safe,
1794 : have_postponed_srfs ? -1.0 :
1795 : limit_tuples);
1796 : /* Fix things up if final_target contains SRFs */
1797 76172 : if (parse->hasTargetSRFs)
1798 196 : adjust_paths_for_srfs(root, current_rel,
1799 : final_targets,
1800 : final_targets_contain_srfs);
1801 : }
1802 :
1803 : /*
1804 : * Now we are prepared to build the final-output upperrel.
1805 : */
1806 523300 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1807 :
1808 : /*
1809 : * If the input rel is marked consider_parallel and there's nothing that's
1810 : * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1811 : * consider_parallel as well. Note that if the query has rowMarks or is
1812 : * not a SELECT, consider_parallel will be false for every relation in the
1813 : * query.
1814 : */
1815 698848 : if (current_rel->consider_parallel &&
1816 351072 : is_parallel_safe(root, parse->limitOffset) &&
1817 175524 : is_parallel_safe(root, parse->limitCount))
1818 175518 : final_rel->consider_parallel = true;
1819 :
1820 : /*
1821 : * If the current_rel belongs to a single FDW, so does the final_rel.
1822 : */
1823 523300 : final_rel->serverid = current_rel->serverid;
1824 523300 : final_rel->userid = current_rel->userid;
1825 523300 : final_rel->useridiscurrent = current_rel->useridiscurrent;
1826 523300 : final_rel->fdwroutine = current_rel->fdwroutine;
1827 :
1828 : /*
1829 : * Generate paths for the final_rel. Insert all surviving paths, with
1830 : * LockRows, Limit, and/or ModifyTable steps added if needed.
1831 : */
1832 1066318 : foreach(lc, current_rel->pathlist)
1833 : {
1834 543018 : Path *path = (Path *) lfirst(lc);
1835 :
1836 : /*
1837 : * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1838 : * (Note: we intentionally test parse->rowMarks not root->rowMarks
1839 : * here. If there are only non-locking rowmarks, they should be
1840 : * handled by the ModifyTable node instead. However, root->rowMarks
1841 : * is what goes into the LockRows node.)
1842 : */
1843 543018 : if (parse->rowMarks)
1844 : {
1845 8246 : path = (Path *) create_lockrows_path(root, final_rel, path,
1846 : root->rowMarks,
1847 : assign_special_exec_param(root));
1848 : }
1849 :
1850 : /*
1851 : * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1852 : */
1853 543018 : if (limit_needed(parse))
1854 : {
1855 5934 : path = (Path *) create_limit_path(root, final_rel, path,
1856 : parse->limitOffset,
1857 : parse->limitCount,
1858 : parse->limitOption,
1859 : offset_est, count_est);
1860 : }
1861 :
1862 : /*
1863 : * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1864 : */
1865 543018 : if (parse->commandType != CMD_SELECT)
1866 : {
1867 : Index rootRelation;
1868 86820 : List *resultRelations = NIL;
1869 86820 : List *updateColnosLists = NIL;
1870 86820 : List *withCheckOptionLists = NIL;
1871 86820 : List *returningLists = NIL;
1872 86820 : List *mergeActionLists = NIL;
1873 86820 : List *mergeJoinConditions = NIL;
1874 : List *rowMarks;
1875 :
1876 86820 : if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1877 : {
1878 : /* Inherited UPDATE/DELETE/MERGE */
1879 2816 : RelOptInfo *top_result_rel = find_base_rel(root,
1880 : parse->resultRelation);
1881 2816 : int resultRelation = -1;
1882 :
1883 : /* Pass the root result rel forward to the executor. */
1884 2816 : rootRelation = parse->resultRelation;
1885 :
1886 : /* Add only leaf children to ModifyTable. */
1887 8230 : while ((resultRelation = bms_next_member(root->leaf_result_relids,
1888 8230 : resultRelation)) >= 0)
1889 : {
1890 5414 : RelOptInfo *this_result_rel = find_base_rel(root,
1891 : resultRelation);
1892 :
1893 : /*
1894 : * Also exclude any leaf rels that have turned dummy since
1895 : * being added to the list, for example, by being excluded
1896 : * by constraint exclusion.
1897 : */
1898 5414 : if (IS_DUMMY_REL(this_result_rel))
1899 174 : continue;
1900 :
1901 : /* Build per-target-rel lists needed by ModifyTable */
1902 5240 : resultRelations = lappend_int(resultRelations,
1903 : resultRelation);
1904 5240 : if (parse->commandType == CMD_UPDATE)
1905 : {
1906 3624 : List *update_colnos = root->update_colnos;
1907 :
1908 3624 : if (this_result_rel != top_result_rel)
1909 : update_colnos =
1910 3624 : adjust_inherited_attnums_multilevel(root,
1911 : update_colnos,
1912 : this_result_rel->relid,
1913 : top_result_rel->relid);
1914 3624 : updateColnosLists = lappend(updateColnosLists,
1915 : update_colnos);
1916 : }
1917 5240 : if (parse->withCheckOptions)
1918 : {
1919 504 : List *withCheckOptions = parse->withCheckOptions;
1920 :
1921 504 : if (this_result_rel != top_result_rel)
1922 : withCheckOptions = (List *)
1923 504 : adjust_appendrel_attrs_multilevel(root,
1924 : (Node *) withCheckOptions,
1925 : this_result_rel,
1926 : top_result_rel);
1927 504 : withCheckOptionLists = lappend(withCheckOptionLists,
1928 : withCheckOptions);
1929 : }
1930 5240 : if (parse->returningList)
1931 : {
1932 840 : List *returningList = parse->returningList;
1933 :
1934 840 : if (this_result_rel != top_result_rel)
1935 : returningList = (List *)
1936 840 : adjust_appendrel_attrs_multilevel(root,
1937 : (Node *) returningList,
1938 : this_result_rel,
1939 : top_result_rel);
1940 840 : returningLists = lappend(returningLists,
1941 : returningList);
1942 : }
1943 5240 : if (parse->mergeActionList)
1944 : {
1945 : ListCell *l;
1946 522 : List *mergeActionList = NIL;
1947 :
1948 : /*
1949 : * Copy MergeActions and translate stuff that
1950 : * references attribute numbers.
1951 : */
1952 1614 : foreach(l, parse->mergeActionList)
1953 : {
1954 1092 : MergeAction *action = lfirst(l),
1955 1092 : *leaf_action = copyObject(action);
1956 :
1957 1092 : leaf_action->qual =
1958 1092 : adjust_appendrel_attrs_multilevel(root,
1959 : (Node *) action->qual,
1960 : this_result_rel,
1961 : top_result_rel);
1962 1092 : leaf_action->targetList = (List *)
1963 1092 : adjust_appendrel_attrs_multilevel(root,
1964 1092 : (Node *) action->targetList,
1965 : this_result_rel,
1966 : top_result_rel);
1967 1092 : if (leaf_action->commandType == CMD_UPDATE)
1968 592 : leaf_action->updateColnos =
1969 592 : adjust_inherited_attnums_multilevel(root,
1970 : action->updateColnos,
1971 : this_result_rel->relid,
1972 : top_result_rel->relid);
1973 1092 : mergeActionList = lappend(mergeActionList,
1974 : leaf_action);
1975 : }
1976 :
1977 522 : mergeActionLists = lappend(mergeActionLists,
1978 : mergeActionList);
1979 : }
1980 5240 : if (parse->commandType == CMD_MERGE)
1981 : {
1982 522 : Node *mergeJoinCondition = parse->mergeJoinCondition;
1983 :
1984 522 : if (this_result_rel != top_result_rel)
1985 : mergeJoinCondition =
1986 522 : adjust_appendrel_attrs_multilevel(root,
1987 : mergeJoinCondition,
1988 : this_result_rel,
1989 : top_result_rel);
1990 522 : mergeJoinConditions = lappend(mergeJoinConditions,
1991 : mergeJoinCondition);
1992 : }
1993 : }
1994 :
1995 2816 : if (resultRelations == NIL)
1996 : {
1997 : /*
1998 : * We managed to exclude every child rel, so generate a
1999 : * dummy one-relation plan using info for the top target
2000 : * rel (even though that may not be a leaf target).
2001 : * Although it's clear that no data will be updated or
2002 : * deleted, we still need to have a ModifyTable node so
2003 : * that any statement triggers will be executed. (This
2004 : * could be cleaner if we fixed nodeModifyTable.c to allow
2005 : * zero target relations, but that probably wouldn't be a
2006 : * net win.)
2007 : */
2008 30 : resultRelations = list_make1_int(parse->resultRelation);
2009 30 : if (parse->commandType == CMD_UPDATE)
2010 30 : updateColnosLists = list_make1(root->update_colnos);
2011 30 : if (parse->withCheckOptions)
2012 0 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2013 30 : if (parse->returningList)
2014 18 : returningLists = list_make1(parse->returningList);
2015 30 : if (parse->mergeActionList)
2016 0 : mergeActionLists = list_make1(parse->mergeActionList);
2017 30 : if (parse->commandType == CMD_MERGE)
2018 0 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2019 : }
2020 : }
2021 : else
2022 : {
2023 : /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2024 84004 : rootRelation = 0; /* there's no separate root rel */
2025 84004 : resultRelations = list_make1_int(parse->resultRelation);
2026 84004 : if (parse->commandType == CMD_UPDATE)
2027 11924 : updateColnosLists = list_make1(root->update_colnos);
2028 84004 : if (parse->withCheckOptions)
2029 926 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2030 84004 : if (parse->returningList)
2031 2418 : returningLists = list_make1(parse->returningList);
2032 84004 : if (parse->mergeActionList)
2033 1632 : mergeActionLists = list_make1(parse->mergeActionList);
2034 84004 : if (parse->commandType == CMD_MERGE)
2035 1632 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2036 : }
2037 :
2038 : /*
2039 : * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2040 : * will have dealt with fetching non-locked marked rows, else we
2041 : * need to have ModifyTable do that.
2042 : */
2043 86820 : if (parse->rowMarks)
2044 0 : rowMarks = NIL;
2045 : else
2046 86820 : rowMarks = root->rowMarks;
2047 :
2048 : path = (Path *)
2049 86820 : create_modifytable_path(root, final_rel,
2050 : path,
2051 : parse->commandType,
2052 86820 : parse->canSetTag,
2053 86820 : parse->resultRelation,
2054 : rootRelation,
2055 86820 : root->partColsUpdated,
2056 : resultRelations,
2057 : updateColnosLists,
2058 : withCheckOptionLists,
2059 : returningLists,
2060 : rowMarks,
2061 : parse->onConflict,
2062 : mergeActionLists,
2063 : mergeJoinConditions,
2064 : assign_special_exec_param(root));
2065 : }
2066 :
2067 : /* And shove it into final_rel */
2068 543018 : add_path(final_rel, path);
2069 : }
2070 :
2071 : /*
2072 : * Generate partial paths for final_rel, too, if outer query levels might
2073 : * be able to make use of them.
2074 : */
2075 523300 : if (final_rel->consider_parallel && root->query_level > 1 &&
2076 25484 : !limit_needed(parse))
2077 : {
2078 : Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2079 25406 : foreach(lc, current_rel->partial_pathlist)
2080 : {
2081 108 : Path *partial_path = (Path *) lfirst(lc);
2082 :
2083 108 : add_partial_path(final_rel, partial_path);
2084 : }
2085 : }
2086 :
2087 523300 : extra.limit_needed = limit_needed(parse);
2088 523300 : extra.limit_tuples = limit_tuples;
2089 523300 : extra.count_est = count_est;
2090 523300 : extra.offset_est = offset_est;
2091 :
2092 : /*
2093 : * If there is an FDW that's responsible for all baserels of the query,
2094 : * let it consider adding ForeignPaths.
2095 : */
2096 523300 : if (final_rel->fdwroutine &&
2097 1256 : final_rel->fdwroutine->GetForeignUpperPaths)
2098 1188 : final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2099 : current_rel, final_rel,
2100 : &extra);
2101 :
2102 : /* Let extensions possibly add some more paths */
2103 523300 : if (create_upper_paths_hook)
2104 0 : (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2105 : current_rel, final_rel, &extra);
2106 :
2107 : /* Note: currently, we leave it to callers to do set_cheapest() */
2108 523300 : }
2109 :
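: /*
:  * Editorial sketch (not part of planner.c): the tuple_fraction
:  * convention documented at grouping_planner(), restated as code.
:  * "plan_rows" stands for a plan's estimated total output.
:  */
: #ifdef NOT_USED
: static double
: tuple_fraction_to_rows(double tuple_fraction, double plan_rows)
: {
: 	if (tuple_fraction <= 0.0)
: 		return plan_rows;		/* 0 means "expect all tuples" */
: 	if (tuple_fraction < 1.0)
: 		return tuple_fraction * plan_rows;	/* fraction of the output */
: 	return tuple_fraction;		/* >= 1 is an absolute count (LIMIT) */
: }
: #endif
: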
2110 : /*
2111 : * Do preprocessing for groupingSets clause and related data. This handles the
2112 : * preliminary steps of expanding the grouping sets, organizing them into lists
2113 : * of rollups, and preparing annotations which will later be filled in with
2114 : * size estimates.
2115 : */
2116 : static grouping_sets_data *
2117 878 : preprocess_grouping_sets(PlannerInfo *root)
2118 : {
2119 878 : Query *parse = root->parse;
2120 : List *sets;
2121 878 : int maxref = 0;
2122 : ListCell *lc_set;
2123 878 : grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
2124 :
2125 878 : parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
2126 :
2127 878 : gd->any_hashable = false;
2128 878 : gd->unhashable_refs = NULL;
2129 878 : gd->unsortable_refs = NULL;
2130 878 : gd->unsortable_sets = NIL;
2131 :
2132 : /*
2133 : * We don't currently make any attempt to optimize the groupClause when
2134 : * there are grouping sets, so just duplicate it in processed_groupClause.
2135 : */
2136 878 : root->processed_groupClause = parse->groupClause;
2137 :
2138 878 : if (parse->groupClause)
2139 : {
2140 : ListCell *lc;
2141 :
2142 2684 : foreach(lc, parse->groupClause)
2143 : {
2144 1848 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2145 1848 : Index ref = gc->tleSortGroupRef;
2146 :
2147 1848 : if (ref > maxref)
2148 1812 : maxref = ref;
2149 :
2150 1848 : if (!gc->hashable)
2151 30 : gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2152 :
2153 1848 : if (!OidIsValid(gc->sortop))
2154 42 : gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2155 : }
2156 : }
2157 :
2158 : /* Allocate workspace array for remapping */
2159 878 : gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2160 :
2161 : /*
2162 : * If we have any unsortable sets, we must extract them before trying to
2163 : * prepare rollups. Unsortable sets don't go through
2164 : * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2165 : * here.
2166 : */
2167 878 : if (!bms_is_empty(gd->unsortable_refs))
2168 : {
2169 42 : List *sortable_sets = NIL;
2170 : ListCell *lc;
2171 :
2172 126 : foreach(lc, parse->groupingSets)
2173 : {
2174 90 : List *gset = (List *) lfirst(lc);
2175 :
2176 90 : if (bms_overlap_list(gd->unsortable_refs, gset))
2177 : {
2178 48 : GroupingSetData *gs = makeNode(GroupingSetData);
2179 :
2180 48 : gs->set = gset;
2181 48 : gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2182 :
2183 : /*
2184 : * We must enforce here that an unsortable set is hashable;
2185 : * later code assumes this. Parse analysis only checks that
2186 : * every individual column is either hashable or sortable.
2187 : *
2188 : * Note that passing this test doesn't guarantee we can
2189 : * generate a plan; there might be other showstoppers.
2190 : */
2191 48 : if (bms_overlap_list(gd->unhashable_refs, gset))
2192 6 : ereport(ERROR,
2193 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2194 : errmsg("could not implement GROUP BY"),
2195 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2196 : }
2197 : else
2198 42 : sortable_sets = lappend(sortable_sets, gset);
2199 : }
2200 :
2201 36 : if (sortable_sets)
2202 30 : sets = extract_rollup_sets(sortable_sets);
2203 : else
2204 6 : sets = NIL;
2205 : }
2206 : else
2207 836 : sets = extract_rollup_sets(parse->groupingSets);
2208 :
2209 2310 : foreach(lc_set, sets)
2210 : {
2211 1438 : List *current_sets = (List *) lfirst(lc_set);
2212 1438 : RollupData *rollup = makeNode(RollupData);
2213 : GroupingSetData *gs;
2214 :
2215 : /*
2216 : * Reorder the current list of grouping sets into correct prefix
2217 : * order. If only one aggregation pass is needed, try to make the
2218 : * list match the ORDER BY clause; if more than one pass is needed, we
2219 : * don't bother with that.
2220 : *
2221 : * Note that this reorders the sets from smallest-member-first to
2222 : * largest-member-first, and applies the GroupingSetData annotations,
2223 : * though the data will be filled in later.
2224 : */
2225 1438 : current_sets = reorder_grouping_sets(current_sets,
2226 1438 : (list_length(sets) == 1
2227 : ? parse->sortClause
2228 : : NIL));
2229 :
2230 : /*
2231 : * Get the initial (and therefore largest) grouping set.
2232 : */
2233 1438 : gs = linitial_node(GroupingSetData, current_sets);
2234 :
2235 : /*
2236 : * Order the groupClause appropriately. If the first grouping set is
2237 : * empty, then the groupClause must also be empty; otherwise we have
2238 : * to force the groupClause to match that grouping set's order.
2239 : *
2240 : * (The first grouping set can be empty even though parse->groupClause
2241 : * is not empty only if all non-empty grouping sets are unsortable.
2242 : * The groupClauses for hashed grouping sets are built later on.)
2243 : */
2244 1438 : if (gs->set)
2245 1396 : rollup->groupClause = preprocess_groupclause(root, gs->set);
2246 : else
2247 42 : rollup->groupClause = NIL;
2248 :
2249 : /*
2250 : * Is it hashable? We pretend empty sets are hashable even though we
2251 : * actually force them not to be hashed later. But don't bother if
2252 : * there's nothing but empty sets (since in that case we can't hash
2253 : * anything).
2254 : */
2255 1438 : if (gs->set &&
2256 1396 : !bms_overlap_list(gd->unhashable_refs, gs->set))
2257 : {
2258 1372 : rollup->hashable = true;
2259 1372 : gd->any_hashable = true;
2260 : }
2261 :
2262 : /*
2263 : * Now that we've pinned down an order for the groupClause for this
2264 : * list of grouping sets, we need to remap the entries in the grouping
2265 : * sets from sortgrouprefs to plain indices (0-based) into the
2266 : * groupClause for this collection of grouping sets. We keep the
2267 : * original form for later use, though.
2268 : */
2269 1438 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2270 : current_sets,
2271 : gd->tleref_to_colnum_map);
2272 1438 : rollup->gsets_data = current_sets;
2273 :
2274 1438 : gd->rollups = lappend(gd->rollups, rollup);
2275 : }
2276 :
2277 872 : if (gd->unsortable_sets)
2278 : {
2279 : /*
2280 : * We have not yet pinned down a groupclause for this, but we will
2281 : * need index-based lists for estimation purposes. Construct
2282 : * hash_sets_idx based on the entire original groupclause for now.
2283 : */
2284 36 : gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2285 : gd->unsortable_sets,
2286 : gd->tleref_to_colnum_map);
2287 36 : gd->any_hashable = true;
2288 : }
2289 :
2290 872 : return gd;
2291 : }
2292 :
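: /*
:  * Worked example (editorial): GROUP BY CUBE(a, b) expands to the four
:  * grouping sets {a,b}, {a}, {b} and {}.  Sets containing an unsortable
:  * (but hashable) column are split off into unsortable_sets above; the
:  * rest go through extract_rollup_sets() and come back organized into
:  * rollups, here e.g. ({} <= {a} <= {a,b}) plus ({b}).
:  */
: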
2293 : /*
2294 : * Given a groupclause and a list of GroupingSetData, return equivalent sets
2295 : * (without annotation) mapped to indexes into the given groupclause.
2296 : */
2297 : static List *
2298 4200 : remap_to_groupclause_idx(List *groupClause,
2299 : List *gsets,
2300 : int *tleref_to_colnum_map)
2301 : {
2302 4200 : int ref = 0;
2303 4200 : List *result = NIL;
2304 : ListCell *lc;
2305 :
2306 10264 : foreach(lc, groupClause)
2307 : {
2308 6064 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2309 :
2310 6064 : tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2311 : }
2312 :
2313 9702 : foreach(lc, gsets)
2314 : {
2315 5502 : List *set = NIL;
2316 : ListCell *lc2;
2317 5502 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2318 :
2319 12404 : foreach(lc2, gs->set)
2320 : {
2321 6902 : set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2322 : }
2323 :
2324 5502 : result = lappend(result, set);
2325 : }
2326 :
2327 4200 : return result;
2328 : }
2329 :
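: /*
:  * Worked example (editorial): with a three-entry groupClause whose
:  * tleSortGroupRefs are (4, 2, 7), the map becomes 4->0, 2->1, 7->2,
:  * so a grouping set given as refs (7, 4) comes back as indexes (2, 0).
:  */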
2330 :
2331 : /*
2332 : * preprocess_rowmarks - set up PlanRowMarks if needed
2333 : */
2334 : static void
2335 527226 : preprocess_rowmarks(PlannerInfo *root)
2336 : {
2337 527226 : Query *parse = root->parse;
2338 : Bitmapset *rels;
2339 : List *prowmarks;
2340 : ListCell *l;
2341 : int i;
2342 :
2343 527226 : if (parse->rowMarks)
2344 : {
2345 : /*
2346 : * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2347 : * grouping, since grouping renders a reference to individual tuple
2348 : * CTIDs invalid. This is also checked at parse time, but that's
2349 : * insufficient because of rule substitution, query pullup, etc.
2350 : */
2351 7758 : CheckSelectLocking(parse, linitial_node(RowMarkClause,
2352 : parse->rowMarks)->strength);
2353 : }
2354 : else
2355 : {
2356 : /*
2357 : * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2358 : * UPDATE/SHARE.
2359 : */
2360 519468 : if (parse->commandType != CMD_UPDATE &&
2361 505560 : parse->commandType != CMD_DELETE &&
2362 501258 : parse->commandType != CMD_MERGE)
2363 499386 : return;
2364 : }
2365 :
2366 : /*
2367 : * We need to have rowmarks for all base relations except the target. We
2368 : * make a bitmapset of all base rels and then remove the items we don't
2369 : * need or have FOR [KEY] UPDATE/SHARE marks for.
2370 : */
2371 27840 : rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2372 27840 : if (parse->resultRelation)
2373 20082 : rels = bms_del_member(rels, parse->resultRelation);
2374 :
2375 : /*
2376 : * Convert RowMarkClauses to PlanRowMark representation.
2377 : */
2378 27840 : prowmarks = NIL;
2379 35876 : foreach(l, parse->rowMarks)
2380 : {
2381 8036 : RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2382 8036 : RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2383 : PlanRowMark *newrc;
2384 :
2385 : /*
2386 : * Currently, it is syntactically impossible to have FOR UPDATE et al
2387 : * applied to an update/delete target rel. If that ever becomes
2388 : * possible, we should drop the target from the PlanRowMark list.
2389 : */
2390 : Assert(rc->rti != parse->resultRelation);
2391 :
2392 : /*
2393 : * Ignore RowMarkClauses for subqueries; they aren't real tables and
2394 : * can't support true locking. Subqueries that got flattened into the
2395 : * main query should be ignored completely. Any that didn't will get
2396 : * ROW_MARK_COPY items in the next loop.
2397 : */
2398 8036 : if (rte->rtekind != RTE_RELATION)
2399 108 : continue;
2400 :
2401 7928 : rels = bms_del_member(rels, rc->rti);
2402 :
2403 7928 : newrc = makeNode(PlanRowMark);
2404 7928 : newrc->rti = newrc->prti = rc->rti;
2405 7928 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2406 7928 : newrc->markType = select_rowmark_type(rte, rc->strength);
2407 7928 : newrc->allMarkTypes = (1 << newrc->markType);
2408 7928 : newrc->strength = rc->strength;
2409 7928 : newrc->waitPolicy = rc->waitPolicy;
2410 7928 : newrc->isParent = false;
2411 :
2412 7928 : prowmarks = lappend(prowmarks, newrc);
2413 : }
2414 :
2415 : /*
2416 : * Now, add rowmarks for any non-target, non-locked base relations.
2417 : */
2418 27840 : i = 0;
2419 67090 : foreach(l, parse->rtable)
2420 : {
2421 39250 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2422 : PlanRowMark *newrc;
2423 :
2424 39250 : i++;
2425 39250 : if (!bms_is_member(i, rels))
2426 35488 : continue;
2427 :
2428 3762 : newrc = makeNode(PlanRowMark);
2429 3762 : newrc->rti = newrc->prti = i;
2430 3762 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2431 3762 : newrc->markType = select_rowmark_type(rte, LCS_NONE);
2432 3762 : newrc->allMarkTypes = (1 << newrc->markType);
2433 3762 : newrc->strength = LCS_NONE;
2434 3762 : newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2435 3762 : newrc->isParent = false;
2436 :
2437 3762 : prowmarks = lappend(prowmarks, newrc);
2438 : }
2439 :
2440 27840 : root->rowMarks = prowmarks;
2441 : }
2442 :
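: /*
:  * Editorial example: in "UPDATE t SET ... FROM a, b", the result
:  * relation t gets no rowmark, while a and b each get a PlanRowMark
:  * built with LCS_NONE (ROW_MARK_REFERENCE for regular tables) so that
:  * EvalPlanQual rechecks can re-fetch their rows.
:  */
: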
2443 : /*
2444 : * Select RowMarkType to use for a given table
2445 : */
2446 : RowMarkType
2447 14022 : select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2448 : {
2449 14022 : if (rte->rtekind != RTE_RELATION)
2450 : {
2451 : /* If it's not a table at all, use ROW_MARK_COPY */
2452 1512 : return ROW_MARK_COPY;
2453 : }
2454 12510 : else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2455 : {
2456 : /* Let the FDW select the rowmark type, if it wants to */
2457 212 : FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2458 :
2459 212 : if (fdwroutine->GetForeignRowMarkType != NULL)
2460 0 : return fdwroutine->GetForeignRowMarkType(rte, strength);
2461 : /* Otherwise, use ROW_MARK_COPY by default */
2462 212 : return ROW_MARK_COPY;
2463 : }
2464 : else
2465 : {
2466 : /* Regular table, apply the appropriate lock type */
2467 12298 : switch (strength)
2468 : {
2469 2480 : case LCS_NONE:
2470 :
2471 : /*
2472 : * We don't need a tuple lock, only the ability to re-fetch
2473 : * the row.
2474 : */
2475 2480 : return ROW_MARK_REFERENCE;
2476 : break;
2477 7922 : case LCS_FORKEYSHARE:
2478 7922 : return ROW_MARK_KEYSHARE;
2479 : break;
2480 300 : case LCS_FORSHARE:
2481 300 : return ROW_MARK_SHARE;
2482 : break;
2483 72 : case LCS_FORNOKEYUPDATE:
2484 72 : return ROW_MARK_NOKEYEXCLUSIVE;
2485 : break;
2486 1524 : case LCS_FORUPDATE:
2487 1524 : return ROW_MARK_EXCLUSIVE;
2488 : break;
2489 : }
2490 0 : elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2491 : return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2492 : }
2493 : }
2494 :
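: /*
:  * Editorial example: "SELECT ... FOR UPDATE" on a regular heap table
:  * yields ROW_MARK_EXCLUSIVE, while the same clause applied to a
:  * subquery left in the range table yields ROW_MARK_COPY, since a
:  * subquery has no physical rows to lock.
:  */
: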
2495 : /*
2496 : * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2497 : *
2498 : * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2499 : * results back in *count_est and *offset_est. These variables are set to
2500 : * 0 if the corresponding clause is not present, and -1 if it's present
2501 : * but we couldn't estimate the value for it. (The "0" convention is OK
2502 : * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2503 : * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2504 : * usual practice of never estimating less than one row.) These values will
2505 : * be passed to create_limit_path, which see if you change this code.
2506 : *
2507 : * The return value is the suitably adjusted tuple_fraction to use for
2508 : * planning the query. This adjustment is not overridable, since it reflects
2509 : * plan actions that grouping_planner() will certainly take, not assumptions
2510 : * about context.
2511 : */
2512 : static double
2513 5008 : preprocess_limit(PlannerInfo *root, double tuple_fraction,
2514 : int64 *offset_est, int64 *count_est)
2515 : {
2516 5008 : Query *parse = root->parse;
2517 : Node *est;
2518 : double limit_fraction;
2519 :
2520 : /* Should not be called unless LIMIT or OFFSET */
2521 : Assert(parse->limitCount || parse->limitOffset);
2522 :
2523 : /*
2524 : * Try to obtain the clause values. We use estimate_expression_value
2525 : * primarily because it can sometimes do something useful with Params.
2526 : */
2527 5008 : if (parse->limitCount)
2528 : {
2529 4498 : est = estimate_expression_value(root, parse->limitCount);
2530 4498 : if (est && IsA(est, Const))
2531 : {
2532 4492 : if (((Const *) est)->constisnull)
2533 : {
2534 : /* NULL indicates LIMIT ALL, ie, no limit */
2535 0 : *count_est = 0; /* treat as not present */
2536 : }
2537 : else
2538 : {
2539 4492 : *count_est = DatumGetInt64(((Const *) est)->constvalue);
2540 4492 : if (*count_est <= 0)
2541 150 : *count_est = 1; /* force to at least 1 */
2542 : }
2543 : }
2544 : else
2545 6 : *count_est = -1; /* can't estimate */
2546 : }
2547 : else
2548 510 : *count_est = 0; /* not present */
2549 :
2550 5008 : if (parse->limitOffset)
2551 : {
2552 882 : est = estimate_expression_value(root, parse->limitOffset);
2553 882 : if (est && IsA(est, Const))
2554 : {
2555 858 : if (((Const *) est)->constisnull)
2556 : {
2557 : /* Treat NULL as no offset; the executor will too */
2558 0 : *offset_est = 0; /* treat as not present */
2559 : }
2560 : else
2561 : {
2562 858 : *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2563 858 : if (*offset_est < 0)
2564 0 : *offset_est = 0; /* treat as not present */
2565 : }
2566 : }
2567 : else
2568 24 : *offset_est = -1; /* can't estimate */
2569 : }
2570 : else
2571 4126 : *offset_est = 0; /* not present */
2572 :
2573 5008 : if (*count_est != 0)
2574 : {
2575 : /*
2576 : * A LIMIT clause limits the absolute number of tuples returned.
2577 : * However, if it's not a constant LIMIT then we have to guess; for
2578 : * lack of a better idea, assume 10% of the plan's result is wanted.
2579 : */
2580 4498 : if (*count_est < 0 || *offset_est < 0)
2581 : {
2582 : /* LIMIT or OFFSET is an expression ... punt ... */
2583 24 : limit_fraction = 0.10;
2584 : }
2585 : else
2586 : {
2587 : /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2588 4474 : limit_fraction = (double) *count_est + (double) *offset_est;
2589 : }
2590 :
2591 : /*
2592 : * If we have absolute limits from both caller and LIMIT, use the
2593 : * smaller value; likewise if they are both fractional. If one is
2594 : * fractional and the other absolute, we can't easily determine which
2595 : * is smaller, but we use the heuristic that the absolute will usually
2596 : * be smaller.
2597 : */
2598 4498 : if (tuple_fraction >= 1.0)
2599 : {
2600 6 : if (limit_fraction >= 1.0)
2601 : {
2602 : /* both absolute */
2603 6 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2604 : }
2605 : else
2606 : {
2607 : /* caller absolute, limit fractional; use caller's value */
2608 : }
2609 : }
2610 4492 : else if (tuple_fraction > 0.0)
2611 : {
2612 148 : if (limit_fraction >= 1.0)
2613 : {
2614 : /* caller fractional, limit absolute; use limit */
2615 148 : tuple_fraction = limit_fraction;
2616 : }
2617 : else
2618 : {
2619 : /* both fractional */
2620 0 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2621 : }
2622 : }
2623 : else
2624 : {
2625 : /* no info from caller, just use limit */
2626 4344 : tuple_fraction = limit_fraction;
2627 : }
2628 : }
2629 510 : else if (*offset_est != 0 && tuple_fraction > 0.0)
2630 : {
2631 : /*
2632 : * We have an OFFSET but no LIMIT. This acts entirely differently
2633 : * from the LIMIT case: here, we need to increase rather than decrease
2634 : * the caller's tuple_fraction, because the OFFSET acts to cause more
2635 : * tuples to be fetched instead of fewer. This only matters if we got
2636 : * a tuple_fraction > 0, however.
2637 : *
2638 : * As above, use 10% if OFFSET is present but unestimatable.
2639 : */
2640 12 : if (*offset_est < 0)
2641 0 : limit_fraction = 0.10;
2642 : else
2643 12 : limit_fraction = (double) *offset_est;
2644 :
2645 : /*
2646 : * If we have absolute counts from both caller and OFFSET, add them
2647 : * together; likewise if they are both fractional. If one is
2648 : * fractional and the other absolute, we want to take the larger, and
2649 : * we heuristically assume that's the fractional one.
2650 : */
2651 12 : if (tuple_fraction >= 1.0)
2652 : {
2653 0 : if (limit_fraction >= 1.0)
2654 : {
2655 : /* both absolute, so add them together */
2656 0 : tuple_fraction += limit_fraction;
2657 : }
2658 : else
2659 : {
2660 : /* caller absolute, limit fractional; use limit */
2661 0 : tuple_fraction = limit_fraction;
2662 : }
2663 : }
2664 : else
2665 : {
2666 12 : if (limit_fraction >= 1.0)
2667 : {
2668 : /* caller fractional, limit absolute; use caller's value */
2669 : }
2670 : else
2671 : {
2672 : /* both fractional, so add them together */
2673 0 : tuple_fraction += limit_fraction;
2674 0 : if (tuple_fraction >= 1.0)
2675 0 : tuple_fraction = 0.0; /* assume fetch all */
2676 : }
2677 : }
2678 : }
2679 :
2680 5008 : return tuple_fraction;
2681 : }
2682 :
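: /*
:  * Worked example (editorial): for "LIMIT 10 OFFSET 20" with no
:  * tuple_fraction hint from the caller, we get count_est = 10 and
:  * offset_est = 20, and the returned tuple_fraction is 30, an absolute
:  * row-count target; grouping_planner() likewise derives
:  * limit_tuples = 30 for bounded-sort costing.
:  */
: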
2683 : /*
2684 : * limit_needed - do we actually need a Limit plan node?
2685 : *
2686 : * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2687 : * a Limit node. This is worth checking for because "OFFSET 0" is a common
2688 : * locution for an optimization fence. (Because other places in the planner
2689 : * merely check whether parse->limitOffset isn't NULL, it will still work as
2690 : * an optimization fence --- we're just suppressing unnecessary run-time
2691 : * overhead.)
2692 : *
2693 : * This might look like it could be merged into preprocess_limit, but there's
2694 : * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2695 : * in preprocess_limit it's good enough to consider estimated values.
2696 : */
2697 : bool
2698 1104614 : limit_needed(Query *parse)
2699 : {
2700 : Node *node;
2701 :
2702 1104614 : node = parse->limitCount;
2703 1104614 : if (node)
2704 : {
2705 10766 : if (IsA(node, Const))
2706 : {
2707 : /* NULL indicates LIMIT ALL, ie, no limit */
2708 10530 : if (!((Const *) node)->constisnull)
2709 10530 : return true; /* LIMIT with a constant value */
2710 : }
2711 : else
2712 236 : return true; /* non-constant LIMIT */
2713 : }
2714 :
2715 1093848 : node = parse->limitOffset;
2716 1093848 : if (node)
2717 : {
2718 1498 : if (IsA(node, Const))
2719 : {
2720 : /* Treat NULL as no offset; the executor would too */
2721 1190 : if (!((Const *) node)->constisnull)
2722 : {
2723 1190 : int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2724 :
2725 1190 : if (offset != 0)
2726 110 : return true; /* OFFSET with a nonzero value */
2727 : }
2728 : }
2729 : else
2730 308 : return true; /* non-constant OFFSET */
2731 : }
2732 :
2733 1093430 : return false; /* don't need a Limit plan node */
2734 : }
2735 :
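: /*
:  * Editorial examples: "LIMIT 5" needs a Limit node (constant limit),
:  * as does "OFFSET $1" (non-constant offset); "OFFSET 0" and
:  * "LIMIT ALL" need none, though "OFFSET 0" still acts as the usual
:  * optimization fence.
:  */
: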
2736 : /*
2737 : * preprocess_groupclause - do preparatory work on GROUP BY clause
2738 : *
2739 : * The idea here is to adjust the ordering of the GROUP BY elements
2740 : * (which in itself is semantically insignificant) to match ORDER BY,
2741 : * thereby allowing a single sort operation to both implement the ORDER BY
2742 : * requirement and set up for a Unique step that implements GROUP BY.
2743 : * We also consider partial match between GROUP BY and ORDER BY elements,
2744 : * which can allow ORDER BY to be implemented using an incremental sort.
2745 : *
2746 : * We also consider other orderings of the GROUP BY elements, which could
2747 : * match the sort ordering of other possible plans (eg an indexscan) and
2748 : * thereby reduce cost. This is implemented during the generation of grouping
2749 : * paths. See get_useful_group_keys_orderings() for details.
2750 : *
2751 : * Note: we need no comparable processing of the distinctClause because
2752 : * the parser already enforced that that matches ORDER BY.
2753 : *
2754 : * Note: we return a fresh List, but its elements are the same
2755 : * SortGroupClauses appearing in parse->groupClause. This is important
2756 : * because later processing may modify the processed_groupClause list.
2757 : *
2758 : * For grouping sets, the order of items is instead forced to agree with that
2759 : * of the grouping set (and items not in the grouping set are skipped). The
2760 : * work of sorting the order of grouping set elements to match the ORDER BY if
2761 : * possible is done elsewhere.
2762 : */
2763 : static List *
2764 7754 : preprocess_groupclause(PlannerInfo *root, List *force)
2765 : {
2766 7754 : Query *parse = root->parse;
2767 7754 : List *new_groupclause = NIL;
2768 : ListCell *sl;
2769 : ListCell *gl;
2770 :
2771 : /* For grouping sets, we need to force the ordering */
2772 7754 : if (force)
2773 : {
2774 10108 : foreach(sl, force)
2775 : {
2776 5986 : Index ref = lfirst_int(sl);
2777 5986 : SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2778 :
2779 5986 : new_groupclause = lappend(new_groupclause, cl);
2780 : }
2781 :
2782 4122 : return new_groupclause;
2783 : }
2784 :
2785 : /* If no ORDER BY, nothing useful to do here */
2786 3632 : if (parse->sortClause == NIL)
2787 2058 : return list_copy(parse->groupClause);
2788 :
2789 : /*
2790 : * Scan the ORDER BY clause and construct a list of matching GROUP BY
2791 : * items, but only as far as we can make a matching prefix.
2792 : *
2793 : * This code assumes that the sortClause contains no duplicate items.
2794 : */
2795 3054 : foreach(sl, parse->sortClause)
2796 : {
2797 2126 : SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2798 :
2799 3222 : foreach(gl, parse->groupClause)
2800 : {
2801 2576 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2802 :
2803 2576 : if (equal(gc, sc))
2804 : {
2805 1480 : new_groupclause = lappend(new_groupclause, gc);
2806 1480 : break;
2807 : }
2808 : }
2809 2126 : if (gl == NULL)
2810 646 : break; /* no match, so stop scanning */
2811 : }
2812 :
2813 :
2814 : /* If no match at all, no point in reordering GROUP BY */
2815 1574 : if (new_groupclause == NIL)
2816 298 : return list_copy(parse->groupClause);
2817 :
2818 : /*
2819 : * Add any remaining GROUP BY items to the new list. We don't require a
2820 : * complete match, because even partial match allows ORDER BY to be
2821 : * implemented using incremental sort. Also, give up if there are any
2822 : * non-sortable GROUP BY items, since then there's no hope anyway.
2823 : */
2824 2922 : foreach(gl, parse->groupClause)
2825 : {
2826 1646 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2827 :
2828 1646 : if (list_member_ptr(new_groupclause, gc))
2829 1480 : continue; /* it matched an ORDER BY item */
2830 166 : if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2831 0 : return list_copy(parse->groupClause);
2832 166 : new_groupclause = lappend(new_groupclause, gc);
2833 : }
2834 :
2835 : /* Success --- install the rearranged GROUP BY list */
2836 : Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2837 1276 : return new_groupclause;
2838 : }
2839 :
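: /*
:  * Worked example (editorial): given GROUP BY b, a and ORDER BY a, the
:  * matching prefix is (a) and the leftover b is appended, yielding the
:  * processed list (a, b); a single sort on (a, b) then satisfies ORDER
:  * BY through its prefix, or an incremental sort can extend it.
:  */
: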
2840 : /*
2841 : * Extract lists of grouping sets that can be implemented using a single
2842 : * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2843 : *
2844 : * Input must be sorted with smallest sets first. Result has each sublist
2845 : * sorted with smallest sets first.
2846 : *
2847 : * We want to produce the absolute minimum possible number of lists here to
2848 : * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2849 : * of finding the minimal partition of a partially-ordered set into chains
2850 : * (which is what we need, taking the list of grouping sets as a poset ordered
2851 : * by set inclusion) can be mapped to the problem of finding the maximum
2852 : * cardinality matching on a bipartite graph, which is solvable in polynomial
2853 : * time with a worst-case bound of O(n^2.5), and usually runs much
2854 : * faster. Since our N is at most 4096, we don't need to consider fallbacks to
2855 : * heuristic or approximate methods. (Planning time for a 12-d cube is under
2856 : * half a second on my modest system even with optimization off and assertions
2857 : * on.)
2858 : */
2859 : static List *
2860 866 : extract_rollup_sets(List *groupingSets)
2861 : {
2862 866 : int num_sets_raw = list_length(groupingSets);
2863 866 : int num_empty = 0;
2864 866 : int num_sets = 0; /* distinct sets */
2865 866 : int num_chains = 0;
2866 866 : List *result = NIL;
2867 : List **results;
2868 : List **orig_sets;
2869 : Bitmapset **set_masks;
2870 : int *chains;
2871 : short **adjacency;
2872 : short *adjacency_buf;
2873 : BipartiteMatchState *state;
2874 : int i;
2875 : int j;
2876 : int j_size;
2877 866 : ListCell *lc1 = list_head(groupingSets);
2878 : ListCell *lc;
2879 :
2880 : /*
2881 : * Start by stripping out empty sets. The algorithm doesn't require this,
2882 : * but the planner currently needs all empty sets to be returned in the
2883 : * first list, so we strip them here and add them back after.
2884 : */
2885 1476 : while (lc1 && lfirst(lc1) == NIL)
2886 : {
2887 610 : ++num_empty;
2888 610 : lc1 = lnext(groupingSets, lc1);
2889 : }
2890 :
2891 : /* bail out now if it turns out that all we had were empty sets. */
2892 866 : if (!lc1)
2893 42 : return list_make1(groupingSets);
2894 :
2895 : /*----------
2896 : * We don't strictly need to remove duplicate sets here, but if we don't,
2897 : * they tend to become scattered through the result, which is a bit
2898 : * confusing (and irritating if we ever decide to optimize them out).
2899 : * So we remove them here and add them back after.
2900 : *
2901 : * For each non-duplicate set, we fill in the following:
2902 : *
2903 : * orig_sets[i] = list of the original set lists
2904 : * set_masks[i] = bitmapset for testing inclusion
2905 : * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2906 : *
2907 : * chains[i] will be the result group this set is assigned to.
2908 : *
2909 : * We index all of these from 1 rather than 0 because it is convenient
2910 : * to leave 0 free for the NIL node in the graph algorithm.
2911 : *----------
2912 : */
2913 824 : orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2914 824 : set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2915 824 : adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2916 824 : adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2917 :
2918 824 : j_size = 0;
2919 824 : j = 0;
2920 824 : i = 1;
2921 :
2922 2948 : for_each_cell(lc, groupingSets, lc1)
2923 : {
2924 2124 : List *candidate = (List *) lfirst(lc);
2925 2124 : Bitmapset *candidate_set = NULL;
2926 : ListCell *lc2;
2927 2124 : int dup_of = 0;
2928 :
2929 5142 : foreach(lc2, candidate)
2930 : {
2931 3018 : candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2932 : }
2933 :
2934 : /* we can only be a dup if we're the same length as a previous set */
2935 2124 : if (j_size == list_length(candidate))
2936 : {
2937 : int k;
2938 :
2939 1904 : for (k = j; k < i; ++k)
2940 : {
2941 1236 : if (bms_equal(set_masks[k], candidate_set))
2942 : {
2943 158 : dup_of = k;
2944 158 : break;
2945 : }
2946 : }
2947 : }
2948 1298 : else if (j_size < list_length(candidate))
2949 : {
2950 1298 : j_size = list_length(candidate);
2951 1298 : j = i;
2952 : }
2953 :
2954 2124 : if (dup_of > 0)
2955 : {
2956 158 : orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2957 158 : bms_free(candidate_set);
2958 : }
2959 : else
2960 : {
2961 : int k;
2962 1966 : int n_adj = 0;
2963 :
2964 1966 : orig_sets[i] = list_make1(candidate);
2965 1966 : set_masks[i] = candidate_set;
2966 :
2967 : /* fill in adjacency list; no need to compare equal-size sets */
2968 :
2969 3238 : for (k = j - 1; k > 0; --k)
2970 : {
2971 1272 : if (bms_is_subset(set_masks[k], candidate_set))
2972 1110 : adjacency_buf[++n_adj] = k;
2973 : }
2974 :
2975 1966 : if (n_adj > 0)
2976 : {
2977 598 : adjacency_buf[0] = n_adj;
2978 598 : adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2979 598 : memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2980 : }
2981 : else
2982 1368 : adjacency[i] = NULL;
2983 :
2984 1966 : ++i;
2985 : }
2986 : }
2987 :
2988 824 : num_sets = i - 1;
2989 :
2990 : /*
2991 : * Apply the graph matching algorithm to do the work.
2992 : */
2993 824 : state = BipartiteMatch(num_sets, num_sets, adjacency);
2994 :
2995 : /*
2996 : * Now, the state->pair* fields have the info we need to assign sets to
2997 : * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
2998 : * pair_vu[v] = u (both will be true, but we check both so that we can do
2999 : * it in one pass)
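 *
 * A hypothetical example: if the matching paired set 3 (as superset)
 * with its subset set 1, then pair_uv[3] = 1 and pair_vu[1] = 3; when
 * the loop reaches i = 3 it sees v = 1 < 3 and assigns
 * chains[3] = chains[1], putting both sets in the same chain.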
3000 : */
3001 824 : chains = palloc0((num_sets + 1) * sizeof(int));
3002 :
3003 2790 : for (i = 1; i <= num_sets; ++i)
3004 : {
3005 1966 : int u = state->pair_vu[i];
3006 1966 : int v = state->pair_uv[i];
3007 :
3008 1966 : if (u > 0 && u < i)
3009 0 : chains[i] = chains[u];
3010 1966 : else if (v > 0 && v < i)
3011 570 : chains[i] = chains[v];
3012 : else
3013 1396 : chains[i] = ++num_chains;
3014 : }
3015 :
3016 : /* build result lists. */
3017 824 : results = palloc0((num_chains + 1) * sizeof(List *));
3018 :
3019 2790 : for (i = 1; i <= num_sets; ++i)
3020 : {
3021 1966 : int c = chains[i];
3022 :
3023 : Assert(c > 0);
3024 :
3025 1966 : results[c] = list_concat(results[c], orig_sets[i]);
3026 : }
3027 :
3028 : /* push any empty sets back on the first list. */
3029 1344 : while (num_empty-- > 0)
3030 520 : results[1] = lcons(NIL, results[1]);
3031 :
3032 : /* make result list */
3033 2220 : for (i = 1; i <= num_chains; ++i)
3034 1396 : result = lappend(result, results[i]);
3035 :
3036 : /*
3037 : * Free all the things.
3038 : *
3039 : * (This is over-fussy for small sets but for large sets we could have
3040 : * tied up a nontrivial amount of memory.)
3041 : */
3042 824 : BipartiteMatchFree(state);
3043 824 : pfree(results);
3044 824 : pfree(chains);
3045 2790 : for (i = 1; i <= num_sets; ++i)
3046 1966 : if (adjacency[i])
3047 598 : pfree(adjacency[i]);
3048 824 : pfree(adjacency);
3049 824 : pfree(adjacency_buf);
3050 824 : pfree(orig_sets);
3051 2790 : for (i = 1; i <= num_sets; ++i)
3052 1966 : bms_free(set_masks[i]);
3053 824 : pfree(set_masks);
3054 :
3055 824 : return result;
3056 : }
3057 :
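/*
 * Illustration only (not part of PostgreSQL): a self-contained sketch of
 * the chain-cover-by-matching idea used by extract_rollup_sets above, on
 * the hypothetical sets (a,b,c), (a,b), (a), (c). The real code uses the
 * Hopcroft-Karp implementation in lib/bipartite_match.c; this toy version
 * uses Kuhn's simpler augmenting-path search. As in the real code, index 0
 * is left free to mean "unmatched".
 */
#include <stdbool.h>
#include <stdio.h>

#define NSETS 4

/* the four sets as bitmasks: (a,b,c)=0x7, (a,b)=0x3, (a)=0x1, (c)=0x4 */
static const unsigned masks[NSETS + 1] = {0, 0x7, 0x3, 0x1, 0x4};

static int match_to[NSETS + 1];	/* subset index -> matched superset index */
static bool used[NSETS + 1];

/* try to find an augmenting path matching superset i to some subset */
static bool
augment(int i)
{
	int		j;

	for (j = 1; j <= NSETS; j++)
	{
		/* an edge i -> j exists when set j is a proper subset of set i */
		if ((masks[j] & ~masks[i]) == 0 && masks[j] != masks[i] && !used[j])
		{
			used[j] = true;
			if (match_to[j] == 0 || augment(match_to[j]))
			{
				match_to[j] = i;
				return true;
			}
		}
	}
	return false;
}

int
main(void)
{
	int		i, j, nmatched = 0;

	for (i = 1; i <= NSETS; i++)
	{
		for (j = 0; j <= NSETS; j++)
			used[j] = false;
		if (augment(i))
			nmatched++;
	}
	/* minimum chains = sets - matching size; prints 2 for these sets */
	printf("minimum number of sort chains: %d\n", NSETS - nmatched);
	return 0;
}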
3058 : /*
3059 : * Reorder the elements of a list of grouping sets such that they have correct
3060 : * prefix relationships. Also inserts the GroupingSetData annotations.
3061 : *
3062 : * The input must be ordered with smallest sets first; the result is returned
3063 : * with largest sets first. Note that the result shares no list substructure
3064 : * with the input, so it's safe for the caller to modify it later.
3065 : *
3066 : * If we're passed in a sortclause, we follow its order of columns to the
3067 : * extent possible, to minimize the chance that we add unnecessary sorts.
3068 : * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3069 : * gets implemented in one pass.)
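 *
 * To illustrate with that example: the input arrives smallest-first as
 * (c), (a,b,c). The first candidate (c) matches the first sort column, so
 * "previous" becomes (c). For (a,b,c) the new elements are {a,b}; the
 * next sort columns b and a are both among them, so "previous" grows to
 * (c,b) and then (c,b,a). The resulting rollup sorts by (c,b,a), which
 * also satisfies the ORDER BY.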
3070 : */
3071 : static List *
3072 1438 : reorder_grouping_sets(List *groupingSets, List *sortclause)
3073 : {
3074 : ListCell *lc;
3075 1438 : List *previous = NIL;
3076 1438 : List *result = NIL;
3077 :
3078 4172 : foreach(lc, groupingSets)
3079 : {
3080 2734 : List *candidate = (List *) lfirst(lc);
3081 2734 : List *new_elems = list_difference_int(candidate, previous);
3082 2734 : GroupingSetData *gs = makeNode(GroupingSetData);
3083 :
3084 2898 : while (list_length(sortclause) > list_length(previous) &&
3085 : new_elems != NIL)
3086 : {
3087 272 : SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3088 272 : int ref = sc->tleSortGroupRef;
3089 :
3090 272 : if (list_member_int(new_elems, ref))
3091 : {
3092 164 : previous = lappend_int(previous, ref);
3093 164 : new_elems = list_delete_int(new_elems, ref);
3094 : }
3095 : else
3096 : {
3097 : /* diverged from the sortclause; give up on it */
3098 108 : sortclause = NIL;
3099 108 : break;
3100 : }
3101 : }
3102 :
3103 2734 : previous = list_concat(previous, new_elems);
3104 :
3105 2734 : gs->set = list_copy(previous);
3106 2734 : result = lcons(gs, result);
3107 : }
3108 :
3109 1438 : list_free(previous);
3110 :
3111 1438 : return result;
3112 : }
3113 :
3114 : /*
3115 : * has_volatile_pathkey
3116 : * Returns true if any PathKey in 'keys' has an EquivalenceClass
3117 : * containing a volatile function. Otherwise returns false.
3118 : */
3119 : static bool
3120 3096 : has_volatile_pathkey(List *keys)
3121 : {
3122 : ListCell *lc;
3123 :
3124 6336 : foreach(lc, keys)
3125 : {
3126 3258 : PathKey *pathkey = lfirst_node(PathKey, lc);
3127 :
3128 3258 : if (pathkey->pk_eclass->ec_has_volatile)
3129 18 : return true;
3130 : }
3131 :
3132 3078 : return false;
3133 : }
3134 :
3135 : /*
3136 : * adjust_group_pathkeys_for_groupagg
3137 : * Add pathkeys to root->group_pathkeys to reflect the best set of
3138 : * pre-ordered input for ordered aggregates.
3139 : *
3140 : * We define "best" as the pathkeys that suit the largest number of
3141 : * aggregate functions. We find these by looking at the first ORDER BY /
3142 : * DISTINCT aggregate, taking the pathkeys for that before searching for
3143 : * other aggregates that require the same or a more strict variation of the
3144 : * same pathkeys. We then repeat that process for any remaining aggregates
3145 : * with different pathkeys, and if we find another set of pathkeys that suits a
3146 : * larger number of aggregates then we select those pathkeys instead.
3147 : *
3148 : * When the best pathkeys are found we also mark each Aggref that can use
3149 : * those pathkeys as aggpresorted = true.
3150 : *
3151 : * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3152 : * volatile functions, we never make use of these pathkeys. We want to ensure
3153 : * that sorts using volatile functions are done independently in each Aggref
3154 : * rather than once at the query level. If we were to allow this then Aggrefs
3155 : * with compatible sort orders would all transition their rows in the same
3156 : * order if those pathkeys were deemed to be the best pathkeys to sort on.
3157 : * Whereas if some other set of Aggrefs' pathkeys happened to be deemed
3158 : * better pathkeys to sort on, then the volatile function Aggrefs would be
3159 : * left to perform their sorts individually. To avoid this inconsistent
3160 : * behavior which could make Aggref results depend on what other Aggrefs the
3161 : * query contains, we always force Aggrefs with volatile functions to perform
3162 : * their own sorts.
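 *
 * A hypothetical example of the selection: given
 *
 * SELECT string_agg(y, ',' ORDER BY a),
 * string_agg(y, ',' ORDER BY a, b),
 * string_agg(y, ',' ORDER BY c)
 * FROM tab;
 *
 * the pathkeys (a, b) suit the first two aggregates, so they become the
 * best pathkeys and those two Aggrefs are marked aggpresorted = true,
 * while the ORDER BY c aggregate is left to sort its own input.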
3163 : */
3164 : static void
3165 2700 : adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3166 : {
3167 2700 : List *grouppathkeys = root->group_pathkeys;
3168 : List *bestpathkeys;
3169 : Bitmapset *bestaggs;
3170 : Bitmapset *unprocessed_aggs;
3171 : ListCell *lc;
3172 : int i;
3173 :
3174 : /* Shouldn't be here if there are grouping sets */
3175 : Assert(root->parse->groupingSets == NIL);
3176 : /* Shouldn't be here unless there are some ordered aggregates */
3177 : Assert(root->numOrderedAggs > 0);
3178 :
3179 : /* Do nothing if disabled */
3180 2700 : if (!enable_presorted_aggregate)
3181 6 : return;
3182 :
3183 : /*
3184 : * Make a first pass over all AggInfos to collect a Bitmapset containing
3185 : * the indexes of all AggInfos to be processed below.
3186 : */
3187 2694 : unprocessed_aggs = NULL;
3188 6072 : foreach(lc, root->agginfos)
3189 : {
3190 3378 : AggInfo *agginfo = lfirst_node(AggInfo, lc);
3191 3378 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3192 :
3193 3378 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3194 264 : continue;
3195 :
3196 : /* Skip unless there's a DISTINCT or ORDER BY clause */
3197 3114 : if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3198 300 : continue;
3199 :
3200 : /* Additional safety checks are needed if there's a FILTER clause */
3201 2814 : if (aggref->aggfilter != NULL)
3202 : {
3203 : ListCell *lc2;
3204 54 : bool allow_presort = true;
3205 :
3206 : /*
3207 : * When the Aggref has a FILTER clause, it's possible that the
3208 : * filter removes rows that cannot be sorted because the
3209 : * expression to sort by results in an error during its
3210 : * evaluation. This is a problem for presorting as that happens
3211 : * before the FILTER, whereas without presorting, the Aggregate
3212 : * node will apply the FILTER *before* sorting. So that we never
3213 : * try to sort anything that might error, here we aim to skip over
3214 : * any Aggrefs with arguments with expressions which, when
3215 : * evaluated, could cause an ERROR. Vars and Consts are ok. There
3216 : * may be more cases that should be allowed, but more thought
3217 : * needs to be given. Err on the side of caution.
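 *
 * A hypothetical example of the hazard:
 * SELECT sum(1 / x ORDER BY 1 / x) FILTER (WHERE x <> 0) FROM tab;
 * presorting would evaluate 1 / x for every row, including the x = 0
 * rows that the FILTER would have discarded, and so raise a
 * division-by-zero error. Since 1 / x is neither a Var nor a Const,
 * the check below refuses to presort this Aggref.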
3218 : */
3219 102 : foreach(lc2, aggref->args)
3220 : {
3221 72 : TargetEntry *tle = (TargetEntry *) lfirst(lc2);
3222 72 : Expr *expr = tle->expr;
3223 :
3224 84 : while (IsA(expr, RelabelType))
3225 12 : expr = (Expr *) (castNode(RelabelType, expr))->arg;
3226 :
3227 : /* Common case, Vars and Consts are ok */
3228 72 : if (IsA(expr, Var) || IsA(expr, Const))
3229 48 : continue;
3230 :
3231 : /* Unsupported. Don't try to presort for this Aggref */
3232 24 : allow_presort = false;
3233 24 : break;
3234 : }
3235 :
3236 : /* Skip unsupported Aggrefs */
3237 54 : if (!allow_presort)
3238 24 : continue;
3239 : }
3240 :
3241 2790 : unprocessed_aggs = bms_add_member(unprocessed_aggs,
3242 : foreach_current_index(lc));
3243 : }
3244 :
3245 : /*
3246 : * Now process all the unprocessed_aggs to find the best set of pathkeys
3247 : * for the given set of aggregates.
3248 : *
3249 : * On the first outer loop here 'bestaggs' will be empty. We'll populate
3250 : * this during the first loop using the pathkeys for the very first
3251 : * AggInfo, then taking any stronger pathkeys from any other AggInfos with
3252 : * a more strict set of compatible pathkeys. Once the outer loop is
3253 : * complete, we mark off all the aggregates with compatible pathkeys then
3254 : * remove those from the unprocessed_aggs and repeat the process to try to
3255 : * find another set of pathkeys that are suitable for a larger number of
3256 : * aggregates. The outer loop will stop when there are not enough
3257 : * unprocessed aggregates for it to be possible to find a set of pathkeys
3258 : * to suit a larger number of aggregates.
3259 : */
3260 2694 : bestpathkeys = NIL;
3261 2694 : bestaggs = NULL;
3262 5322 : while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3263 : {
3264 2628 : Bitmapset *aggindexes = NULL;
3265 2628 : List *currpathkeys = NIL;
3266 :
3267 2628 : i = -1;
3268 5724 : while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3269 : {
3270 3096 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3271 3096 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3272 : List *sortlist;
3273 : List *pathkeys;
3274 :
3275 3096 : if (aggref->aggdistinct != NIL)
3276 718 : sortlist = aggref->aggdistinct;
3277 : else
3278 2378 : sortlist = aggref->aggorder;
3279 :
3280 3096 : pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3281 : aggref->args);
3282 :
3283 : /*
3284 : * Ignore Aggrefs which have volatile functions in their ORDER BY
3285 : * or DISTINCT clause.
3286 : */
3287 3096 : if (has_volatile_pathkey(pathkeys))
3288 : {
3289 18 : unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3290 18 : continue;
3291 : }
3292 :
3293 : /*
3294 : * When not set yet, take the pathkeys from the first unprocessed
3295 : * aggregate.
3296 : */
3297 3078 : if (currpathkeys == NIL)
3298 : {
3299 2622 : currpathkeys = pathkeys;
3300 :
3301 : /* include the GROUP BY pathkeys, if they exist */
3302 2622 : if (grouppathkeys != NIL)
3303 276 : currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3304 : currpathkeys);
3305 :
3306 : /* record that we found pathkeys for this aggregate */
3307 2622 : aggindexes = bms_add_member(aggindexes, i);
3308 : }
3309 : else
3310 : {
3311 : /* now look for a stronger set of matching pathkeys */
3312 :
3313 : /* include the GROUP BY pathkeys, if they exist */
3314 456 : if (grouppathkeys != NIL)
3315 288 : pathkeys = append_pathkeys(list_copy(grouppathkeys),
3316 : pathkeys);
3317 :
3318 : /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3319 456 : switch (compare_pathkeys(currpathkeys, pathkeys))
3320 : {
3321 12 : case PATHKEYS_BETTER2:
3322 : /* 'pathkeys' are stronger, use these ones instead */
3323 12 : currpathkeys = pathkeys;
3324 : /* FALLTHROUGH */
3325 :
3326 66 : case PATHKEYS_BETTER1:
3327 : /* 'pathkeys' are less strict */
3328 : /* FALLTHROUGH */
3329 :
3330 : case PATHKEYS_EQUAL:
3331 : /* mark this aggregate as covered by 'currpathkeys' */
3332 66 : aggindexes = bms_add_member(aggindexes, i);
3333 66 : break;
3334 :
3335 390 : case PATHKEYS_DIFFERENT:
3336 390 : break;
3337 : }
3338 : }
3339 : }
3340 :
3341 : /* remove the aggregates that we've just processed */
3342 2628 : unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3343 :
3344 : /*
3345 : * If this pass included more aggregates than the previous best then
3346 : * use these ones as the best set.
3347 : */
3348 2628 : if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3349 : {
3350 2520 : bestaggs = aggindexes;
3351 2520 : bestpathkeys = currpathkeys;
3352 : }
3353 : }
3354 :
3355 : /*
3356 : * If we found any ordered aggregates, update root->group_pathkeys to add
3357 : * the best set of aggregate pathkeys. Note that bestpathkeys includes
3358 : * the original GROUP BY pathkeys already.
3359 : */
3360 2694 : if (bestpathkeys != NIL)
3361 2460 : root->group_pathkeys = bestpathkeys;
3362 :
3363 : /*
3364 : * Now that we've found the best set of aggregates we can set the
3365 : * presorted flag to indicate to the executor that it needn't bother
3366 : * performing a sort for these Aggrefs. We're able to do this now because
3367 : * there's no chance of a Hash Aggregate plan: create_grouping_paths
3368 : * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3369 : * of ordered aggregates.
3370 : */
3371 2694 : i = -1;
3372 5250 : while ((i = bms_next_member(bestaggs, i)) >= 0)
3373 : {
3374 2556 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3375 :
3376 5130 : foreach(lc, agginfo->aggrefs)
3377 : {
3378 2574 : Aggref *aggref = lfirst_node(Aggref, lc);
3379 :
3380 2574 : aggref->aggpresorted = true;
3381 : }
3382 : }
3383 : }
3384 :
3385 : /*
3386 : * Compute query_pathkeys and other pathkeys during plan generation
3387 : */
3388 : static void
3389 517168 : standard_qp_callback(PlannerInfo *root, void *extra)
3390 : {
3391 517168 : Query *parse = root->parse;
3392 517168 : standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3393 517168 : List *tlist = root->processed_tlist;
3394 517168 : List *activeWindows = qp_extra->activeWindows;
3395 :
3396 : /*
3397 : * Calculate pathkeys that represent grouping/ordering and/or ordered
3398 : * aggregate requirements.
3399 : */
3400 517168 : if (qp_extra->gset_data)
3401 : {
3402 : /*
3403 : * With grouping sets, just use the first RollupData's groupClause. We
3404 : * don't make any effort to optimize grouping clauses when there are
3405 : * grouping sets, nor can we combine aggregate ordering keys with
3406 : * grouping.
3407 : */
3408 872 : List *rollups = qp_extra->gset_data->rollups;
3409 872 : List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3410 :
3411 872 : if (grouping_is_sortable(groupClause))
3412 : {
3413 : bool sortable;
3414 :
3415 : /*
3416 : * The groupClause is logically below the grouping step. So if
3417 : * there is an RTE entry for the grouping step, we need to remove
3418 : * its RT index from the sort expressions before we make PathKeys
3419 : * for them.
3420 : */
3421 872 : root->group_pathkeys =
3422 872 : make_pathkeys_for_sortclauses_extended(root,
3423 : &groupClause,
3424 : tlist,
3425 : false,
3426 872 : parse->hasGroupRTE,
3427 : &sortable,
3428 : false);
3429 : Assert(sortable);
3430 872 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3431 : }
3432 : else
3433 : {
3434 0 : root->group_pathkeys = NIL;
3435 0 : root->num_groupby_pathkeys = 0;
3436 : }
3437 : }
3438 516296 : else if (parse->groupClause || root->numOrderedAggs > 0)
3439 6088 : {
3440 : /*
3441 : * With a plain GROUP BY list, we can remove any grouping items that
3442 : * are proven redundant by EquivalenceClass processing. For example,
3443 : * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3444 : * especially common cases, but they're nearly free to detect. Note
3445 : * that we remove redundant items from processed_groupClause but not
3446 : * the original parse->groupClause.
3447 : */
3448 : bool sortable;
3449 :
3450 : /*
3451 : * Convert group clauses into pathkeys. Set the ec_sortref field of
3452 : * EquivalenceClass'es if it's not set yet.
3453 : */
3454 6088 : root->group_pathkeys =
3455 6088 : make_pathkeys_for_sortclauses_extended(root,
3456 : &root->processed_groupClause,
3457 : tlist,
3458 : true,
3459 : false,
3460 : &sortable,
3461 : true);
3462 6088 : if (!sortable)
3463 : {
3464 : /* Can't sort; no point in considering aggregate ordering either */
3465 0 : root->group_pathkeys = NIL;
3466 0 : root->num_groupby_pathkeys = 0;
3467 : }
3468 : else
3469 : {
3470 6088 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3471 : /* If we have ordered aggs, consider adding onto group_pathkeys */
3472 6088 : if (root->numOrderedAggs > 0)
3473 2700 : adjust_group_pathkeys_for_groupagg(root);
3474 : }
3475 : }
3476 : else
3477 : {
3478 510208 : root->group_pathkeys = NIL;
3479 510208 : root->num_groupby_pathkeys = 0;
3480 : }
3481 :
3482 : /* We consider only the first (bottom) window in pathkeys logic */
3483 517168 : if (activeWindows != NIL)
3484 : {
3485 2378 : WindowClause *wc = linitial_node(WindowClause, activeWindows);
3486 :
3487 2378 : root->window_pathkeys = make_pathkeys_for_window(root,
3488 : wc,
3489 : tlist);
3490 : }
3491 : else
3492 514790 : root->window_pathkeys = NIL;
3493 :
3494 : /*
3495 : * As with GROUP BY, we can discard any DISTINCT items that are proven
3496 : * redundant by EquivalenceClass processing. The non-redundant list is
3497 : * kept in root->processed_distinctClause, leaving the original
3498 : * parse->distinctClause alone.
3499 : */
3500 517168 : if (parse->distinctClause)
3501 : {
3502 : bool sortable;
3503 :
3504 : /* Make a copy since pathkey processing can modify the list */
3505 2714 : root->processed_distinctClause = list_copy(parse->distinctClause);
3506 2714 : root->distinct_pathkeys =
3507 2714 : make_pathkeys_for_sortclauses_extended(root,
3508 : &root->processed_distinctClause,
3509 : tlist,
3510 : true,
3511 : false,
3512 : &sortable,
3513 : false);
3514 2714 : if (!sortable)
3515 6 : root->distinct_pathkeys = NIL;
3516 : }
3517 : else
3518 514454 : root->distinct_pathkeys = NIL;
3519 :
3520 517168 : root->sort_pathkeys =
3521 517168 : make_pathkeys_for_sortclauses(root,
3522 : parse->sortClause,
3523 : tlist);
3524 :
3525 : /* setting setop_pathkeys might be useful to the union planner */
3526 517168 : if (qp_extra->setop != NULL)
3527 : {
3528 : List *groupClauses;
3529 : bool sortable;
3530 :
3531 12278 : groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3532 :
3533 12278 : root->setop_pathkeys =
3534 12278 : make_pathkeys_for_sortclauses_extended(root,
3535 : &groupClauses,
3536 : tlist,
3537 : false,
3538 : false,
3539 : &sortable,
3540 : false);
3541 12278 : if (!sortable)
3542 208 : root->setop_pathkeys = NIL;
3543 : }
3544 : else
3545 504890 : root->setop_pathkeys = NIL;
3546 :
3547 : /*
3548 : * Figure out whether we want a sorted result from query_planner.
3549 : *
3550 : * If we have a sortable GROUP BY clause, then we want a result sorted
3551 : * properly for grouping. Otherwise, if we have window functions to
3552 : * evaluate, we try to sort for the first window. Otherwise, if there's a
3553 : * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3554 : * we try to produce output that's sufficiently well sorted for the
3555 : * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3556 : * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3557 : * for a set operation which can benefit from presorted results and have a
3558 : * sortable targetlist, we want to sort by the target list.
3559 : *
3560 : * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3561 : * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3562 : * that might just leave us failing to exploit an available sort order at
3563 : * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3564 : * much easier, since we know that the parser ensured that one is a
3565 : * superset of the other.
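 *
 * For example (hypothetical): in SELECT DISTINCT a, b FROM tab ORDER BY a,
 * distinct_pathkeys (a, b) is longer than sort_pathkeys (a), so we request
 * (a, b), which satisfies both the DISTINCT and the ORDER BY.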
3566 : */
3567 517168 : if (root->group_pathkeys)
3568 6604 : root->query_pathkeys = root->group_pathkeys;
3569 510564 : else if (root->window_pathkeys)
3570 2032 : root->query_pathkeys = root->window_pathkeys;
3571 1017064 : else if (list_length(root->distinct_pathkeys) >
3572 508532 : list_length(root->sort_pathkeys))
3573 2252 : root->query_pathkeys = root->distinct_pathkeys;
3574 506280 : else if (root->sort_pathkeys)
3575 69724 : root->query_pathkeys = root->sort_pathkeys;
3576 436556 : else if (root->setop_pathkeys != NIL)
3577 10878 : root->query_pathkeys = root->setop_pathkeys;
3578 : else
3579 425678 : root->query_pathkeys = NIL;
3580 517168 : }
3581 :
3582 : /*
3583 : * Estimate number of groups produced by grouping clauses (1 if not grouping)
3584 : *
3585 : * path_rows: number of output rows from scan/join step
3586 : * gd: grouping sets data including list of grouping sets and their clauses
3587 : * target_list: target list containing group clause references
3588 : *
3589 : * If doing grouping sets, we also annotate the gsets data with the estimates
3590 : * for each set and each individual rollup list, with a view to later
3591 : * determining whether some combination of them could be hashed instead.
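 *
 * For example (hypothetical numbers): a rollup (a,b) -> (a) -> () whose
 * three grouping sets are estimated at 1000, 100 and 1 groups gets
 * rollup->numGroups = 1101, and dNumGroups is the total across all
 * rollups plus any hashed-only unsortable sets.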
3592 : */
3593 : static double
3594 42396 : get_number_of_groups(PlannerInfo *root,
3595 : double path_rows,
3596 : grouping_sets_data *gd,
3597 : List *target_list)
3598 : {
3599 42396 : Query *parse = root->parse;
3600 : double dNumGroups;
3601 :
3602 42396 : if (parse->groupClause)
3603 : {
3604 : List *groupExprs;
3605 :
3606 6942 : if (parse->groupingSets)
3607 : {
3608 : /* Add up the estimates for each grouping set */
3609 : ListCell *lc;
3610 :
3611 : Assert(gd); /* keep Coverity happy */
3612 :
3613 830 : dNumGroups = 0;
3614 :
3615 2226 : foreach(lc, gd->rollups)
3616 : {
3617 1396 : RollupData *rollup = lfirst_node(RollupData, lc);
3618 : ListCell *lc2;
3619 : ListCell *lc3;
3620 :
3621 1396 : groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3622 : target_list);
3623 :
3624 1396 : rollup->numGroups = 0.0;
3625 :
3626 4040 : forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3627 : {
3628 2644 : List *gset = (List *) lfirst(lc2);
3629 2644 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3630 2644 : double numGroups = estimate_num_groups(root,
3631 : groupExprs,
3632 : path_rows,
3633 : &gset,
3634 : NULL);
3635 :
3636 2644 : gs->numGroups = numGroups;
3637 2644 : rollup->numGroups += numGroups;
3638 : }
3639 :
3640 1396 : dNumGroups += rollup->numGroups;
3641 : }
3642 :
3643 830 : if (gd->hash_sets_idx)
3644 : {
3645 : ListCell *lc2;
3646 :
3647 36 : gd->dNumHashGroups = 0;
3648 :
3649 36 : groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3650 : target_list);
3651 :
3652 78 : forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3653 : {
3654 42 : List *gset = (List *) lfirst(lc);
3655 42 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3656 42 : double numGroups = estimate_num_groups(root,
3657 : groupExprs,
3658 : path_rows,
3659 : &gset,
3660 : NULL);
3661 :
3662 42 : gs->numGroups = numGroups;
3663 42 : gd->dNumHashGroups += numGroups;
3664 : }
3665 :
3666 36 : dNumGroups += gd->dNumHashGroups;
3667 : }
3668 : }
3669 : else
3670 : {
3671 : /* Plain GROUP BY -- estimate based on optimized groupClause */
3672 6112 : groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3673 : target_list);
3674 :
3675 6112 : dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3676 : NULL, NULL);
3677 : }
3678 : }
3679 35454 : else if (parse->groupingSets)
3680 : {
3681 : /* Empty grouping sets ... one result row for each one */
3682 42 : dNumGroups = list_length(parse->groupingSets);
3683 : }
3684 35412 : else if (parse->hasAggs || root->hasHavingQual)
3685 : {
3686 : /* Plain aggregation, one result row */
3687 35412 : dNumGroups = 1;
3688 : }
3689 : else
3690 : {
3691 : /* Not grouping */
3692 0 : dNumGroups = 1;
3693 : }
3694 :
3695 42396 : return dNumGroups;
3696 : }
3697 :
3698 : /*
3699 : * create_grouping_paths
3700 : *
3701 : * Build a new upperrel containing Paths for grouping and/or aggregation.
3702 : * Along the way, we also build an upperrel for Paths which are partially
3703 : * grouped and/or aggregated. A partially grouped and/or aggregated path
3704 : * needs a FinalizeAggregate node to complete the aggregation. Currently,
3705 : * the only partially grouped paths we build are also partial paths; that
3706 : * is, they need a Gather and then a FinalizeAggregate.
3707 : *
3708 : * input_rel: contains the source-data Paths
3709 : * target: the pathtarget for the result Paths to compute
3710 : * gd: grouping sets data including list of grouping sets and their clauses
3711 : *
3712 : * Note: all Paths in input_rel are expected to return the target computed
3713 : * by make_group_input_target.
3714 : */
3715 : static RelOptInfo *
3716 39136 : create_grouping_paths(PlannerInfo *root,
3717 : RelOptInfo *input_rel,
3718 : PathTarget *target,
3719 : bool target_parallel_safe,
3720 : grouping_sets_data *gd)
3721 : {
3722 39136 : Query *parse = root->parse;
3723 : RelOptInfo *grouped_rel;
3724 : RelOptInfo *partially_grouped_rel;
3725 : AggClauseCosts agg_costs;
3726 :
3727 234816 : MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3728 39136 : get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3729 :
3730 : /*
3731 : * Create grouping relation to hold fully aggregated grouping and/or
3732 : * aggregation paths.
3733 : */
3734 39136 : grouped_rel = make_grouping_rel(root, input_rel, target,
3735 : target_parallel_safe, parse->havingQual);
3736 :
3737 : /*
3738 : * Create either paths for a degenerate grouping or paths for ordinary
3739 : * grouping, as appropriate.
3740 : */
3741 39136 : if (is_degenerate_grouping(root))
3742 18 : create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3743 : else
3744 : {
3745 39118 : int flags = 0;
3746 : GroupPathExtraData extra;
3747 :
3748 : /*
3749 : * Determine whether it's possible to perform sort-based
3750 : * implementations of grouping. (Note that if processed_groupClause
3751 : * is empty, grouping_is_sortable() is trivially true, and all the
3752 : * pathkeys_contained_in() tests will succeed too, so that we'll
3753 : * consider every surviving input path.)
3754 : *
3755 : * If we have grouping sets, we might be able to sort some but not all
3756 : * of them; in this case, we need can_sort to be true as long as we
3757 : * must consider any sorted-input plan.
3758 : */
3759 39118 : if ((gd && gd->rollups != NIL)
3760 38252 : || grouping_is_sortable(root->processed_groupClause))
3761 39112 : flags |= GROUPING_CAN_USE_SORT;
3762 :
3763 : /*
3764 : * Determine whether we should consider hash-based implementations of
3765 : * grouping.
3766 : *
3767 : * Hashed aggregation only applies if we're grouping. If we have
3768 : * grouping sets, some groups might be hashable but others not; in
3769 : * this case we set can_hash true as long as there is nothing globally
3770 : * preventing us from hashing (and we should therefore consider plans
3771 : * with hashes).
3772 : *
3773 : * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3774 : * BY aggregates. (Doing so would imply storing *all* the input
3775 : * values in the hash table, and/or running many sorts in parallel,
3776 : * either of which seems like a certain loser.) We similarly don't
3777 : * support ordered-set aggregates in hashed aggregation, but that case
3778 : * is also included in the numOrderedAggs count.
3779 : *
3780 : * Note: grouping_is_hashable() is much more expensive to check than
3781 : * the other gating conditions, so we want to do it last.
3782 : */
3783 39118 : if ((parse->groupClause != NIL &&
3784 8644 : root->numOrderedAggs == 0 &&
3785 4182 : (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3786 4178 : flags |= GROUPING_CAN_USE_HASH;
3787 :
3788 : /*
3789 : * Determine whether partial aggregation is possible.
3790 : */
3791 39118 : if (can_partial_agg(root))
3792 34160 : flags |= GROUPING_CAN_PARTIAL_AGG;
3793 :
3794 39118 : extra.flags = flags;
3795 39118 : extra.target_parallel_safe = target_parallel_safe;
3796 39118 : extra.havingQual = parse->havingQual;
3797 39118 : extra.targetList = parse->targetList;
3798 39118 : extra.partial_costs_set = false;
3799 :
3800 : /*
3801 : * Determine whether partitionwise aggregation is in theory possible.
3802 : * It can be disabled by the user, and for now, we don't try to
3803 : * support grouping sets. create_ordinary_grouping_paths() will check
3804 : * additional conditions, such as whether input_rel is partitioned.
3805 : */
3806 39118 : if (enable_partitionwise_aggregate && !parse->groupingSets)
3807 556 : extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3808 : else
3809 38562 : extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3810 :
3811 39118 : create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3812 : &agg_costs, gd, &extra,
3813 : &partially_grouped_rel);
3814 : }
3815 :
3816 39130 : set_cheapest(grouped_rel);
3817 39130 : return grouped_rel;
3818 : }
3819 :
3820 : /*
3821 : * make_grouping_rel
3822 : *
3823 : * Create a new grouping rel and set basic properties.
3824 : *
3825 : * input_rel represents the underlying scan/join relation.
3826 : * target is the output expected from the grouping relation.
3827 : */
3828 : static RelOptInfo *
3829 40630 : make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3830 : PathTarget *target, bool target_parallel_safe,
3831 : Node *havingQual)
3832 : {
3833 : RelOptInfo *grouped_rel;
3834 :
3835 40630 : if (IS_OTHER_REL(input_rel))
3836 : {
3837 1494 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3838 : input_rel->relids);
3839 1494 : grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3840 : }
3841 : else
3842 : {
3843 : /*
3844 : * By tradition, the relids set for the main grouping relation is
3845 : * NULL. (This could be changed, but might require adjustments
3846 : * elsewhere.)
3847 : */
3848 39136 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3849 : }
3850 :
3851 : /* Set target. */
3852 40630 : grouped_rel->reltarget = target;
3853 :
3854 : /*
3855 : * If the input relation is not parallel-safe, then the grouped relation
3856 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3857 : * target list and HAVING quals are parallel-safe.
3858 : */
3859 68744 : if (input_rel->consider_parallel && target_parallel_safe &&
3860 28114 : is_parallel_safe(root, (Node *) havingQual))
3861 28096 : grouped_rel->consider_parallel = true;
3862 :
3863 : /*
3864 : * If the input rel belongs to a single FDW, so does the grouped rel.
3865 : */
3866 40630 : grouped_rel->serverid = input_rel->serverid;
3867 40630 : grouped_rel->userid = input_rel->userid;
3868 40630 : grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3869 40630 : grouped_rel->fdwroutine = input_rel->fdwroutine;
3870 :
3871 40630 : return grouped_rel;
3872 : }
3873 :
3874 : /*
3875 : * is_degenerate_grouping
3876 : *
3877 : * A degenerate grouping is one in which the query has a HAVING qual and/or
3878 : * grouping sets, but no aggregates and no GROUP BY (which implies that the
3879 : * grouping sets are all empty).
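 *
 * A hypothetical example: SELECT 1 FROM tab HAVING random() < 0.5;
 * with no aggregates and no GROUP BY, the whole input forms one group,
 * and the query returns either zero rows or one row depending on the
 * HAVING qual.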
3880 : */
3881 : static bool
3882 39136 : is_degenerate_grouping(PlannerInfo *root)
3883 : {
3884 39136 : Query *parse = root->parse;
3885 :
3886 38118 : return (root->hasHavingQual || parse->groupingSets) &&
3887 77254 : !parse->hasAggs && parse->groupClause == NIL;
3888 : }
3889 :
3890 : /*
3891 : * create_degenerate_grouping_paths
3892 : *
3893 : * When the grouping is degenerate (see is_degenerate_grouping), we are
3894 : * supposed to emit either zero or one row for each grouping set depending on
3895 : * whether HAVING succeeds. Furthermore, there cannot be any variables in
3896 : * either HAVING or the targetlist, so we actually do not need the FROM table
3897 : * at all! We can just throw away the plan-so-far and generate a Result node.
3898 : * This is a sufficiently unusual corner case that it's not worth contorting
3899 : * the structure of this module to avoid having to generate the earlier paths
3900 : * in the first place.
3901 : */
3902 : static void
3903 18 : create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3904 : RelOptInfo *grouped_rel)
3905 : {
3906 18 : Query *parse = root->parse;
3907 : int nrows;
3908 : Path *path;
3909 :
3910 18 : nrows = list_length(parse->groupingSets);
3911 18 : if (nrows > 1)
3912 : {
3913 : /*
3914 : * Doesn't seem worthwhile writing code to cons up a generate_series
3915 : * or a values scan to emit multiple rows. Instead just make N clones
3916 : * and append them. (With a volatile HAVING clause, this means you
3917 : * might get between 0 and N output rows. Offhand I think that's
3918 : * desired.)
3919 : */
3920 0 : List *paths = NIL;
3921 :
3922 0 : while (--nrows >= 0)
3923 : {
3924 : path = (Path *)
3925 0 : create_group_result_path(root, grouped_rel,
3926 0 : grouped_rel->reltarget,
3927 0 : (List *) parse->havingQual);
3928 0 : paths = lappend(paths, path);
3929 : }
3930 : path = (Path *)
3931 0 : create_append_path(root,
3932 : grouped_rel,
3933 : paths,
3934 : NIL,
3935 : NIL,
3936 : NULL,
3937 : 0,
3938 : false,
3939 : -1);
3940 : }
3941 : else
3942 : {
3943 : /* No grouping sets, or just one, so one output row */
3944 : path = (Path *)
3945 18 : create_group_result_path(root, grouped_rel,
3946 18 : grouped_rel->reltarget,
3947 18 : (List *) parse->havingQual);
3948 : }
3949 :
3950 18 : add_path(grouped_rel, path);
3951 18 : }
3952 :
3953 : /*
3954 : * create_ordinary_grouping_paths
3955 : *
3956 : * Create grouping paths for the ordinary (that is, non-degenerate) case.
3957 : *
3958 : * We need to consider sorted and hashed aggregation in the same function,
3959 : * because otherwise (1) it would be harder to throw an appropriate error
3960 : * message if neither way works, and (2) we should not allow hashtable size
3961 : * considerations to dissuade us from using hashing if sorting is not possible.
3962 : *
3963 : * *partially_grouped_rel_p will be set to the partially grouped rel which this
3964 : * function creates, or to NULL if it doesn't create one.
3965 : */
3966 : static void
3967 40612 : create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3968 : RelOptInfo *grouped_rel,
3969 : const AggClauseCosts *agg_costs,
3970 : grouping_sets_data *gd,
3971 : GroupPathExtraData *extra,
3972 : RelOptInfo **partially_grouped_rel_p)
3973 : {
3974 40612 : Path *cheapest_path = input_rel->cheapest_total_path;
3975 40612 : RelOptInfo *partially_grouped_rel = NULL;
3976 : double dNumGroups;
3977 40612 : PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
3978 :
3979 : /*
3980 : * If this is the topmost grouping relation or if the parent relation is
3981 : * doing some form of partitionwise aggregation, then we may be able to do
3982 : * it at this level also. However, if the input relation is not
3983 : * partitioned, partitionwise aggregate is impossible.
3984 : */
3985 40612 : if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3986 2050 : IS_PARTITIONED_REL(input_rel))
3987 : {
3988 : /*
3989 : * If this is the topmost relation or if the parent relation is doing
3990 : * full partitionwise aggregation, then we can do full partitionwise
3991 : * aggregation provided that the GROUP BY clause contains all of the
3992 : * partitioning columns at this level and the collation used by GROUP
3993 : * BY matches the partitioning collation. Otherwise, we can do at
3994 : * most partial partitionwise aggregation. But if partial aggregation
3995 : * is not supported in general then we can't use it for partitionwise
3996 : * aggregation either.
3997 : *
3998 : * Check parse->groupClause not processed_groupClause, because it's
3999 : * okay if some of the partitioning columns were proved redundant.
4000 : */
4001 1160 : if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4002 556 : group_by_has_partkey(input_rel, extra->targetList,
4003 556 : root->parse->groupClause))
4004 320 : patype = PARTITIONWISE_AGGREGATE_FULL;
4005 284 : else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4006 242 : patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4007 : else
4008 42 : patype = PARTITIONWISE_AGGREGATE_NONE;
4009 : }
4010 :
4011 : /*
4012 : * Before generating paths for grouped_rel, we first generate any possible
4013 : * partially grouped paths; that way, later code can easily consider both
4014 : * parallel and non-parallel approaches to grouping.
4015 : */
4016 40612 : if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4017 : {
4018 : bool force_rel_creation;
4019 :
4020 : /*
4021 : * If we're doing partitionwise aggregation at this level, force
4022 : * creation of a partially_grouped_rel so we can add partitionwise
4023 : * paths to it.
4024 : */
4025 35582 : force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4026 :
4027 : partially_grouped_rel =
4028 35582 : create_partial_grouping_paths(root,
4029 : grouped_rel,
4030 : input_rel,
4031 : gd,
4032 : extra,
4033 : force_rel_creation);
4034 : }
4035 :
4036 : /* Set out parameter. */
4037 40612 : *partially_grouped_rel_p = partially_grouped_rel;
4038 :
4039 : /* Apply partitionwise aggregation technique, if possible. */
4040 40612 : if (patype != PARTITIONWISE_AGGREGATE_NONE)
4041 562 : create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4042 : partially_grouped_rel, agg_costs,
4043 : gd, patype, extra);
4044 :
4045 : /* If we are doing partial aggregation only, return. */
4046 40612 : if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4047 : {
4048 : Assert(partially_grouped_rel);
4049 :
4050 618 : if (partially_grouped_rel->pathlist)
4051 618 : set_cheapest(partially_grouped_rel);
4052 :
4053 618 : return;
4054 : }
4055 :
4056 : /* Gather any partially grouped partial paths. */
4057 39994 : if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4058 : {
4059 1484 : gather_grouping_paths(root, partially_grouped_rel);
4060 1484 : set_cheapest(partially_grouped_rel);
4061 : }
4062 :
4063 : /*
4064 : * Estimate number of groups.
4065 : */
4066 39994 : dNumGroups = get_number_of_groups(root,
4067 : cheapest_path->rows,
4068 : gd,
4069 : extra->targetList);
4070 :
4071 : /* Build final grouping paths */
4072 39994 : add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4073 : partially_grouped_rel, agg_costs, gd,
4074 : dNumGroups, extra);
4075 :
4076 : /* Give a helpful error if we failed to find any implementation */
4077 39994 : if (grouped_rel->pathlist == NIL)
4078 6 : ereport(ERROR,
4079 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4080 : errmsg("could not implement GROUP BY"),
4081 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4082 :
4083 : /*
4084 : * If there is an FDW that's responsible for all baserels of the query,
4085 : * let it consider adding ForeignPaths.
4086 : */
4087 39988 : if (grouped_rel->fdwroutine &&
4088 336 : grouped_rel->fdwroutine->GetForeignUpperPaths)
4089 336 : grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4090 : input_rel, grouped_rel,
4091 : extra);
4092 :
4093 : /* Let extensions possibly add some more paths */
4094 39988 : if (create_upper_paths_hook)
4095 0 : (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4096 : input_rel, grouped_rel,
4097 : extra);
4098 : }
4099 :
4100 : /*
4101 : * For a given input path, consider the possible ways of doing grouping sets on
4102 : * it, by combinations of hashing and sorting. This can be called multiple
4103 : * times, so it's important that it not scribble on input. No result is
4104 : * returned, but any generated paths are added to grouped_rel.
4105 : */
4106 : static void
4107 1732 : consider_groupingsets_paths(PlannerInfo *root,
4108 : RelOptInfo *grouped_rel,
4109 : Path *path,
4110 : bool is_sorted,
4111 : bool can_hash,
4112 : grouping_sets_data *gd,
4113 : const AggClauseCosts *agg_costs,
4114 : double dNumGroups)
4115 : {
4116 1732 : Query *parse = root->parse;
4117 1732 : Size hash_mem_limit = get_hash_memory_limit();
4118 :
4119 : /*
4120 : * If we're not being offered sorted input, then only consider plans that
4121 : * can be done entirely by hashing.
4122 : *
4123 : * We can hash everything if it looks like it'll fit in hash_mem. But if
4124 : * the input is actually sorted despite not being advertised as such, we
4125 : * prefer to make use of that in order to use less memory.
4126 : *
4127 : * If none of the grouping sets are sortable, then ignore the hash_mem
4128 : * limit and generate a path anyway, since otherwise we'll just fail.
4129 : */
4130 1732 : if (!is_sorted)
4131 : {
4132 794 : List *new_rollups = NIL;
4133 794 : RollupData *unhashed_rollup = NULL;
4134 : List *sets_data;
4135 794 : List *empty_sets_data = NIL;
4136 794 : List *empty_sets = NIL;
4137 : ListCell *lc;
4138 794 : ListCell *l_start = list_head(gd->rollups);
4139 794 : AggStrategy strat = AGG_HASHED;
4140 : double hashsize;
4141 794 : double exclude_groups = 0.0;
4142 :
4143 : Assert(can_hash);
4144 :
4145 : /*
4146 : * If the input is coincidentally sorted usefully (which can happen
4147 : * even if is_sorted is false, since is_sorted only reports whether our
4148 : * caller has set up the sorting for us), then save some hashtable space by
4149 : * making use of that. But we need to watch out for degenerate cases:
4150 : *
4151 : * 1) If there are any empty grouping sets, then group_pathkeys might
4152 : * be NIL if all non-empty grouping sets are unsortable. In this case,
4153 : * there will be a rollup containing only empty groups, and the
4154 : * pathkeys_contained_in test is vacuously true; this is ok.
4155 : *
4156 : * XXX: the above relies on the fact that group_pathkeys is generated
4157 : * from the first rollup. If we add the ability to consider multiple
4158 : * sort orders for grouping input, this assumption might fail.
4159 : *
4160 : * 2) If there are no empty sets and only unsortable sets, then the
4161 : * rollups list will be empty (and thus l_start == NULL), and
4162 : * group_pathkeys will be NIL; we must ensure that the vacuously-true
4163 : * pathkeys_contained_in test doesn't cause us to crash.
4164 : */
4165 1582 : if (l_start != NULL &&
4166 788 : pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4167 : {
4168 12 : unhashed_rollup = lfirst_node(RollupData, l_start);
4169 12 : exclude_groups = unhashed_rollup->numGroups;
4170 12 : l_start = lnext(gd->rollups, l_start);
4171 : }
4172 :
4173 794 : hashsize = estimate_hashagg_tablesize(root,
4174 : path,
4175 : agg_costs,
4176 : dNumGroups - exclude_groups);
4177 :
4178 : /*
4179 : * gd->rollups is empty if we have only unsortable columns to work
4180 : * with. Override hash_mem in that case; otherwise, we'll rely on the
4181 : * sorted-input case to generate usable mixed paths.
4182 : */
4183 794 : if (hashsize > hash_mem_limit && gd->rollups)
4184 18 : return; /* nope, won't fit */
4185 :
4186 : /*
4187 : * We need to burst the existing rollups list into individual grouping
4188 : * sets and recompute a groupClause for each set.
4189 : */
4190 776 : sets_data = list_copy(gd->unsortable_sets);
4191 :
4192 1980 : for_each_cell(lc, gd->rollups, l_start)
4193 : {
4194 1228 : RollupData *rollup = lfirst_node(RollupData, lc);
4195 :
4196 : /*
4197 : * If we find an unhashable rollup that's not been skipped by the
4198 : * "actually sorted" check above, we can't cope; we'd need sorted
4199 : * input (with a different sort order) but we can't get that here.
4200 : * So bail out; we'll get a valid path from the is_sorted case
4201 : * instead.
4202 : *
4203 : * The mere presence of empty grouping sets doesn't make a rollup
4204 : * unhashable (see preprocess_grouping_sets); we handle those
4205 : * specially below.
4206 : */
4207 1228 : if (!rollup->hashable)
4208 24 : return;
4209 :
4210 1204 : sets_data = list_concat(sets_data, rollup->gsets_data);
4211 : }
4212 3162 : foreach(lc, sets_data)
4213 : {
4214 2410 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4215 2410 : List *gset = gs->set;
4216 : RollupData *rollup;
4217 :
4218 2410 : if (gset == NIL)
4219 : {
4220 : /* Empty grouping sets can't be hashed. */
4221 484 : empty_sets_data = lappend(empty_sets_data, gs);
4222 484 : empty_sets = lappend(empty_sets, NIL);
4223 : }
4224 : else
4225 : {
4226 1926 : rollup = makeNode(RollupData);
4227 :
4228 1926 : rollup->groupClause = preprocess_groupclause(root, gset);
4229 1926 : rollup->gsets_data = list_make1(gs);
4230 1926 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4231 : rollup->gsets_data,
4232 : gd->tleref_to_colnum_map);
4233 1926 : rollup->numGroups = gs->numGroups;
4234 1926 : rollup->hashable = true;
4235 1926 : rollup->is_hashed = true;
4236 1926 : new_rollups = lappend(new_rollups, rollup);
4237 : }
4238 : }
4239 :
4240 : /*
4241 : * If we didn't find anything nonempty to hash, then bail. We'll
4242 : * generate a path from the is_sorted case.
4243 : */
4244 752 : if (new_rollups == NIL)
4245 0 : return;
4246 :
4247 : /*
4248 : * If there were empty grouping sets they should have been in the
4249 : * first rollup.
4250 : */
4251 : Assert(!unhashed_rollup || !empty_sets);
4252 :
4253 752 : if (unhashed_rollup)
4254 : {
4255 12 : new_rollups = lappend(new_rollups, unhashed_rollup);
4256 12 : strat = AGG_MIXED;
4257 : }
4258 740 : else if (empty_sets)
4259 : {
4260 436 : RollupData *rollup = makeNode(RollupData);
4261 :
4262 436 : rollup->groupClause = NIL;
4263 436 : rollup->gsets_data = empty_sets_data;
4264 436 : rollup->gsets = empty_sets;
4265 436 : rollup->numGroups = list_length(empty_sets);
4266 436 : rollup->hashable = false;
4267 436 : rollup->is_hashed = false;
4268 436 : new_rollups = lappend(new_rollups, rollup);
4269 436 : strat = AGG_MIXED;
4270 : }
4271 :
4272 752 : add_path(grouped_rel, (Path *)
4273 752 : create_groupingsets_path(root,
4274 : grouped_rel,
4275 : path,
4276 752 : (List *) parse->havingQual,
4277 : strat,
4278 : new_rollups,
4279 : agg_costs));
4280 752 : return;
4281 : }
4282 :
4283 : /*
4284 : * If we have sorted input but nothing we can do with it, bail.
4285 : */
4286 938 : if (gd->rollups == NIL)
4287 0 : return;
4288 :
4289 : /*
4290 : * Given sorted input, we try to make two paths: one sorted and one mixed
4291 : * sort/hash. (We need to try both because hashagg might be disabled, or
4292 : * some columns might not be sortable.)
4293 : *
4294 : * can_hash is passed in as false if some obstacle elsewhere (such as
4295 : * ordered aggs) means that we shouldn't consider hashing at all.
4296 : */
4297 938 : if (can_hash && gd->any_hashable)
4298 : {
4299 860 : List *rollups = NIL;
4300 860 : List *hash_sets = list_copy(gd->unsortable_sets);
4301 860 : double availspace = hash_mem_limit;
4302 : ListCell *lc;
4303 :
4304 : /*
4305 : * Account first for space needed for groups we can't sort at all.
4306 : */
4307 860 : availspace -= estimate_hashagg_tablesize(root,
4308 : path,
4309 : agg_costs,
4310 : gd->dNumHashGroups);
4311 :
4312 860 : if (availspace > 0 && list_length(gd->rollups) > 1)
4313 : {
4314 : double scale;
4315 444 : int num_rollups = list_length(gd->rollups);
4316 : int k_capacity;
4317 444 : int *k_weights = palloc(num_rollups * sizeof(int));
4318 444 : Bitmapset *hash_items = NULL;
4319 : int i;
4320 :
4321 : /*
4322 : * We treat this as a knapsack problem: the knapsack capacity
4323 : * represents hash_mem, the item weights are the estimated memory
4324 : * usage of the hashtables needed to implement a single rollup,
4325 : * and we really ought to use the cost saving as the item value;
4326 : * however, currently the costs assigned to sort nodes don't
4327 : * reflect the comparison costs well, and so we treat all items as
4328 : * of equal value (each rollup we hash instead saves us one sort).
4329 : *
4330 : * To use the discrete knapsack, we need to scale the values to a
4331 : * reasonably small bounded range. We choose to allow a 5% error
4332 : * margin; we have no more than 4096 rollups in the worst possible
4333 : * case, which with a 5% error margin will require a bit over 42MB
4334 : * of workspace. (Anyone wanting to plan queries that complex had
4335 : * better have the memory for it. In more reasonable cases, with
4336 : * no more than a couple of dozen rollups, the memory usage will
4337 : * be negligible.)
4338 : *
4339 : * k_capacity is naturally bounded, but we clamp the values for
4340 : * scale and weight (below) to avoid overflows or underflows (or
4341 : * uselessly trying to use a scale factor less than 1 byte).
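 *
 * Worked example with made-up numbers: availspace = 4MB and
 * num_rollups = 3 give scale = Max(4194304 / 60.0, 1.0) ~= 69905, so
 * k_capacity = 60, and each rollup's estimated hashtable size is
 * rounded to a whole number of ~68kB units, i.e. roughly 5% of an
 * average item's weight.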
4342 : */
4343 444 : scale = Max(availspace / (20.0 * num_rollups), 1.0);
4344 444 : k_capacity = (int) floor(availspace / scale);
4345 :
4346 : /*
4347 : * We leave the first rollup out of consideration since it's the
4348 : * one that matches the input sort order. We assign indexes "i"
4349 : * to only those entries considered for hashing; the second loop,
4350 : * below, must use the same condition.
4351 : */
4352 444 : i = 0;
4353 1140 : for_each_from(lc, gd->rollups, 1)
4354 : {
4355 696 : RollupData *rollup = lfirst_node(RollupData, lc);
4356 :
4357 696 : if (rollup->hashable)
4358 : {
4359 696 : double sz = estimate_hashagg_tablesize(root,
4360 : path,
4361 : agg_costs,
4362 : rollup->numGroups);
4363 :
4364 : /*
4365 : * If sz is enormous, but hash_mem (and hence scale) is
4366 : * small, avoid integer overflow here.
4367 : */
4368 696 : k_weights[i] = (int) Min(floor(sz / scale),
4369 : k_capacity + 1.0);
4370 696 : ++i;
4371 : }
4372 : }
4373 :
4374 : /*
4375 : * Apply knapsack algorithm; compute the set of items which
4376 : * maximizes the value stored (in this case the number of sorts
4377 : * saved) while keeping the total size (approximately) within
4378 : * capacity.
4379 : */
4380 444 : if (i > 0)
4381 444 : hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4382 :
4383 444 : if (!bms_is_empty(hash_items))
4384 : {
4385 444 : rollups = list_make1(linitial(gd->rollups));
4386 :
4387 444 : i = 0;
4388 1140 : for_each_from(lc, gd->rollups, 1)
4389 : {
4390 696 : RollupData *rollup = lfirst_node(RollupData, lc);
4391 :
4392 696 : if (rollup->hashable)
4393 : {
4394 696 : if (bms_is_member(i, hash_items))
4395 660 : hash_sets = list_concat(hash_sets,
4396 660 : rollup->gsets_data);
4397 : else
4398 36 : rollups = lappend(rollups, rollup);
4399 696 : ++i;
4400 : }
4401 : else
4402 0 : rollups = lappend(rollups, rollup);
4403 : }
4404 : }
4405 : }
4406 :
4407 860 : if (!rollups && hash_sets)
4408 24 : rollups = list_copy(gd->rollups);
4409 :
4410 1660 : foreach(lc, hash_sets)
4411 : {
4412 800 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4413 800 : RollupData *rollup = makeNode(RollupData);
4414 :
4415 : Assert(gs->set != NIL);
4416 :
4417 800 : rollup->groupClause = preprocess_groupclause(root, gs->set);
4418 800 : rollup->gsets_data = list_make1(gs);
4419 800 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4420 : rollup->gsets_data,
4421 : gd->tleref_to_colnum_map);
4422 800 : rollup->numGroups = gs->numGroups;
4423 800 : rollup->hashable = true;
4424 800 : rollup->is_hashed = true;
4425 800 : rollups = lcons(rollup, rollups);
4426 : }
4427 :
4428 860 : if (rollups)
4429 : {
4430 468 : add_path(grouped_rel, (Path *)
4431 468 : create_groupingsets_path(root,
4432 : grouped_rel,
4433 : path,
4434 468 : (List *) parse->havingQual,
4435 : AGG_MIXED,
4436 : rollups,
4437 : agg_costs));
4438 : }
4439 : }
4440 :
4441 : /*
4442 : * Now try the simple sorted case.
4443 : */
4444 938 : if (!gd->unsortable_sets)
4445 908 : add_path(grouped_rel, (Path *)
4446 908 : create_groupingsets_path(root,
4447 : grouped_rel,
4448 : path,
4449 908 : (List *) parse->havingQual,
4450 : AGG_SORTED,
4451 : gd->rollups,
4452 : agg_costs));
4453 : }
4454 :
4455 : /*
4456 : * create_window_paths
4457 : *
4458 : * Build a new upperrel containing Paths for window-function evaluation.
4459 : *
4460 : * input_rel: contains the source-data Paths
4461 : * input_target: result of make_window_input_target
4462 : * output_target: what the topmost WindowAggPath should return
4463 : * wflists: result of find_window_functions
4464 : * activeWindows: result of select_active_windows
4465 : *
4466 : * Note: all Paths in input_rel are expected to return input_target.
4467 : */
4468 : static RelOptInfo *
4469 2378 : create_window_paths(PlannerInfo *root,
4470 : RelOptInfo *input_rel,
4471 : PathTarget *input_target,
4472 : PathTarget *output_target,
4473 : bool output_target_parallel_safe,
4474 : WindowFuncLists *wflists,
4475 : List *activeWindows)
4476 : {
4477 : RelOptInfo *window_rel;
4478 : ListCell *lc;
4479 :
4480 : /* For now, do all work in the (WINDOW, NULL) upperrel */
4481 2378 : window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4482 :
4483 : /*
4484 : * If the input relation is not parallel-safe, then the window relation
4485 : * can't be parallel-safe, either. Otherwise, we need to examine the
4486 : * target list and active windows for non-parallel-safe constructs.
4487 : */
4488 2378 : if (input_rel->consider_parallel && output_target_parallel_safe &&
4489 0 : is_parallel_safe(root, (Node *) activeWindows))
4490 0 : window_rel->consider_parallel = true;
4491 :
4492 : /*
4493 : * If the input rel belongs to a single FDW, so does the window rel.
4494 : */
4495 2378 : window_rel->serverid = input_rel->serverid;
4496 2378 : window_rel->userid = input_rel->userid;
4497 2378 : window_rel->useridiscurrent = input_rel->useridiscurrent;
4498 2378 : window_rel->fdwroutine = input_rel->fdwroutine;
4499 :
4500 : /*
4501 : * Consider computing window functions starting from the existing
4502 : * cheapest-total path (which will likely require a sort) as well as any
4503 : * existing paths that satisfy or partially satisfy root->window_pathkeys.
4504 : */
4505 5078 : foreach(lc, input_rel->pathlist)
4506 : {
4507 2700 : Path *path = (Path *) lfirst(lc);
4508 : int presorted_keys;
4509 :
4510 3022 : if (path == input_rel->cheapest_total_path ||
4511 322 : pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4512 140 : &presorted_keys) ||
4513 140 : presorted_keys > 0)
4514 2586 : create_one_window_path(root,
4515 : window_rel,
4516 : path,
4517 : input_target,
4518 : output_target,
4519 : wflists,
4520 : activeWindows);
4521 : }
4522 :
4523 : /*
4524 : * If there is an FDW that's responsible for all baserels of the query,
4525 : * let it consider adding ForeignPaths.
4526 : */
4527 2378 : if (window_rel->fdwroutine &&
4528 12 : window_rel->fdwroutine->GetForeignUpperPaths)
4529 12 : window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4530 : input_rel, window_rel,
4531 : NULL);
4532 :
4533 : /* Let extensions possibly add some more paths */
4534 2378 : if (create_upper_paths_hook)
4535 0 : (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4536 : input_rel, window_rel, NULL);
4537 :
4538 : /* Now choose the best path(s) */
4539 2378 : set_cheapest(window_rel);
4540 :
4541 2378 : return window_rel;
4542 : }
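/*
 * [Illustrative sketch, not part of planner.c]  The loop above admits a
 * path when it is the cheapest total path, fully satisfies
 * root->window_pathkeys, or supplies at least one presorted key.  Below
 * is a minimal standalone analogue of the prefix test performed by
 * pathkeys_count_contained_in; the helper name is invented for the
 * sketch, and ints stand in for canonical PathKey pointers, which
 * compare by simple equality.
 */
#include <stdbool.h>
#include <stdio.h>

/*
 * Count how many leading entries of 'needed' appear, in order, at the
 * front of 'given'; report whether all of 'needed' was covered.
 */
static bool
count_contained_in(const int *needed, int nneeded,
                   const int *given, int ngiven, int *presorted)
{
    int n = 0;

    while (n < nneeded && n < ngiven && needed[n] == given[n])
        n++;
    *presorted = n;
    return n == nneeded;
}

int
main(void)
{
    int window_keys[] = {1, 2, 3};  /* required ordering */
    int path_keys[] = {1, 2, 7};    /* ordering the path provides */
    int presorted;
    bool sorted = count_contained_in(window_keys, 3,
                                     path_keys, 3, &presorted);

    /* prints: fully sorted? no, presorted prefix = 2 */
    printf("fully sorted? %s, presorted prefix = %d\n",
           sorted ? "yes" : "no", presorted);
    return 0;
}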
4543 :
4544 : /*
4545 : * Stack window-function implementation steps atop the given Path, and
4546 : * add the result to window_rel.
4547 : *
4548 : * window_rel: upperrel to contain result
4549 : * path: input Path to use (must return input_target)
4550 : * input_target: result of make_window_input_target
4551 : * output_target: what the topmost WindowAggPath should return
4552 : * wflists: result of find_window_functions
4553 : * activeWindows: result of select_active_windows
4554 : */
4555 : static void
4556 2586 : create_one_window_path(PlannerInfo *root,
4557 : RelOptInfo *window_rel,
4558 : Path *path,
4559 : PathTarget *input_target,
4560 : PathTarget *output_target,
4561 : WindowFuncLists *wflists,
4562 : List *activeWindows)
4563 : {
4564 : PathTarget *window_target;
4565 : ListCell *l;
4566 2586 : List *topqual = NIL;
4567 :
4568 : /*
4569 : * Since each window clause could require a different sort order, we stack
4570 : * up a WindowAgg node for each clause, with sort steps between them as
4571 : * needed. (We assume that select_active_windows chose a good order for
4572 : * executing the clauses in.)
4573 : *
4574 : * input_target should contain all Vars and Aggs needed for the result.
4575 : * (In some cases we wouldn't need to propagate all of these all the way
4576 : * to the top, since they might only be needed as inputs to WindowFuncs.
4577 : * It's probably not worth trying to optimize that though.) It must also
4578 : * contain all window partitioning and sorting expressions, to ensure
4579 : * they're computed only once at the bottom of the stack (that's critical
4580 : * for volatile functions). As we climb up the stack, we'll add outputs
4581 : * for the WindowFuncs computed at each level.
4582 : */
4583 2586 : window_target = input_target;
4584 :
4585 5340 : foreach(l, activeWindows)
4586 : {
4587 2754 : WindowClause *wc = lfirst_node(WindowClause, l);
4588 : List *window_pathkeys;
4589 2754 : List *runcondition = NIL;
4590 : int presorted_keys;
4591 : bool is_sorted;
4592 : bool topwindow;
4593 : ListCell *lc2;
4594 :
4595 2754 : window_pathkeys = make_pathkeys_for_window(root,
4596 : wc,
4597 : root->processed_tlist);
4598 :
4599 2754 : is_sorted = pathkeys_count_contained_in(window_pathkeys,
4600 : path->pathkeys,
4601 : &presorted_keys);
4602 :
4603 : /* Sort if necessary */
4604 2754 : if (!is_sorted)
4605 : {
4606 : /*
4607 : * No presorted keys, or incremental sort is disabled; just
4608 : * perform a complete sort.
4609 : */
4610 2106 : if (presorted_keys == 0 || !enable_incremental_sort)
4611 2044 : path = (Path *) create_sort_path(root, window_rel,
4612 : path,
4613 : window_pathkeys,
4614 : -1.0);
4615 : else
4616 : {
4617 : /*
4618 : * Since we have presorted keys and incremental sort is
4619 : * enabled, just use incremental sort.
4620 : */
4621 62 : path = (Path *) create_incremental_sort_path(root,
4622 : window_rel,
4623 : path,
4624 : window_pathkeys,
4625 : presorted_keys,
4626 : -1.0);
4627 : }
4628 : }
4629 :
4630 2754 : if (lnext(activeWindows, l))
4631 : {
4632 : /*
4633 : * Add the current WindowFuncs to the output target for this
4634 : * intermediate WindowAggPath. We must copy window_target to
4635 : * avoid changing the previous path's target.
4636 : *
4637 : * Note: a WindowFunc adds nothing to the target's eval costs; but
4638 : * we do need to account for the increase in tlist width.
4639 : */
4640 168 : int64 tuple_width = window_target->width;
4641 :
4642 168 : window_target = copy_pathtarget(window_target);
4643 384 : foreach(lc2, wflists->windowFuncs[wc->winref])
4644 : {
4645 216 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4646 :
4647 216 : add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4648 216 : tuple_width += get_typavgwidth(wfunc->wintype, -1);
4649 : }
4650 168 : window_target->width = clamp_width_est(tuple_width);
4651 : }
4652 : else
4653 : {
4654 : /* Install the goal target in the topmost WindowAgg */
4655 2586 : window_target = output_target;
4656 : }
4657 :
4658 : /* mark the final item in the list as the top-level window */
4659 2754 : topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4660 :
4661 : /*
4662 : * Collect the WindowFuncRunConditions from each WindowFunc and
4663 : * convert them into OpExprs.
4664 : */
4665 6246 : foreach(lc2, wflists->windowFuncs[wc->winref])
4666 : {
4667 : ListCell *lc3;
4668 3492 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4669 :
4670 3672 : foreach(lc3, wfunc->runCondition)
4671 : {
4672 180 : WindowFuncRunCondition *wfuncrc =
4673 : lfirst_node(WindowFuncRunCondition, lc3);
4674 : Expr *opexpr;
4675 : Expr *leftop;
4676 : Expr *rightop;
4677 :
4678 180 : if (wfuncrc->wfunc_left)
4679 : {
4680 162 : leftop = (Expr *) copyObject(wfunc);
4681 162 : rightop = copyObject(wfuncrc->arg);
4682 : }
4683 : else
4684 : {
4685 18 : leftop = copyObject(wfuncrc->arg);
4686 18 : rightop = (Expr *) copyObject(wfunc);
4687 : }
4688 :
4689 180 : opexpr = make_opclause(wfuncrc->opno,
4690 : BOOLOID,
4691 : false,
4692 : leftop,
4693 : rightop,
4694 : InvalidOid,
4695 : wfuncrc->inputcollid);
4696 :
4697 180 : runcondition = lappend(runcondition, opexpr);
4698 :
4699 180 : if (!topwindow)
4700 24 : topqual = lappend(topqual, opexpr);
4701 : }
4702 : }
4703 :
4704 : path = (Path *)
4705 2754 : create_windowagg_path(root, window_rel, path, window_target,
4706 2754 : wflists->windowFuncs[wc->winref],
4707 : runcondition, wc,
4708 : topwindow ? topqual : NIL, topwindow);
4709 : }
4710 :
4711 2586 : add_path(window_rel, path);
4712 2586 : }
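/*
 * [Illustrative sketch, not part of planner.c]  Above, a path that
 * already supplies a useful prefix of window_pathkeys gets an
 * incremental sort rather than a full sort.  This self-contained sketch
 * shows the underlying technique: with input already ordered on the
 * leading column, only each run of equal leading values needs sorting
 * on the remaining column.  All names are invented for the sketch.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    int a;                      /* presorted leading column */
    int b;                      /* remaining sort column */
} Row;

static int
cmp_b(const void *x, const void *y)
{
    const Row *rx = x;
    const Row *ry = y;

    return (rx->b > ry->b) - (rx->b < ry->b);
}

/*
 * Input is already sorted by 'a'; produce (a, b) order by sorting each
 * run of equal 'a' values on 'b' only.
 */
static void
incremental_sort(Row *rows, int n)
{
    int start = 0;

    for (int i = 1; i <= n; i++)
    {
        if (i == n || rows[i].a != rows[start].a)
        {
            qsort(rows + start, i - start, sizeof(Row), cmp_b);
            start = i;
        }
    }
}

int
main(void)
{
    Row rows[] = {{1, 9}, {1, 4}, {2, 7}, {2, 1}, {2, 5}};

    incremental_sort(rows, 5);
    for (int i = 0; i < 5; i++)
        printf("(%d,%d) ", rows[i].a, rows[i].b);
    printf("\n");               /* (1,4) (1,9) (2,1) (2,5) (2,7) */
    return 0;
}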
4713 :
4714 : /*
4715 : * create_distinct_paths
4716 : *
4717 : * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4718 : *
4719 : * input_rel: contains the source-data Paths
4720 : * target: the pathtarget for the result Paths to compute
4721 : *
4722 : * Note: input paths should already compute the desired pathtarget, since
4723 : * Sort/Unique won't project anything.
4724 : */
4725 : static RelOptInfo *
4726 2714 : create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4727 : PathTarget *target)
4728 : {
4729 : RelOptInfo *distinct_rel;
4730 :
4731 : /* For now, do all work in the (DISTINCT, NULL) upperrel */
4732 2714 : distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4733 :
4734 : /*
4735 : * We don't compute anything at this level, so distinct_rel will be
4736 : * parallel-safe if the input rel is parallel-safe. In particular, if
4737 : * there is a DISTINCT ON (...) clause, any path for the input_rel will
4738 : * output those expressions, and will not be parallel-safe unless those
4739 : * expressions are parallel-safe.
4740 : */
4741 2714 : distinct_rel->consider_parallel = input_rel->consider_parallel;
4742 :
4743 : /*
4744 : * If the input rel belongs to a single FDW, so does the distinct_rel.
4745 : */
4746 2714 : distinct_rel->serverid = input_rel->serverid;
4747 2714 : distinct_rel->userid = input_rel->userid;
4748 2714 : distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4749 2714 : distinct_rel->fdwroutine = input_rel->fdwroutine;
4750 :
4751 : /* build distinct paths based on input_rel's pathlist */
4752 2714 : create_final_distinct_paths(root, input_rel, distinct_rel);
4753 :
4754 : /* now build distinct paths based on input_rel's partial_pathlist */
4755 2714 : create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4756 :
4757 : /* Give a helpful error if we failed to create any paths */
4758 2714 : if (distinct_rel->pathlist == NIL)
4759 0 : ereport(ERROR,
4760 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4761 : errmsg("could not implement DISTINCT"),
4762 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4763 :
4764 : /*
4765 : * If there is an FDW that's responsible for all baserels of the query,
4766 : * let it consider adding ForeignPaths.
4767 : */
4768 2714 : if (distinct_rel->fdwroutine &&
4769 16 : distinct_rel->fdwroutine->GetForeignUpperPaths)
4770 16 : distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4771 : UPPERREL_DISTINCT,
4772 : input_rel,
4773 : distinct_rel,
4774 : NULL);
4775 :
4776 : /* Let extensions possibly add some more paths */
4777 2714 : if (create_upper_paths_hook)
4778 0 : (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4779 : distinct_rel, NULL);
4780 :
4781 : /* Now choose the best path(s) */
4782 2714 : set_cheapest(distinct_rel);
4783 :
4784 2714 : return distinct_rel;
4785 : }
4786 :
4787 : /*
4788 : * create_partial_distinct_paths
4789 : *
4790 : * Process 'input_rel' partial paths and add unique/aggregate paths to the
4791 : * UPPERREL_PARTIAL_DISTINCT rel. For the paths created, add Gather/GatherMerge
4792 : * paths on top, then add a final unique/aggregate path to remove any
4793 : * duplicates produced by combining rows from parallel workers.
4794 : */
4795 : static void
4796 2714 : create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4797 : RelOptInfo *final_distinct_rel,
4798 : PathTarget *target)
4799 : {
4800 : RelOptInfo *partial_distinct_rel;
4801 : Query *parse;
4802 : List *distinctExprs;
4803 : double numDistinctRows;
4804 : Path *cheapest_partial_path;
4805 : ListCell *lc;
4806 :
4807 : /* nothing to do when there are no partial paths in the input rel */
4808 2714 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4809 2606 : return;
4810 :
4811 108 : parse = root->parse;
4812 :
4813 : /* can't do parallel DISTINCT ON */
4814 108 : if (parse->hasDistinctOn)
4815 0 : return;
4816 :
4817 108 : partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4818 : NULL);
4819 108 : partial_distinct_rel->reltarget = target;
4820 108 : partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4821 :
4822 : /*
4823 : * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4824 : */
4825 108 : partial_distinct_rel->serverid = input_rel->serverid;
4826 108 : partial_distinct_rel->userid = input_rel->userid;
4827 108 : partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4828 108 : partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4829 :
4830 108 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
4831 :
4832 108 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4833 : parse->targetList);
4834 :
4835 : /* estimate how many distinct rows we'll get from each worker */
4836 108 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4837 : cheapest_partial_path->rows,
4838 : NULL, NULL);
4839 :
4840 : /*
4841 : * Try sorting the cheapest path, incrementally sort any paths with
4842 : * presorted keys, and put a Unique path atop each of those. We'll also
4843 : * attempt to reorder the required pathkeys to match the input path's
4844 : * pathkeys as much as possible, in hopes of avoiding a possible need to
4845 : * re-sort.
4846 : */
4847 108 : if (grouping_is_sortable(root->processed_distinctClause))
4848 : {
4849 234 : foreach(lc, input_rel->partial_pathlist)
4850 : {
4851 126 : Path *input_path = (Path *) lfirst(lc);
4852 : Path *sorted_path;
4853 126 : List *useful_pathkeys_list = NIL;
4854 :
4855 : useful_pathkeys_list =
4856 126 : get_useful_pathkeys_for_distinct(root,
4857 : root->distinct_pathkeys,
4858 : input_path->pathkeys);
4859 : Assert(list_length(useful_pathkeys_list) > 0);
4860 :
4861 390 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4862 : {
4863 138 : sorted_path = make_ordered_path(root,
4864 : partial_distinct_rel,
4865 : input_path,
4866 : cheapest_partial_path,
4867 : useful_pathkeys,
4868 : -1.0);
4869 :
4870 138 : if (sorted_path == NULL)
4871 12 : continue;
4872 :
4873 : /*
4874 : * An empty distinct_pathkeys means all tuples have the same
4875 : * value for the DISTINCT clause. See
4876 : * create_final_distinct_paths().
4877 : */
4878 126 : if (root->distinct_pathkeys == NIL)
4879 : {
4880 : Node *limitCount;
4881 :
4882 6 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4883 : sizeof(int64),
4884 : Int64GetDatum(1), false,
4885 : FLOAT8PASSBYVAL);
4886 :
4887 : /*
4888 : * Apply a LimitPath onto the partial path to restrict the
4889 : * tuples from each worker to 1.
4890 : * create_final_distinct_paths will need to apply an
4891 : * additional LimitPath to restrict this to a single row
4892 : * after the Gather node. If the query already has a
4893 : * LIMIT clause, then we could end up with three Limit
4894 : * nodes in the final plan. Consolidating the top two of
4895 : * these could be done, but does not seem worth troubling
4896 : * over.
4897 : */
4898 6 : add_partial_path(partial_distinct_rel, (Path *)
4899 6 : create_limit_path(root, partial_distinct_rel,
4900 : sorted_path,
4901 : NULL,
4902 : limitCount,
4903 : LIMIT_OPTION_COUNT,
4904 : 0, 1));
4905 : }
4906 : else
4907 : {
4908 120 : add_partial_path(partial_distinct_rel, (Path *)
4909 120 : create_upper_unique_path(root, partial_distinct_rel,
4910 : sorted_path,
4911 120 : list_length(root->distinct_pathkeys),
4912 : numDistinctRows));
4913 : }
4914 : }
4915 : }
4916 : }
4917 :
4918 : /*
4919 : * Now try hash aggregate paths, if enabled and hashing is possible. Since
4920 : * we're not on the hook to ensure we do our best to create at least one
4921 : * path here, we treat enable_hashagg as a hard off-switch rather than the
4922 : * slightly softer variant in create_final_distinct_paths.
4923 : */
4924 108 : if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4925 : {
4926 78 : add_partial_path(partial_distinct_rel, (Path *)
4927 78 : create_agg_path(root,
4928 : partial_distinct_rel,
4929 : cheapest_partial_path,
4930 : cheapest_partial_path->pathtarget,
4931 : AGG_HASHED,
4932 : AGGSPLIT_SIMPLE,
4933 : root->processed_distinctClause,
4934 : NIL,
4935 : NULL,
4936 : numDistinctRows));
4937 : }
4938 :
4939 : /*
4940 : * If there is an FDW that's responsible for all baserels of the query,
4941 : * let it consider adding ForeignPaths.
4942 : */
4943 108 : if (partial_distinct_rel->fdwroutine &&
4944 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4945 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4946 : UPPERREL_PARTIAL_DISTINCT,
4947 : input_rel,
4948 : partial_distinct_rel,
4949 : NULL);
4950 :
4951 : /* Let extensions possibly add some more partial paths */
4952 108 : if (create_upper_paths_hook)
4953 0 : (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4954 : input_rel, partial_distinct_rel, NULL);
4955 :
4956 108 : if (partial_distinct_rel->partial_pathlist != NIL)
4957 : {
4958 108 : generate_useful_gather_paths(root, partial_distinct_rel, true);
4959 108 : set_cheapest(partial_distinct_rel);
4960 :
4961 : /*
4962 : * Finally, create paths to distinctify the final result. This step
4963 : * is needed to remove any duplicates due to combining rows from
4964 : * parallel workers.
4965 : */
4966 108 : create_final_distinct_paths(root, partial_distinct_rel,
4967 : final_distinct_rel);
4968 : }
4969 : }
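/*
 * [Illustrative sketch, not part of planner.c]  The function above
 * mirrors a two-stage DISTINCT: each worker removes duplicates within
 * its own chunk (the partial paths), Gather concatenates the per-worker
 * results, and create_final_distinct_paths removes the duplicates that
 * span workers.  Standalone sketch with int keys and a hypothetical
 * dedup_append helper; the real code hashes (AGG_HASHED) or sorts
 * arbitrary grouping columns.
 */
#include <stdio.h>

/* Append values of src not already present in dst; returns new length. */
static int
dedup_append(int *dst, int ndst, const int *src, int nsrc)
{
    for (int i = 0; i < nsrc; i++)
    {
        int j;

        for (j = 0; j < ndst; j++)
            if (dst[j] == src[i])
                break;
        if (j == ndst)
            dst[ndst++] = src[i];
    }
    return ndst;
}

int
main(void)
{
    int w1[] = {3, 1, 3, 2};    /* worker 1 input */
    int w2[] = {2, 2, 4};       /* worker 2 input */
    int p1[4], p2[3], final[8];
    int n1 = dedup_append(p1, 0, w1, 4);    /* partial distinct: 3 1 2 */
    int n2 = dedup_append(p2, 0, w2, 3);    /* partial distinct: 2 4 */
    int nf = 0;

    /* "Gather", then the final distinct drops the cross-worker dup 2 */
    nf = dedup_append(final, nf, p1, n1);
    nf = dedup_append(final, nf, p2, n2);
    for (int i = 0; i < nf; i++)
        printf("%d ", final[i]);            /* 3 1 2 4 */
    printf("\n");
    return 0;
}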
4970 :
4971 : /*
4972 : * create_final_distinct_paths
4973 : * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
4974 : *
4975 : * input_rel: contains the source-data paths
4976 : * distinct_rel: destination relation for storing created paths
4977 : */
4978 : static RelOptInfo *
4979 2822 : create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4980 : RelOptInfo *distinct_rel)
4981 : {
4982 2822 : Query *parse = root->parse;
4983 2822 : Path *cheapest_input_path = input_rel->cheapest_total_path;
4984 : double numDistinctRows;
4985 : bool allow_hash;
4986 :
4987 : /* Estimate number of distinct rows there will be */
4988 2822 : if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4989 2748 : root->hasHavingQual)
4990 : {
4991 : /*
4992 : * If there was grouping or aggregation, use the number of input rows
4993 : * as the estimated number of DISTINCT rows (ie, assume the input is
4994 : * already mostly unique).
4995 : */
4996 74 : numDistinctRows = cheapest_input_path->rows;
4997 : }
4998 : else
4999 : {
5000 : /*
5001 : * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5002 : */
5003 : List *distinctExprs;
5004 :
5005 2748 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5006 : parse->targetList);
5007 2748 : numDistinctRows = estimate_num_groups(root, distinctExprs,
5008 : cheapest_input_path->rows,
5009 : NULL, NULL);
5010 : }
5011 :
5012 : /*
5013 : * Consider sort-based implementations of DISTINCT, if possible.
5014 : */
5015 2822 : if (grouping_is_sortable(root->processed_distinctClause))
5016 : {
5017 : /*
5018 : * First, if we have any adequately-presorted paths, just stick a
5019 : * Unique node on those. We also consider doing an explicit sort of
5020 : * the cheapest input path and Unique'ing that. If any paths have
5021 : * presorted keys then we'll create an incremental sort atop those
5022 : * before adding a Unique node on top. We'll also attempt to
5023 : * reorder the required pathkeys to match the input path's pathkeys as
5024 : * much as possible, in hopes of avoiding a possible need to re-sort.
5025 : *
5026 : * When we have DISTINCT ON, we must sort by the more rigorous of
5027 : * DISTINCT and ORDER BY, else it won't have the desired behavior.
5028 : * Also, if we do have to do an explicit sort, we might as well use
5029 : * the more rigorous ordering to avoid a second sort later. (Note
5030 : * that the parser will have ensured that one clause is a prefix of
5031 : * the other.)
5032 : */
5033 : List *needed_pathkeys;
5034 : ListCell *lc;
5035 2816 : double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5036 :
5037 3064 : if (parse->hasDistinctOn &&
5038 248 : list_length(root->distinct_pathkeys) <
5039 248 : list_length(root->sort_pathkeys))
5040 54 : needed_pathkeys = root->sort_pathkeys;
5041 : else
5042 2762 : needed_pathkeys = root->distinct_pathkeys;
5043 :
5044 7246 : foreach(lc, input_rel->pathlist)
5045 : {
5046 4430 : Path *input_path = (Path *) lfirst(lc);
5047 : Path *sorted_path;
5048 4430 : List *useful_pathkeys_list = NIL;
5049 :
5050 : useful_pathkeys_list =
5051 4430 : get_useful_pathkeys_for_distinct(root,
5052 : needed_pathkeys,
5053 : input_path->pathkeys);
5054 : Assert(list_length(useful_pathkeys_list) > 0);
5055 :
5056 13782 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5057 : {
5058 4922 : sorted_path = make_ordered_path(root,
5059 : distinct_rel,
5060 : input_path,
5061 : cheapest_input_path,
5062 : useful_pathkeys,
5063 : limittuples);
5064 :
5065 4922 : if (sorted_path == NULL)
5066 550 : continue;
5067 :
5068 : /*
5069 : * distinct_pathkeys may have become empty if all of the
5070 : * pathkeys were determined to be redundant. If all of the
5071 : * pathkeys are redundant then each DISTINCT target must only
5072 : * allow a single value, therefore all resulting tuples must
5073 : * be identical (or at least indistinguishable by an equality
5074 : * check). We can uniquify these tuples simply by taking
5075 : * the first tuple. All we do here is add a path to do "LIMIT
5076 : * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5077 : * still have a non-NIL sort_pathkeys list, so we must still
5078 : * only do this with paths which are correctly sorted by
5079 : * sort_pathkeys.
5080 : */
5081 4372 : if (root->distinct_pathkeys == NIL)
5082 : {
5083 : Node *limitCount;
5084 :
5085 116 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5086 : sizeof(int64),
5087 : Int64GetDatum(1), false,
5088 : FLOAT8PASSBYVAL);
5089 :
5090 : /*
5091 : * If the query already has a LIMIT clause, then we could
5092 : * end up with a duplicate LimitPath in the final plan.
5093 : * That does not seem worth troubling over too much.
5094 : */
5095 116 : add_path(distinct_rel, (Path *)
5096 116 : create_limit_path(root, distinct_rel, sorted_path,
5097 : NULL, limitCount,
5098 : LIMIT_OPTION_COUNT, 0, 1));
5099 : }
5100 : else
5101 : {
5102 4256 : add_path(distinct_rel, (Path *)
5103 4256 : create_upper_unique_path(root, distinct_rel,
5104 : sorted_path,
5105 4256 : list_length(root->distinct_pathkeys),
5106 : numDistinctRows));
5107 : }
5108 : }
5109 : }
5110 : }
5111 :
5112 : /*
5113 : * Consider hash-based implementations of DISTINCT, if possible.
5114 : *
5115 : * If we were not able to make any other types of path, we *must* hash or
5116 : * die trying. If we do have other choices, there are two things that
5117 : * should prevent selection of hashing: if the query uses DISTINCT ON
5118 : * (because it won't really have the expected behavior if we hash), or if
5119 : * enable_hashagg is off.
5120 : *
5121 : * Note: grouping_is_hashable() is much more expensive to check than the
5122 : * other gating conditions, so we want to do it last.
5123 : */
5124 2822 : if (distinct_rel->pathlist == NIL)
5125 6 : allow_hash = true; /* we have no alternatives */
5126 2816 : else if (parse->hasDistinctOn || !enable_hashagg)
5127 398 : allow_hash = false; /* policy-based decision not to hash */
5128 : else
5129 2418 : allow_hash = true; /* default */
5130 :
5131 2822 : if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5132 : {
5133 : /* Generate hashed aggregate path --- no sort needed */
5134 2424 : add_path(distinct_rel, (Path *)
5135 2424 : create_agg_path(root,
5136 : distinct_rel,
5137 : cheapest_input_path,
5138 : cheapest_input_path->pathtarget,
5139 : AGG_HASHED,
5140 : AGGSPLIT_SIMPLE,
5141 : root->processed_distinctClause,
5142 : NIL,
5143 : NULL,
5144 : numDistinctRows));
5145 : }
5146 :
5147 2822 : return distinct_rel;
5148 : }
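/*
 * [Illustrative sketch, not part of planner.c]  The sorted strategy
 * above is Sort followed by Unique: once the input is ordered on the
 * DISTINCT keys, duplicates are adjacent and a single pass drops them.
 * Minimal standalone sketch with int keys; the helper name is invented.
 */
#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *x, const void *y)
{
    int a = *(const int *) x;
    int b = *(const int *) y;

    return (a > b) - (a < b);
}

/* Sort, then keep only the first row of each group of equal keys. */
static int
sort_unique(int *v, int n)
{
    int out = 0;

    qsort(v, n, sizeof(int), cmp_int);
    for (int i = 0; i < n; i++)
        if (out == 0 || v[i] != v[out - 1])
            v[out++] = v[i];
    return out;
}

int
main(void)
{
    int v[] = {5, 3, 5, 1, 3, 3};
    int n = sort_unique(v, 6);

    for (int i = 0; i < n; i++)
        printf("%d ", v[i]);    /* 1 3 5 */
    printf("\n");
    return 0;
}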
5149 :
5150 : /*
5151 : * get_useful_pathkeys_for_distinct
5152 : * Get useful orderings of pathkeys for distinctClause by reordering
5153 : * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5154 : *
5155 : * This returns a list of pathkeys that can be useful for DISTINCT or DISTINCT
5156 : * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5157 : */
5158 : static List *
5159 4556 : get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys,
5160 : List *path_pathkeys)
5161 : {
5162 4556 : List *useful_pathkeys_list = NIL;
5163 4556 : List *useful_pathkeys = NIL;
5164 :
5165 : /* always include the given 'needed_pathkeys' */
5166 4556 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5167 : needed_pathkeys);
5168 :
5169 4556 : if (!enable_distinct_reordering)
5170 0 : return useful_pathkeys_list;
5171 :
5172 : /*
5173 : * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5174 : * that match 'needed_pathkeys', but only up to the longest matching
5175 : * prefix.
5176 : *
5177 : * When we have DISTINCT ON, we must ensure that the resulting pathkey
5178 : * list matches the initial distinctClause pathkeys; otherwise, it won't have
5179 : * the desired behavior.
5180 : */
5181 11130 : foreach_node(PathKey, pathkey, path_pathkeys)
5182 : {
5183 : /*
5184 : * The PathKey nodes are canonical, so they can be checked for
5185 : * equality by simple pointer comparison.
5186 : */
5187 2046 : if (!list_member_ptr(needed_pathkeys, pathkey))
5188 10 : break;
5189 2036 : if (root->parse->hasDistinctOn &&
5190 200 : !list_member_ptr(root->distinct_pathkeys, pathkey))
5191 18 : break;
5192 :
5193 2018 : useful_pathkeys = lappend(useful_pathkeys, pathkey);
5194 : }
5195 :
5196 : /* If no match at all, no point in reordering needed_pathkeys */
5197 4556 : if (useful_pathkeys == NIL)
5198 2802 : return useful_pathkeys_list;
5199 :
5200 : /*
5201 : * If the match is not complete, the resulting pathkey list is not useful
5202 : * without incremental sort.
5203 : */
5204 1754 : if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5205 904 : !enable_incremental_sort)
5206 60 : return useful_pathkeys_list;
5207 :
5208 : /* Append the remaining PathKey nodes in needed_pathkeys */
5209 1694 : useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5210 : needed_pathkeys);
5211 :
5212 : /*
5213 : * If the resulting pathkey list is the same as the 'needed_pathkeys',
5214 : * just drop it.
5215 : */
5216 1694 : if (compare_pathkeys(needed_pathkeys,
5217 : useful_pathkeys) == PATHKEYS_EQUAL)
5218 1190 : return useful_pathkeys_list;
5219 :
5220 504 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5221 : useful_pathkeys);
5222 :
5223 504 : return useful_pathkeys_list;
5224 : }
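/*
 * [Illustrative sketch, not part of planner.c]  A standalone analogue of
 * the reordering rule above: keep the longest prefix of the path's keys
 * that are all needed, then append the remaining needed keys.  The
 * result still sorts correctly for DISTINCT while reusing the path's
 * existing order.  Helper names are invented; ints stand in for
 * canonical PathKey pointers.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
member(const int *v, int n, int key)
{
    for (int i = 0; i < n; i++)
        if (v[i] == key)
            return true;
    return false;
}

/* Reorder 'needed' to follow the path's key order as far as possible. */
static int
reorder_keys(const int *needed, int nneeded,
             const int *path, int npath, int *out)
{
    int n = 0;

    /* longest prefix of the path's keys that are all needed */
    for (int i = 0; i < npath && member(needed, nneeded, path[i]); i++)
        out[n++] = path[i];
    /* append the needed keys not already taken */
    for (int i = 0; i < nneeded; i++)
        if (!member(out, n, needed[i]))
            out[n++] = needed[i];
    return n;
}

int
main(void)
{
    int needed[] = {1, 2, 3};   /* DISTINCT keys, parser order */
    int path[] = {2, 1, 9};     /* path is already sorted this way */
    int out[3];
    int n = reorder_keys(needed, 3, path, 3, out);

    for (int i = 0; i < n; i++)
        printf("%d ", out[i]);  /* 2 1 3: presorted prefix reused */
    printf("\n");
    return 0;
}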
5225 :
5226 : /*
5227 : * create_ordered_paths
5228 : *
5229 : * Build a new upperrel containing Paths for ORDER BY evaluation.
5230 : *
5231 : * All paths in the result must satisfy the ORDER BY ordering.
5232 : * The only new paths we need to consider are an explicit full sort
5233 : * and incremental sort on the cheapest-total existing path.
5234 : *
5235 : * input_rel: contains the source-data Paths
5236 : * target: the output tlist the result Paths must emit
5237 : * limit_tuples: estimated bound on the number of output tuples,
5238 : * or -1 if no LIMIT or couldn't estimate
5239 : *
5240 : * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5241 : * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5242 : */
5243 : static RelOptInfo *
5244 76172 : create_ordered_paths(PlannerInfo *root,
5245 : RelOptInfo *input_rel,
5246 : PathTarget *target,
5247 : bool target_parallel_safe,
5248 : double limit_tuples)
5249 : {
5250 76172 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5251 : RelOptInfo *ordered_rel;
5252 : ListCell *lc;
5253 :
5254 : /* For now, do all work in the (ORDERED, NULL) upperrel */
5255 76172 : ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5256 :
5257 : /*
5258 : * If the input relation is not parallel-safe, then the ordered relation
5259 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5260 : * target list is parallel-safe.
5261 : */
5262 76172 : if (input_rel->consider_parallel && target_parallel_safe)
5263 52888 : ordered_rel->consider_parallel = true;
5264 :
5265 : /*
5266 : * If the input rel belongs to a single FDW, so does the ordered_rel.
5267 : */
5268 76172 : ordered_rel->serverid = input_rel->serverid;
5269 76172 : ordered_rel->userid = input_rel->userid;
5270 76172 : ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5271 76172 : ordered_rel->fdwroutine = input_rel->fdwroutine;
5272 :
5273 192526 : foreach(lc, input_rel->pathlist)
5274 : {
5275 116354 : Path *input_path = (Path *) lfirst(lc);
5276 : Path *sorted_path;
5277 : bool is_sorted;
5278 : int presorted_keys;
5279 :
5280 116354 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5281 : input_path->pathkeys, &presorted_keys);
5282 :
5283 116354 : if (is_sorted)
5284 43042 : sorted_path = input_path;
5285 : else
5286 : {
5287 : /*
5288 : * Try at least sorting the cheapest path and also try
5289 : * incrementally sorting any path which is partially sorted
5290 : * already (no need to deal with paths which have presorted keys
5291 : * when incremental sort is disabled unless it's the cheapest
5292 : * input path).
5293 : */
5294 73312 : if (input_path != cheapest_input_path &&
5295 6342 : (presorted_keys == 0 || !enable_incremental_sort))
5296 1870 : continue;
5297 :
5298 : /*
5299 : * We've no need to consider both a sort and incremental sort.
5300 : * We'll just do a sort if there are no presorted keys and an
5301 : * incremental sort when there are presorted keys.
5302 : */
5303 71442 : if (presorted_keys == 0 || !enable_incremental_sort)
5304 66362 : sorted_path = (Path *) create_sort_path(root,
5305 : ordered_rel,
5306 : input_path,
5307 : root->sort_pathkeys,
5308 : limit_tuples);
5309 : else
5310 5080 : sorted_path = (Path *) create_incremental_sort_path(root,
5311 : ordered_rel,
5312 : input_path,
5313 : root->sort_pathkeys,
5314 : presorted_keys,
5315 : limit_tuples);
5316 : }
5317 :
5318 : /*
5319 : * If the pathtarget of the result path has different expressions from
5320 : * the target to be applied, a projection step is needed.
5321 : */
5322 114484 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5323 294 : sorted_path = apply_projection_to_path(root, ordered_rel,
5324 : sorted_path, target);
5325 :
5326 114484 : add_path(ordered_rel, sorted_path);
5327 : }
5328 :
5329 : /*
5330 : * generate_gather_paths() will have already generated a simple Gather
5331 : * path for the best parallel path, if any, and the loop above will have
5332 : * considered sorting it. Similarly, generate_gather_paths() will also
5333 : * have generated order-preserving Gather Merge plans which can be used
5334 : * without sorting if they happen to match the sort_pathkeys, and the loop
5335 : * above will have handled those as well. However, there's one more
5336 : * possibility: it may make sense to sort the cheapest partial path or
5337 : * incrementally sort any partial path that is partially sorted according
5338 : * to the required output order and then use Gather Merge.
5339 : */
5340 76172 : if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5341 52684 : input_rel->partial_pathlist != NIL)
5342 : {
5343 : Path *cheapest_partial_path;
5344 :
5345 2228 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
5346 :
5347 4662 : foreach(lc, input_rel->partial_pathlist)
5348 : {
5349 2434 : Path *input_path = (Path *) lfirst(lc);
5350 : Path *sorted_path;
5351 : bool is_sorted;
5352 : int presorted_keys;
5353 : double total_groups;
5354 :
5355 2434 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5356 : input_path->pathkeys,
5357 : &presorted_keys);
5358 :
5359 2434 : if (is_sorted)
5360 182 : continue;
5361 :
5362 : /*
5363 : * Try at least sorting the cheapest path and also try
5364 : * incrementally sorting any path which is partially sorted
5365 : * already (no need to deal with paths which have presorted keys
5366 : * when incremental sort is disabled unless it's the cheapest
5367 : * partial path).
5368 : */
5369 2252 : if (input_path != cheapest_partial_path &&
5370 42 : (presorted_keys == 0 || !enable_incremental_sort))
5371 0 : continue;
5372 :
5373 : /*
5374 : * We've no need to consider both a sort and incremental sort.
5375 : * We'll just do a sort if there are no presorted keys and an
5376 : * incremental sort when there are presorted keys.
5377 : */
5378 2252 : if (presorted_keys == 0 || !enable_incremental_sort)
5379 2192 : sorted_path = (Path *) create_sort_path(root,
5380 : ordered_rel,
5381 : input_path,
5382 : root->sort_pathkeys,
5383 : limit_tuples);
5384 : else
5385 60 : sorted_path = (Path *) create_incremental_sort_path(root,
5386 : ordered_rel,
5387 : input_path,
5388 : root->sort_pathkeys,
5389 : presorted_keys,
5390 : limit_tuples);
5391 2252 : total_groups = compute_gather_rows(sorted_path);
5392 : sorted_path = (Path *)
5393 2252 : create_gather_merge_path(root, ordered_rel,
5394 : sorted_path,
5395 : sorted_path->pathtarget,
5396 : root->sort_pathkeys, NULL,
5397 : &total_groups);
5398 :
5399 : /*
5400 : * If the pathtarget of the result path has different expressions
5401 : * from the target to be applied, a projection step is needed.
5402 : */
5403 2252 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5404 6 : sorted_path = apply_projection_to_path(root, ordered_rel,
5405 : sorted_path, target);
5406 :
5407 2252 : add_path(ordered_rel, sorted_path);
5408 : }
5409 : }
5410 :
5411 : /*
5412 : * If there is an FDW that's responsible for all baserels of the query,
5413 : * let it consider adding ForeignPaths.
5414 : */
5415 76172 : if (ordered_rel->fdwroutine &&
5416 384 : ordered_rel->fdwroutine->GetForeignUpperPaths)
5417 370 : ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5418 : input_rel, ordered_rel,
5419 : NULL);
5420 :
5421 : /* Let extensions possibly add some more paths */
5422 76172 : if (create_upper_paths_hook)
5423 0 : (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5424 : input_rel, ordered_rel, NULL);
5425 :
5426 : /*
5427 : * No need to bother with set_cheapest here; grouping_planner does not
5428 : * need us to do it.
5429 : */
5430 : Assert(ordered_rel->pathlist != NIL);
5431 :
5432 76172 : return ordered_rel;
5433 : }
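/*
 * [Illustrative sketch, not part of planner.c]  Gather Merge, used
 * above on sorted partial paths, preserves order by merging the
 * per-worker sorted streams rather than appending them.  Two-way merge
 * sketch with invented names; the executor's version is an N-way merge
 * driven by a binary heap.
 */
#include <stdio.h>

/* Merge two ascending runs into 'out'; the result stays fully sorted. */
static void
gather_merge(const int *a, int na, const int *b, int nb, int *out)
{
    int i = 0, j = 0, k = 0;

    while (i < na && j < nb)
        out[k++] = (a[i] <= b[j]) ? a[i++] : b[j++];
    while (i < na)
        out[k++] = a[i++];
    while (j < nb)
        out[k++] = b[j++];
}

int
main(void)
{
    int w1[] = {1, 4, 9};       /* worker 1 output, sorted */
    int w2[] = {2, 3, 8};       /* worker 2 output, sorted */
    int out[6];

    gather_merge(w1, 3, w2, 3, out);
    for (int i = 0; i < 6; i++)
        printf("%d ", out[i]);  /* 1 2 3 4 8 9 */
    printf("\n");
    return 0;
}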
5434 :
5435 :
5436 : /*
5437 : * make_group_input_target
5438 : * Generate appropriate PathTarget for initial input to grouping nodes.
5439 : *
5440 : * If there is grouping or aggregation, the scan/join subplan cannot emit
5441 : * the query's final targetlist; for example, it certainly can't emit any
5442 : * aggregate function calls. This routine generates the correct target
5443 : * for the scan/join subplan.
5444 : *
5445 : * The query target list passed from the parser already contains entries
5446 : * for all ORDER BY and GROUP BY expressions, but it will not have entries
5447 : * for variables used only in HAVING clauses; so we need to add those
5448 : * variables to the subplan target list. Also, we flatten all expressions
5449 : * except GROUP BY items into their component variables; other expressions
5450 : * will be computed by the upper plan nodes rather than by the subplan.
5451 : * For example, given a query like
5452 : * SELECT a+b, SUM(c+d) FROM table GROUP BY a+b;
5453 : * we want to pass this targetlist to the subplan:
5454 : * a+b,c,d
5455 : * where the a+b target will be used by the Sort/Group steps, and the
5456 : * other targets will be used for computing the final results.
5457 : *
5458 : * 'final_target' is the query's final target list (in PathTarget form)
5459 : *
5460 : * The result is the PathTarget to be computed by the Paths returned from
5461 : * query_planner().
5462 : */
5463 : static PathTarget *
5464 39136 : make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5465 : {
5466 39136 : Query *parse = root->parse;
5467 : PathTarget *input_target;
5468 : List *non_group_cols;
5469 : List *non_group_vars;
5470 : int i;
5471 : ListCell *lc;
5472 :
5473 : /*
5474 : * We must build a target containing all grouping columns, plus any other
5475 : * Vars mentioned in the query's targetlist and HAVING qual.
5476 : */
5477 39136 : input_target = create_empty_pathtarget();
5478 39136 : non_group_cols = NIL;
5479 :
5480 39136 : i = 0;
5481 95262 : foreach(lc, final_target->exprs)
5482 : {
5483 56126 : Expr *expr = (Expr *) lfirst(lc);
5484 56126 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
5485 :
5486 64890 : if (sgref && root->processed_groupClause &&
5487 8764 : get_sortgroupref_clause_noerr(sgref,
5488 : root->processed_groupClause) != NULL)
5489 : {
5490 : /*
5491 : * It's a grouping column, so add it to the input target as-is.
5492 : *
5493 : * Note that the target is logically below the grouping step. So
5494 : * with grouping sets we need to remove the RT index of the
5495 : * grouping step if there is any from the target expression.
5496 : */
5497 7020 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5498 : {
5499 : Assert(root->group_rtindex > 0);
5500 : expr = (Expr *)
5501 1836 : remove_nulling_relids((Node *) expr,
5502 1836 : bms_make_singleton(root->group_rtindex),
5503 : NULL);
5504 : }
5505 7020 : add_column_to_pathtarget(input_target, expr, sgref);
5506 : }
5507 : else
5508 : {
5509 : /*
5510 : * Non-grouping column, so just remember the expression for later
5511 : * call to pull_var_clause.
5512 : */
5513 49106 : non_group_cols = lappend(non_group_cols, expr);
5514 : }
5515 :
5516 56126 : i++;
5517 : }
5518 :
5519 : /*
5520 : * If there's a HAVING clause, we'll need the Vars it uses, too.
5521 : */
5522 39136 : if (parse->havingQual)
5523 880 : non_group_cols = lappend(non_group_cols, parse->havingQual);
5524 :
5525 : /*
5526 : * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5527 : * add them to the input target if not already present. (A Var used
5528 : * directly as a GROUP BY item will be present already.) Note this
5529 : * includes Vars used in resjunk items, so we are covering the needs of
5530 : * ORDER BY and window specifications. Vars used within Aggrefs and
5531 : * WindowFuncs will be pulled out here, too.
5532 : *
5533 : * Note that the target is logically below the grouping step. So with
5534 : * grouping sets we need to remove the RT index of the grouping step if
5535 : * there is any from the non-group Vars.
5536 : */
5537 39136 : non_group_vars = pull_var_clause((Node *) non_group_cols,
5538 : PVC_RECURSE_AGGREGATES |
5539 : PVC_RECURSE_WINDOWFUNCS |
5540 : PVC_INCLUDE_PLACEHOLDERS);
5541 39136 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5542 : {
5543 : Assert(root->group_rtindex > 0);
5544 : non_group_vars = (List *)
5545 830 : remove_nulling_relids((Node *) non_group_vars,
5546 830 : bms_make_singleton(root->group_rtindex),
5547 : NULL);
5548 : }
5549 39136 : add_new_columns_to_pathtarget(input_target, non_group_vars);
5550 :
5551 : /* clean up cruft */
5552 39136 : list_free(non_group_vars);
5553 39136 : list_free(non_group_cols);
5554 :
5555 : /* XXX this causes some redundant cost calculation ... */
5556 39136 : return set_pathtarget_cost_width(root, input_target);
5557 : }
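/*
 * [Illustrative sketch, not part of planner.c]  A standalone sketch of
 * the split performed above, using the header comment's example
 * (SELECT a+b, SUM(c+d) ... GROUP BY a+b): grouping expressions are
 * kept whole, everything else is flattened to its component variables.
 * Strings stand in for expression trees; all names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    const char *name;           /* e.g. "a+b" */
    const char *vars;           /* component variables, e.g. "ab" */
    bool is_group_col;          /* has a matching GROUP BY sortgroupref? */
} Tle;

int
main(void)
{
    Tle tlist[] = {
        {"a+b", "ab", true},
        {"SUM(c+d)", "cd", false},
    };
    bool have[26] = {false};

    for (int i = 0; i < 2; i++)
    {
        if (tlist[i].is_group_col)
            printf("%s ", tlist[i].name);   /* keep grouping expr whole */
        else
            for (const char *p = tlist[i].vars; *p; p++)
                have[*p - 'a'] = true;      /* flatten to component Vars */
    }
    for (int c = 0; c < 26; c++)
        if (have[c])
            printf("%c ", 'a' + c);
    printf("\n");               /* a+b c d */
    return 0;
}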
5558 :
5559 : /*
5560 : * make_partial_grouping_target
5561 : * Generate appropriate PathTarget for output of partial aggregate
5562 : * (or partial grouping, if there are no aggregates) nodes.
5563 : *
5564 : * A partial aggregation node needs to emit all the same aggregates that
5565 : * a regular aggregation node would, plus any aggregates used in HAVING;
5566 : * except that the Aggref nodes should be marked as partial aggregates.
5567 : *
5568 : * In addition, we'd better emit any Vars and PlaceHolderVars that are
5569 : * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5570 : * these would be Vars that are grouped by or used in grouping expressions.)
5571 : *
5572 : * grouping_target is the tlist to be emitted by the topmost aggregation step.
5573 : * havingQual represents the HAVING clause.
5574 : */
5575 : static PathTarget *
5576 2200 : make_partial_grouping_target(PlannerInfo *root,
5577 : PathTarget *grouping_target,
5578 : Node *havingQual)
5579 : {
5580 : PathTarget *partial_target;
5581 : List *non_group_cols;
5582 : List *non_group_exprs;
5583 : int i;
5584 : ListCell *lc;
5585 :
5586 2200 : partial_target = create_empty_pathtarget();
5587 2200 : non_group_cols = NIL;
5588 :
5589 2200 : i = 0;
5590 7818 : foreach(lc, grouping_target->exprs)
5591 : {
5592 5618 : Expr *expr = (Expr *) lfirst(lc);
5593 5618 : Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5594 :
5595 9438 : if (sgref && root->processed_groupClause &&
5596 3820 : get_sortgroupref_clause_noerr(sgref,
5597 : root->processed_groupClause) != NULL)
5598 : {
5599 : /*
5600 : * It's a grouping column, so add it to the partial_target as-is.
5601 : * (This allows the upper agg step to repeat the grouping calcs.)
5602 : */
5603 1906 : add_column_to_pathtarget(partial_target, expr, sgref);
5604 : }
5605 : else
5606 : {
5607 : /*
5608 : * Non-grouping column, so just remember the expression for later
5609 : * call to pull_var_clause.
5610 : */
5611 3712 : non_group_cols = lappend(non_group_cols, expr);
5612 : }
5613 :
5614 5618 : i++;
5615 : }
5616 :
5617 : /*
5618 : * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5619 : */
5620 2200 : if (havingQual)
5621 824 : non_group_cols = lappend(non_group_cols, havingQual);
5622 :
5623 : /*
5624 : * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5625 : * non-group cols (plus HAVING), and add them to the partial_target if not
5626 : * already present. (An expression used directly as a GROUP BY item will
5627 : * be present already.) Note this includes Vars used in resjunk items, so
5628 : * we are covering the needs of ORDER BY and window specifications.
5629 : */
5630 2200 : non_group_exprs = pull_var_clause((Node *) non_group_cols,
5631 : PVC_INCLUDE_AGGREGATES |
5632 : PVC_RECURSE_WINDOWFUNCS |
5633 : PVC_INCLUDE_PLACEHOLDERS);
5634 :
5635 2200 : add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5636 :
5637 : /*
5638 : * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5639 : * are at the top level of the target list, so we can just scan the list
5640 : * rather than recursing through the expression trees.
5641 : */
5642 8390 : foreach(lc, partial_target->exprs)
5643 : {
5644 6190 : Aggref *aggref = (Aggref *) lfirst(lc);
5645 :
5646 6190 : if (IsA(aggref, Aggref))
5647 : {
5648 : Aggref *newaggref;
5649 :
5650 : /*
5651 : * We shouldn't need to copy the substructure of the Aggref node,
5652 : * but flat-copy the node itself to avoid damaging other trees.
5653 : */
5654 4254 : newaggref = makeNode(Aggref);
5655 4254 : memcpy(newaggref, aggref, sizeof(Aggref));
5656 :
5657 : /* For now, assume serialization is required */
5658 4254 : mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5659 :
5660 4254 : lfirst(lc) = newaggref;
5661 : }
5662 : }
5663 :
5664 : /* clean up cruft */
5665 2200 : list_free(non_group_exprs);
5666 2200 : list_free(non_group_cols);
5667 :
5668 : /* XXX this causes some redundant cost calculation ... */
5669 2200 : return set_pathtarget_cost_width(root, partial_target);
5670 : }
5671 :
5672 : /*
5673 : * mark_partial_aggref
5674 : * Adjust an Aggref to make it represent a partial-aggregation step.
5675 : *
5676 : * The Aggref node is modified in-place; caller must do any copying required.
5677 : */
5678 : void
5679 7066 : mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5680 : {
5681 : /* aggtranstype should be computed by this point */
5682 : Assert(OidIsValid(agg->aggtranstype));
5683 : /* ... but aggsplit should still be as the parser left it */
5684 : Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5685 :
5686 : /* Mark the Aggref with the intended partial-aggregation mode */
5687 7066 : agg->aggsplit = aggsplit;
5688 :
5689 : /*
5690 : * Adjust result type if needed. Normally, a partial aggregate returns
5691 : * the aggregate's transition type; but if that's INTERNAL and we're
5692 : * serializing, it returns BYTEA instead.
5693 : */
5694 7066 : if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5695 : {
5696 5660 : if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5697 242 : agg->aggtype = BYTEAOID;
5698 : else
5699 5418 : agg->aggtype = agg->aggtranstype;
5700 : }
5701 7066 : }
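/*
 * [Illustrative sketch, not part of planner.c]  Split aggregation as
 * mark_partial_aggref sets it up: the partial step returns the
 * transition state (here sum/count for avg) instead of the final value,
 * and the final step combines states and finalizes.  Types are
 * simplified and names invented; real serialization of an INTERNAL
 * transition type goes through BYTEA, per the comment above.
 */
#include <stdio.h>

typedef struct
{
    double sum;
    long count;
} AvgState;                     /* stand-in for the transition type */

/* AGGSPLIT_INITIAL_SERIAL analogue: accumulate, skip the final func */
static AvgState
partial_avg(const double *vals, int n)
{
    AvgState s = {0.0, 0};

    for (int i = 0; i < n; i++)
    {
        s.sum += vals[i];
        s.count++;
    }
    return s;                   /* the transition state, not the avg */
}

/* AGGSPLIT_FINAL_DESERIAL analogue: combine states, then finalize */
static double
final_avg(const AvgState *states, int nstates)
{
    AvgState s = {0.0, 0};

    for (int i = 0; i < nstates; i++)
    {
        s.sum += states[i].sum;
        s.count += states[i].count;
    }
    return s.count ? s.sum / s.count : 0.0;
}

int
main(void)
{
    double w1[] = {1.0, 2.0};
    double w2[] = {3.0, 4.0, 5.0};
    AvgState states[2] = {partial_avg(w1, 2), partial_avg(w2, 3)};

    printf("avg = %g\n", final_avg(states, 2));     /* avg = 3 */
    return 0;
}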
5702 :
5703 : /*
5704 : * postprocess_setop_tlist
5705 : * Fix up targetlist returned by plan_set_operations().
5706 : *
5707 : * We need to transpose sort key info from the orig_tlist into new_tlist.
5708 : * NOTE: this would not be good enough if we supported resjunk sort keys
5709 : * for results of set operations --- then, we'd need to project a whole
5710 : * new tlist to evaluate the resjunk columns. For now, just ereport if we
5711 : * find any resjunk columns in orig_tlist.
5712 : */
5713 : static List *
5714 6174 : postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5715 : {
5716 : ListCell *l;
5717 6174 : ListCell *orig_tlist_item = list_head(orig_tlist);
5718 :
5719 24154 : foreach(l, new_tlist)
5720 : {
5721 17980 : TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5722 : TargetEntry *orig_tle;
5723 :
5724 : /* ignore resjunk columns in setop result */
5725 17980 : if (new_tle->resjunk)
5726 0 : continue;
5727 :
5728 : Assert(orig_tlist_item != NULL);
5729 17980 : orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5730 17980 : orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5731 17980 : if (orig_tle->resjunk) /* should not happen */
5732 0 : elog(ERROR, "resjunk output columns are not implemented");
5733 : Assert(new_tle->resno == orig_tle->resno);
5734 17980 : new_tle->ressortgroupref = orig_tle->ressortgroupref;
5735 : }
5736 6174 : if (orig_tlist_item != NULL)
5737 0 : elog(ERROR, "resjunk output columns are not implemented");
5738 6174 : return new_tlist;
5739 : }
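/*
 * [Illustrative sketch, not part of planner.c]  The transposition above,
 * reduced to its essentials: walk the two targetlists in lockstep, copy
 * the sort/group marker across, and fail loudly on a resjunk entry.
 * Struct fields are simplified and names invented.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    int resno;
    unsigned ressortgroupref;   /* 0 if not a sort/group column */
    int resjunk;
} Tle;

static void
transpose_sort_refs(Tle *new_tl, const Tle *orig_tl, int n)
{
    for (int i = 0; i < n; i++)
    {
        if (orig_tl[i].resjunk)
        {
            fprintf(stderr, "resjunk output columns are not implemented\n");
            exit(1);
        }
        new_tl[i].ressortgroupref = orig_tl[i].ressortgroupref;
    }
}

int
main(void)
{
    Tle orig_tl[] = {{1, 1, 0}, {2, 0, 0}}; /* column 1 is a sort key */
    Tle new_tl[] = {{1, 0, 0}, {2, 0, 0}};  /* fresh setop tlist */

    transpose_sort_refs(new_tl, orig_tl, 2);
    /* prints: 1 0 */
    printf("%u %u\n", new_tl[0].ressortgroupref, new_tl[1].ressortgroupref);
    return 0;
}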
5740 :
5741 : /*
5742 : * optimize_window_clauses
5743 : * Call each WindowFunc's prosupport function to see if we're able to
5744 : * make any adjustments to any of the WindowClauses so that the executor
5745 : * can execute the window functions more efficiently.
5746 : *
5747 : * Currently we only allow adjustments to the WindowClause's frameOptions. We
5748 : * may allow more things to be done here in the future.
5749 : */
5750 : static void
5751 2378 : optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5752 : {
5753 2378 : List *windowClause = root->parse->windowClause;
5754 : ListCell *lc;
5755 :
5756 4984 : foreach(lc, windowClause)
5757 : {
5758 2606 : WindowClause *wc = lfirst_node(WindowClause, lc);
5759 : ListCell *lc2;
5760 2606 : int optimizedFrameOptions = 0;
5761 :
5762 : Assert(wc->winref <= wflists->maxWinRef);
5763 :
5764 : /* skip any WindowClauses that have no WindowFuncs */
5765 2606 : if (wflists->windowFuncs[wc->winref] == NIL)
5766 24 : continue;
5767 :
5768 3122 : foreach(lc2, wflists->windowFuncs[wc->winref])
5769 : {
5770 : SupportRequestOptimizeWindowClause req;
5771 : SupportRequestOptimizeWindowClause *res;
5772 2624 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5773 : Oid prosupport;
5774 :
5775 2624 : prosupport = get_func_support(wfunc->winfnoid);
5776 :
5777 : /* Check if there's a support function for 'wfunc' */
5778 2624 : if (!OidIsValid(prosupport))
5779 2084 : break; /* can't optimize this WindowClause */
5780 :
5781 760 : req.type = T_SupportRequestOptimizeWindowClause;
5782 760 : req.window_clause = wc;
5783 760 : req.window_func = wfunc;
5784 760 : req.frameOptions = wc->frameOptions;
5785 :
5786 : /* call the support function */
5787 : res = (SupportRequestOptimizeWindowClause *)
5788 760 : DatumGetPointer(OidFunctionCall1(prosupport,
5789 : PointerGetDatum(&req)));
5790 :
5791 : /*
5792 : * Skip to the next WindowClause if the support function does not
5793 : * support this request type.
5794 : */
5795 760 : if (res == NULL)
5796 220 : break;
5797 :
5798 : /*
5799 : * Save these frameOptions for the first WindowFunc for this
5800 : * WindowClause.
5801 : */
5802 540 : if (foreach_current_index(lc2) == 0)
5803 516 : optimizedFrameOptions = res->frameOptions;
5804 :
5805 : /*
5806 : * On subsequent WindowFuncs, if the frameOptions are not the same
5807 : * then we're unable to optimize the frameOptions for this
5808 : * WindowClause.
5809 : */
5810 24 : else if (optimizedFrameOptions != res->frameOptions)
5811 0 : break; /* skip to the next WindowClause, if any */
5812 : }
5813 :
5814 : /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5815 2582 : if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5816 : {
5817 : ListCell *lc3;
5818 :
5819 : /* apply the new frame options */
5820 498 : wc->frameOptions = optimizedFrameOptions;
5821 :
5822 : /*
5823 : * We now check to see if changing the frameOptions has caused
5824 : * this WindowClause to be a duplicate of some other WindowClause.
5825 : * This can only happen if we have multiple WindowClauses, so
5826 : * don't bother if there's only 1.
5827 : */
5828 498 : if (list_length(windowClause) == 1)
5829 408 : continue;
5830 :
5831 : /*
5832 : * Do the duplicate check and reuse the existing WindowClause if
5833 : * we find a duplicate.
5834 : */
5835 228 : foreach(lc3, windowClause)
5836 : {
5837 174 : WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5838 :
5839 : /* skip over the WindowClause we're currently editing */
5840 174 : if (existing_wc == wc)
5841 54 : continue;
5842 :
5843 : /*
5844 : * Perform the same duplicate check that is done in
5845 : * transformWindowFuncCall.
5846 : */
5847 240 : if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5848 120 : equal(wc->orderClause, existing_wc->orderClause) &&
5849 120 : wc->frameOptions == existing_wc->frameOptions &&
5850 72 : equal(wc->startOffset, existing_wc->startOffset) &&
5851 36 : equal(wc->endOffset, existing_wc->endOffset))
5852 : {
5853 : ListCell *lc4;
5854 :
5855 : /*
5856 : * Now move each WindowFunc in 'wc' into 'existing_wc'.
5857 : * This requires adjusting each WindowFunc's winref and
5858 : * moving the WindowFuncs in 'wc' to the list of
5859 : * WindowFuncs in 'existing_wc'.
5860 : */
5861 78 : foreach(lc4, wflists->windowFuncs[wc->winref])
5862 : {
5863 42 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5864 :
5865 42 : wfunc->winref = existing_wc->winref;
5866 : }
5867 :
5868 : /* move list items */
5869 72 : wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5870 36 : wflists->windowFuncs[wc->winref]);
5871 36 : wflists->windowFuncs[wc->winref] = NIL;
5872 :
5873 : /*
5874 : * transformWindowFuncCall() should have made sure there
5875 : * are no other duplicates, so we needn't bother looking
5876 : * any further.
5877 : */
5878 36 : break;
5879 : }
5880 : }
5881 : }
5882 : }
5883 2378 : }
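/*
 * [Illustrative sketch, not part of planner.c]  The consensus rule
 * above, isolated: a clause's frameOptions may be rewritten only when
 * every window function in the clause proposes the same optimized
 * options.  Ints stand in for frame-option bitmasks; the helper name is
 * invented.
 */
#include <stdbool.h>
#include <stdio.h>

/*
 * All proposals must match for the rewrite to be safe; any disagreement
 * means the clause's frameOptions are left alone.
 */
static bool
frame_options_consensus(const int *proposals, int n, int *agreed)
{
    if (n == 0)
        return false;
    for (int i = 1; i < n; i++)
        if (proposals[i] != proposals[0])
            return false;
    *agreed = proposals[0];
    return true;
}

int
main(void)
{
    int ok[] = {0x412, 0x412};      /* both funcs propose the same options */
    int clash[] = {0x412, 0x530};   /* disagreement: no rewrite */
    int opts;

    /* prints: rewrite, then keep */
    printf("%s\n", frame_options_consensus(ok, 2, &opts) ? "rewrite" : "keep");
    printf("%s\n", frame_options_consensus(clash, 2, &opts) ? "rewrite" : "keep");
    return 0;
}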
5884 :
5885 : /*
5886 : * select_active_windows
5887 : * Create a list of the "active" window clauses (ie, those referenced
5888 : * by non-deleted WindowFuncs) in the order they are to be executed.
5889 : */
5890 : static List *
5891 2378 : select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
5892 : {
5893 2378 : List *windowClause = root->parse->windowClause;
5894 2378 : List *result = NIL;
5895 : ListCell *lc;
5896 2378 : int nActive = 0;
5897 2378 : WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
5898 2378 : * list_length(windowClause));
5899 :
5900 : /* First, construct an array of the active windows */
5901 4984 : foreach(lc, windowClause)
5902 : {
5903 2606 : WindowClause *wc = lfirst_node(WindowClause, lc);
5904 :
5905 : /* It's only active if wflists shows some related WindowFuncs */
5906 : Assert(wc->winref <= wflists->maxWinRef);
5907 2606 : if (wflists->windowFuncs[wc->winref] == NIL)
5908 60 : continue;
5909 :
5910 2546 : actives[nActive].wc = wc; /* original clause */
5911 :
5912 : /*
5913 : * For sorting, we want the list of partition keys followed by the
5914 : * list of sort keys. But pathkeys construction will remove duplicates
5915 : * between the two, so we can as well (even though we can't detect all
5916 : * of the duplicates, since some may come from ECs - that might mean
5917 : * we miss optimization chances here). We must, however, ensure that
5918 : * the order of entries is preserved with respect to the ones we do
5919 : * keep.
5920 : *
5921 : * partitionClause and orderClause had their own duplicates removed in
5922 : * parse analysis, so we're only concerned here with removing
5923 : * orderClause entries that also appear in partitionClause.
5924 : */
5925 5092 : actives[nActive].uniqueOrder =
5926 2546 : list_concat_unique(list_copy(wc->partitionClause),
5927 2546 : wc->orderClause);
5928 2546 : nActive++;
5929 : }
5930 :
5931 : /*
5932 : * Sort active windows by their partitioning/ordering clauses, ignoring
5933 : * any framing clauses, so that the windows that need the same sorting are
5934 : * adjacent in the list. When we come to generate paths, this will avoid
5935 : * inserting additional Sort nodes.
5936 : *
5937 : * This is how we implement a specific requirement from the SQL standard,
5938 : * which says that when two or more windows are order-equivalent (i.e.
5939 : * have matching partition and order clauses, even if their names or
5940 : * framing clauses differ), then all peer rows must be presented in the
5941 : * same order in all of them. If we allowed multiple sort nodes for such
5942 : * cases, we'd risk having the peer rows end up in different orders in
5943 : * equivalent windows due to sort instability. (See General Rule 4 of
5944 : * <window clause> in SQL2008 - SQL2016.)
5945 : *
5946 : * Additionally, if the entire list of clauses of one window is a prefix
5947 : * of another, put the window with the stronger sorting requirements
5948 : * first. This way we sort first for the stronger window, and won't have
5949 : * to sort again for the weaker one.
5950 : */
5951 2378 : qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5952 :
5953 : /* build ordered list of the original WindowClause nodes */
5954 4924 : for (int i = 0; i < nActive; i++)
5955 2546 : result = lappend(result, actives[i].wc);
5956 :
5957 2378 : pfree(actives);
5958 :
5959 2378 : return result;
5960 : }
5961 :
5962 : /*
5963 : * name_active_windows
5964 : * Ensure all active windows have unique names.
5965 : *
5966 : * The parser will have checked that user-assigned window names are unique
5967 : * within the Query. Here we assign made-up names to any unnamed
5968 : * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
5969 : * at parse time, because it'd mess up decompilation of views.)
5970 : *
5971 : * activeWindows: result of select_active_windows
5972 : */
5973 : static void
5974 2378 : name_active_windows(List *activeWindows)
5975 : {
5976 2378 : int next_n = 1;
5977 : char newname[16];
5978 : ListCell *lc;
5979 :
5980 4924 : foreach(lc, activeWindows)
5981 : {
5982 2546 : WindowClause *wc = lfirst_node(WindowClause, lc);
5983 :
5984 : /* Nothing to do if it has a name already. */
5985 2546 : if (wc->name)
5986 498 : continue;
5987 :
5988 : /* Select a name not currently present in the list. */
5989 : for (;;)
5990 6 : {
5991 : ListCell *lc2;
5992 :
5993 2054 : snprintf(newname, sizeof(newname), "w%d", next_n++);
5994 4456 : foreach(lc2, activeWindows)
5995 : {
5996 2408 : WindowClause *wc2 = lfirst_node(WindowClause, lc2);
5997 :
5998 2408 : if (wc2->name && strcmp(wc2->name, newname) == 0)
5999 6 : break; /* matched */
6000 : }
6001 2054 : if (lc2 == NULL)
6002 2048 : break; /* reached the end with no match */
6003 : }
6004 2048 : wc->name = pstrdup(newname);
6005 : }
6006 2378 : }
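/*
 * [Illustrative sketch, not part of planner.c]  The naming loop above,
 * standalone: try "w1", "w2", ... and skip any candidate that collides
 * with a user-assigned name.  Fixed-size buffers and names are invented
 * for the sketch.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    char names[3][16] = {"w1", "", ""};     /* "w1" was user-assigned */
    int nwindows = 3;
    int next_n = 1;

    for (int i = 0; i < nwindows; i++)
    {
        char newname[16];

        if (names[i][0] != '\0')
            continue;                       /* already named */
        for (;;)
        {
            int j;

            snprintf(newname, sizeof(newname), "w%d", next_n++);
            for (j = 0; j < nwindows; j++)
                if (strcmp(names[j], newname) == 0)
                    break;                  /* collision, try next number */
            if (j == nwindows)
                break;                      /* no match: name is free */
        }
        strcpy(names[i], newname);
    }
    printf("%s %s %s\n", names[0], names[1], names[2]);     /* w1 w2 w3 */
    return 0;
}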
6007 :
6008 : /*
6009 : * common_prefix_cmp
6010 : * QSort comparison function for WindowClauseSortData
6011 : *
6012 : * Sort the windows by the required sorting clauses. First, compare the sort
6013 : * clauses themselves. Second, if one window's clauses are a prefix of another
6014 : * one's clauses, put the window with more sort clauses first.
6015 : *
6016 : * We purposefully sort by the highest tleSortGroupRef first. Since
6017 : * tleSortGroupRefs are assigned to the query's DISTINCT and ORDER BY
6018 : * clauses first, and since we sort the lowest tleSortGroupRefs last, a
6019 : * WindowClause sharing a tleSortGroupRef with the query's DISTINCT or
6020 : * ORDER BY clause is more likely to become the final WindowAgg, which can
6021 : * then provide presorted input for the DISTINCT or ORDER BY step, thus
6022 : * reducing the total number of sorts required for the query.
6023 : */
6024 : static int
6025 186 : common_prefix_cmp(const void *a, const void *b)
6026 : {
6027 186 : const WindowClauseSortData *wcsa = a;
6028 186 : const WindowClauseSortData *wcsb = b;
6029 : ListCell *item_a;
6030 : ListCell *item_b;
6031 :
6032 330 : forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6033 : {
6034 246 : SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6035 246 : SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6036 :
6037 246 : if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6038 102 : return -1;
6039 234 : else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6040 66 : return 1;
6041 168 : else if (sca->sortop > scb->sortop)
6042 0 : return -1;
6043 168 : else if (sca->sortop < scb->sortop)
6044 24 : return 1;
6045 144 : else if (sca->nulls_first && !scb->nulls_first)
6046 0 : return -1;
6047 144 : else if (!sca->nulls_first && scb->nulls_first)
6048 0 : return 1;
6049 : /* no need to compare eqop, since it is fully determined by sortop */
6050 : }
6051 :
6052 84 : if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6053 6 : return -1;
6054 78 : else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6055 30 : return 1;
6056 :
6057 48 : return 0;
6058 : }
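/*
 * [Illustrative sketch, not part of planner.c]  The comparator above,
 * reduced to tleSortGroupRefs and applied with qsort: windows sharing a
 * sort prefix end up adjacent, higher refs sort first, and a longer
 * (stronger) requirement precedes its own prefix.  The struct and names
 * are simplified stand-ins for WindowClauseSortData.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    const char *name;
    int refs[4];                /* uniqueOrder as tleSortGroupRefs */
    int nrefs;
} WinSort;

static int
prefix_cmp(const void *a, const void *b)
{
    const WinSort *wa = a;
    const WinSort *wb = b;
    int n = wa->nrefs < wb->nrefs ? wa->nrefs : wb->nrefs;

    for (int i = 0; i < n; i++)
    {
        if (wa->refs[i] > wb->refs[i])
            return -1;          /* higher sortgroupref sorts first */
        if (wa->refs[i] < wb->refs[i])
            return 1;
    }
    /* prefix case: the window with more sort clauses goes first */
    return (wb->nrefs > wa->nrefs) - (wa->nrefs > wb->nrefs);
}

int
main(void)
{
    WinSort w[] = {
        {"w_ab", {3, 2}, 2},
        {"w_a", {3}, 1},
        {"w_c", {5}, 1},
    };

    qsort(w, 3, sizeof(WinSort), prefix_cmp);
    for (int i = 0; i < 3; i++)
        printf("%s ", w[i].name);
    printf("\n");   /* w_c w_ab w_a: 5 first, then {3,2} before its prefix {3} */
    return 0;
}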
6059 :
6060 : /*
6061 : * make_window_input_target
6062 : * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6063 : *
6064 : * When the query has window functions, this function computes the target
6065 : * to be emitted by the node just below the first WindowAgg.
6066 : * This tlist must contain all values needed to evaluate the window functions,
6067 : * compute the final target list, and perform any required final sort step.
6068 : * If multiple WindowAggs are needed, each intermediate one adds its window
6069 : * function results onto this base tlist; only the topmost WindowAgg computes
6070 : * the actual desired target list.
6071 : *
6072 : * This function is much like make_group_input_target, though not quite enough
6073 : * like it to share code. As in that function, we flatten most expressions
6074 : * into their component variables. But we do not want to flatten window
6075 : * PARTITION BY/ORDER BY clauses, since that might result in multiple
6076 : * evaluations of them, which would be bad (possibly even resulting in
6077 : * inconsistent answers, if they contain volatile functions).
6078 : * Also, we must not flatten GROUP BY clauses that were left unflattened by
6079 : * make_group_input_target, because we may no longer have access to the
6080 : * individual Vars in them.
6081 : *
6082 : * Another key difference from make_group_input_target is that we don't
6083 : * flatten Aggref expressions, since those are to be computed below the
6084 : * window functions and just referenced like Vars above that.
6085 : *
6086 : * 'final_target' is the query's final target list (in PathTarget form)
6087 : * 'activeWindows' is the list of active windows previously identified by
6088 : * select_active_windows.
6089 : *
6090 : * The result is the PathTarget to be computed by the plan node immediately
6091 : * below the first WindowAgg node.
6092 : */
6093 : static PathTarget *
6094 2378 : make_window_input_target(PlannerInfo *root,
6095 : PathTarget *final_target,
6096 : List *activeWindows)
6097 : {
6098 : PathTarget *input_target;
6099 : Bitmapset *sgrefs;
6100 : List *flattenable_cols;
6101 : List *flattenable_vars;
6102 : int i;
6103 : ListCell *lc;
6104 :
6105 : Assert(root->parse->hasWindowFuncs);
6106 :
6107 : /*
6108 : * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6109 : * into a bitmapset for convenient reference below.
6110 : */
6111 2378 : sgrefs = NULL;
6112 4924 : foreach(lc, activeWindows)
6113 : {
6114 2546 : WindowClause *wc = lfirst_node(WindowClause, lc);
6115 : ListCell *lc2;
6116 :
6117 3290 : foreach(lc2, wc->partitionClause)
6118 : {
6119 744 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6120 :
6121 744 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6122 : }
6123 4722 : foreach(lc2, wc->orderClause)
6124 : {
6125 2176 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6126 :
6127 2176 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6128 : }
6129 : }
6130 :
6131 : /* Add in sortgroupref numbers of GROUP BY clauses, too */
6132 2564 : foreach(lc, root->processed_groupClause)
6133 : {
6134 186 : SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6135 :
6136 186 : sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6137 : }
6138 :
6139 : /*
6140 : * Construct a target containing all the non-flattenable targetlist items,
6141 : * and save aside the others for a moment.
6142 : */
6143 2378 : input_target = create_empty_pathtarget();
6144 2378 : flattenable_cols = NIL;
6145 :
6146 2378 : i = 0;
6147 10238 : foreach(lc, final_target->exprs)
6148 : {
6149 7860 : Expr *expr = (Expr *) lfirst(lc);
6150 7860 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
6151 :
6152 : /*
6153 : * Don't want to deconstruct window clauses or GROUP BY items. (Note
6154 : * that such items can't contain window functions, so it's okay to
6155 : * compute them below the WindowAgg nodes.)
6156 : */
6157 7860 : if (sgref != 0 && bms_is_member(sgref, sgrefs))
6158 : {
6159 : /*
6160 : * Don't want to deconstruct this value, so add it to the input
6161 : * target as-is.
6162 : */
6163 2774 : add_column_to_pathtarget(input_target, expr, sgref);
6164 : }
6165 : else
6166 : {
6167 : /*
6168 : * Column is to be flattened, so just remember the expression for
6169 : * later call to pull_var_clause.
6170 : */
6171 5086 : flattenable_cols = lappend(flattenable_cols, expr);
6172 : }
6173 :
6174 7860 : i++;
6175 : }
6176 :
6177 : /*
6178 : * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6179 : * add them to the input target if not already present. (Some might be
6180 : * there already because they're used directly as window/group clauses.)
6181 : *
6182 : * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6183 : * Aggrefs are placed in the Agg node's tlist and not left to be computed
6184 : * at higher levels. On the other hand, we should recurse into
6185 : * WindowFuncs to make sure their input expressions are available.
6186 : */
6187 2378 : flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6188 : PVC_INCLUDE_AGGREGATES |
6189 : PVC_RECURSE_WINDOWFUNCS |
6190 : PVC_INCLUDE_PLACEHOLDERS);
6191 2378 : add_new_columns_to_pathtarget(input_target, flattenable_vars);
6192 :
6193 : /* clean up cruft */
6194 2378 : list_free(flattenable_vars);
6195 2378 : list_free(flattenable_cols);
6196 :
6197 : /* XXX this causes some redundant cost calculation ... */
6198 2378 : return set_pathtarget_cost_width(root, input_target);
6199 : }
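A worked example (hypothetical query):

/*
 * In
 *
 *     SELECT empno, salary + 100, sum(salary) OVER (PARTITION BY depname)
 *     FROM emp;
 *
 * the only collected sgref is depname's, so depname goes into the input
 * target as-is. "salary + 100" and the WindowFunc are flattenable:
 * pull_var_clause() recurses into them and adds empno and salary. The
 * node below the WindowAgg thus emits {depname, empno, salary}, and the
 * addition is computed above the window level.
 */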
6200 :
6201 : /*
6202 : * make_pathkeys_for_window
6203 : * Create a pathkeys list describing the required input ordering
6204 : * for the given WindowClause.
6205 : *
6206 : * Modifies wc's partitionClause to remove any clauses which are deemed
6207 : * redundant by the pathkey logic.
6208 : *
6209 : * The required ordering is first the PARTITION keys, then the ORDER keys.
6210 : * In the future we might try to implement windowing using hashing, in which
6211 : * case the ordering could be relaxed, but for now we always sort.
6212 : */
6213 : static List *
6214 5132 : make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6215 : List *tlist)
6216 : {
6217 5132 : List *window_pathkeys = NIL;
6218 :
6219 : /* Throw error if can't sort */
6220 5132 : if (!grouping_is_sortable(wc->partitionClause))
6221 0 : ereport(ERROR,
6222 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6223 : errmsg("could not implement window PARTITION BY"),
6224 : errdetail("Window partitioning columns must be of sortable datatypes.")));
6225 5132 : if (!grouping_is_sortable(wc->orderClause))
6226 0 : ereport(ERROR,
6227 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6228 : errmsg("could not implement window ORDER BY"),
6229 : errdetail("Window ordering columns must be of sortable datatypes.")));
6230 :
6231 : /*
6232 : * First fetch the pathkeys for the PARTITION BY clause. We can safely
6233 : * remove any clauses from the wc->partitionClause for redundant pathkeys.
6234 : */
6235 5132 : if (wc->partitionClause != NIL)
6236 : {
6237 : bool sortable;
6238 :
6239 1290 : window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6240 : &wc->partitionClause,
6241 : tlist,
6242 : true,
6243 : false,
6244 : &sortable,
6245 : false);
6246 :
6247 : Assert(sortable);
6248 : }
6249 :
6250 : /*
6251 : * In principle, we could also consider removing redundant ORDER BY items
6252 : * too, as doing so does not alter the result of peer row checks done by
6253 : * the executor. However, we must *not* remove the ordering column for
6254 : * RANGE OFFSET cases, as the executor needs that for in_range tests even
6255 : * if it's known to be equal to some partitioning column.
6256 : */
6257 5132 : if (wc->orderClause != NIL)
6258 : {
6259 : List *orderby_pathkeys;
6260 :
6261 4276 : orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6262 : wc->orderClause,
6263 : tlist);
6264 :
6265 : /* Okay, make the combined pathkeys */
6266 4276 : if (window_pathkeys != NIL)
6267 934 : window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6268 : else
6269 3342 : window_pathkeys = orderby_pathkeys;
6270 : }
6271 :
6272 5132 : return window_pathkeys;
6273 : }
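An example of the redundancy removal (hypothetical query):

/*
 * In
 *
 *     SELECT ... FROM t WHERE a = 1
 *     WINDOW w AS (PARTITION BY a ORDER BY b)
 *
 * the pathkey machinery knows "a" is constant, so the PARTITION BY key is
 * redundant: it is removed from wc->partitionClause and the required
 * ordering collapses to just (b). wc->orderClause is never trimmed this
 * way, since the executor may still need an ordering column for RANGE
 * OFFSET's in_range tests.
 */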
6274 :
6275 : /*
6276 : * make_sort_input_target
6277 : * Generate appropriate PathTarget for initial input to Sort step.
6278 : *
6279 : * If the query has ORDER BY, this function chooses the target to be computed
6280 : * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6281 : * project) steps. This might or might not be identical to the query's final
6282 : * output target.
6283 : *
6284 : * The main argument for keeping the sort-input tlist the same as the final
6285 : * is that we avoid a separate projection node (which will be needed if
6286 : * they're different, because Sort can't project). However, there are also
6287 : * advantages to postponing tlist evaluation till after the Sort: it ensures
6288 : * a consistent order of evaluation for any volatile functions in the tlist,
6289 : * and if there's also a LIMIT, we can stop the query without ever computing
6290 : * tlist functions for later rows, which is beneficial for both volatile and
6291 : * expensive functions.
6292 : *
6293 : * Our current policy is to postpone volatile expressions till after the sort
6294 : * unconditionally (assuming that that's possible, ie they are in plain tlist
6295 : * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6296 : * postpone set-returning expressions, because running them beforehand would
6297 : * bloat the sort dataset, and because it might cause unexpected output order
6298 : * if the sort isn't stable. However there's a constraint on that: all SRFs
6299 : * in the tlist should be evaluated at the same plan step, so that they can
6300 : * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6301 : * mustn't postpone any SRFs. (Note that in principle that policy should
6302 : * probably get applied to the group/window input targetlists too, but we
6303 : * have not done that historically.) Lastly, expensive expressions are
6304 : * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6305 : * partial evaluation of the query is possible (if neither is true, we expect
6306 : * to have to evaluate the expressions for every row anyway), or if there are
6307 : * any volatile or set-returning expressions (since once we've put in a
6308 : * projection at all, it won't cost any more to postpone more stuff).
6309 : *
6310 : * Another issue that could potentially be considered here is that
6311 : * evaluating tlist expressions could result in data that's either wider
6312 : * or narrower than the input Vars, thus changing the volume of data that
6313 : * has to go through the Sort. However, we usually have only a very bad
6314 : * idea of the output width of any expression more complex than a Var,
6315 : * so for now it seems too risky to try to optimize on that basis.
6316 : *
6317 : * Note that if we do produce a modified sort-input target, and then the
6318 : * query ends up not using an explicit Sort, no particular harm is done:
6319 : * we'll initially use the modified target for the preceding path nodes,
6320 : * but then change them to the final target with apply_projection_to_path.
6321 : * Moreover, in such a case the guarantees about evaluation order of
6322 : * volatile functions still hold, since the rows are sorted already.
6323 : *
6324 : * This function has some things in common with make_group_input_target and
6325 : * make_window_input_target, though the detailed rules for what to do are
6326 : * different. We never flatten/postpone any grouping or ordering columns;
6327 : * those are needed before the sort. If we do flatten a particular
6328 : * expression, we leave Aggref and WindowFunc nodes alone, since those were
6329 : * computed earlier.
6330 : *
6331 : * 'final_target' is the query's final target list (in PathTarget form)
6332 : * 'have_postponed_srfs' is an output argument, see below
6333 : *
6334 : * The result is the PathTarget to be computed by the plan node immediately
6335 : * below the Sort step (and the Distinct step, if any). This will be
6336 : * exactly final_target if we decide a projection step wouldn't be helpful.
6337 : *
6338 : * In addition, *have_postponed_srfs is set to true if we choose to postpone
6339 : * any set-returning functions to after the Sort.
6340 : */
6341 : static PathTarget *
6342 72222 : make_sort_input_target(PlannerInfo *root,
6343 : PathTarget *final_target,
6344 : bool *have_postponed_srfs)
6345 : {
6346 72222 : Query *parse = root->parse;
6347 : PathTarget *input_target;
6348 : int ncols;
6349 : bool *col_is_srf;
6350 : bool *postpone_col;
6351 : bool have_srf;
6352 : bool have_volatile;
6353 : bool have_expensive;
6354 : bool have_srf_sortcols;
6355 : bool postpone_srfs;
6356 : List *postponable_cols;
6357 : List *postponable_vars;
6358 : int i;
6359 : ListCell *lc;
6360 :
6361 : /* Shouldn't get here unless query has ORDER BY */
6362 : Assert(parse->sortClause);
6363 :
6364 72222 : *have_postponed_srfs = false; /* default result */
6365 :
6366 : /* Inspect tlist and collect per-column information */
6367 72222 : ncols = list_length(final_target->exprs);
6368 72222 : col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6369 72222 : postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6370 72222 : have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6371 :
6372 72222 : i = 0;
6373 444946 : foreach(lc, final_target->exprs)
6374 : {
6375 372724 : Expr *expr = (Expr *) lfirst(lc);
6376 :
6377 : /*
6378 : * If the column has a sortgroupref, assume it has to be evaluated
6379 : * before sorting. Generally such columns would be ORDER BY, GROUP
6380 : * BY, etc targets. One exception is columns that were removed from
6381 : * GROUP BY by remove_useless_groupby_columns() ... but those would
6382 : * only be Vars anyway. There don't seem to be any cases where it
6383 : * would be worth the trouble to double-check.
6384 : */
6385 372724 : if (get_pathtarget_sortgroupref(final_target, i) == 0)
6386 : {
6387 : /*
6388 : * Check for SRF or volatile functions. Check the SRF case first
6389 : * because we must know whether we have any postponed SRFs.
6390 : */
6391 270724 : if (parse->hasTargetSRFs &&
6392 216 : expression_returns_set((Node *) expr))
6393 : {
6394 : /* We'll decide below whether these are postponable */
6395 96 : col_is_srf[i] = true;
6396 96 : have_srf = true;
6397 : }
6398 270412 : else if (contain_volatile_functions((Node *) expr))
6399 : {
6400 : /* Unconditionally postpone */
6401 148 : postpone_col[i] = true;
6402 148 : have_volatile = true;
6403 : }
6404 : else
6405 : {
6406 : /*
6407 : * Else check the cost. XXX it's annoying to have to do this
6408 : * when set_pathtarget_cost_width() just did it. Refactor to
6409 : * allow sharing the work?
6410 : */
6411 : QualCost cost;
6412 :
6413 270264 : cost_qual_eval_node(&cost, (Node *) expr, root);
6414 :
6415 : /*
6416 : * We arbitrarily define "expensive" as "more than 10X
6417 : * cpu_operator_cost". Note that this threshold will catch any PL
6418 : * function with default cost.
6419 : */
6420 270264 : if (cost.per_tuple > 10 * cpu_operator_cost)
6421 : {
6422 17210 : postpone_col[i] = true;
6423 17210 : have_expensive = true;
6424 : }
6425 : }
6426 : }
6427 : else
6428 : {
6429 : /* For sortgroupref cols, just check if any contain SRFs */
6430 102216 : if (!have_srf_sortcols &&
6431 102526 : parse->hasTargetSRFs &&
6432 310 : expression_returns_set((Node *) expr))
6433 124 : have_srf_sortcols = true;
6434 : }
6435 :
6436 372724 : i++;
6437 : }
6438 :
6439 : /*
6440 : * We can postpone SRFs if we have some but none are in sortgroupref cols.
6441 : */
6442 72222 : postpone_srfs = (have_srf && !have_srf_sortcols);
6443 :
6444 : /*
6445 : * If we don't need a post-sort projection, just return final_target.
6446 : */
6447 72222 : if (!(postpone_srfs || have_volatile ||
6448 72018 : (have_expensive &&
6449 10104 : (parse->limitCount || root->tuple_fraction > 0))))
6450 71982 : return final_target;
6451 :
6452 : /*
6453 : * Report whether the post-sort projection will contain set-returning
6454 : * functions. This is important because it affects whether the Sort can
6455 : * rely on the query's LIMIT (if any) to bound the number of rows it needs
6456 : * to return.
6457 : */
6458 240 : *have_postponed_srfs = postpone_srfs;
6459 :
6460 : /*
6461 : * Construct the sort-input target, taking all non-postponable columns and
6462 : * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6463 : * the postponable ones.
6464 : */
6465 240 : input_target = create_empty_pathtarget();
6466 240 : postponable_cols = NIL;
6467 :
6468 240 : i = 0;
6469 1990 : foreach(lc, final_target->exprs)
6470 : {
6471 1750 : Expr *expr = (Expr *) lfirst(lc);
6472 :
6473 1750 : if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6474 298 : postponable_cols = lappend(postponable_cols, expr);
6475 : else
6476 1452 : add_column_to_pathtarget(input_target, expr,
6477 1452 : get_pathtarget_sortgroupref(final_target, i));
6478 :
6479 1750 : i++;
6480 : }
6481 :
6482 : /*
6483 : * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6484 : * postponable columns, and add them to the sort-input target if not
6485 : * already present. (Some might be there already.) We mustn't
6486 : * deconstruct Aggrefs or WindowFuncs here, since the projection node
6487 : * would be unable to recompute them.
6488 : */
6489 240 : postponable_vars = pull_var_clause((Node *) postponable_cols,
6490 : PVC_INCLUDE_AGGREGATES |
6491 : PVC_INCLUDE_WINDOWFUNCS |
6492 : PVC_INCLUDE_PLACEHOLDERS);
6493 240 : add_new_columns_to_pathtarget(input_target, postponable_vars);
6494 :
6495 : /* clean up cruft */
6496 240 : list_free(postponable_vars);
6497 240 : list_free(postponable_cols);
6498 :
6499 : /* XXX this represents even more redundant cost calculation ... */
6500 240 : return set_pathtarget_cost_width(root, input_target);
6501 : }
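A worked example of the postponement policy (the query and slow_fn are hypothetical):

/*
 * In
 *
 *     SELECT x, random(), slow_fn(y) FROM t ORDER BY x LIMIT 10;
 *
 * only x carries a sortgroupref. random() is volatile and is postponed
 * unconditionally; slow_fn(y) is postponed as "expensive" if its per-tuple
 * cost exceeds 10 * cpu_operator_cost (0.025 with the default setting of
 * 0.0025); once a projection is being added anyway, postponing it costs
 * nothing extra. The sort-input target becomes {x, y}, so with the LIMIT
 * both expressions run for only the ten surviving rows, above the Sort.
 */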
6502 :
6503 : /*
6504 : * get_cheapest_fractional_path
6505 : * Find the cheapest path for retrieving a specified fraction of all
6506 : * the tuples expected to be returned by the given relation.
6507 : *
6508 : * Do not consider parameterized paths. If the caller needs a path for an
6509 : * upper rel, it can't have parameterized paths; and if the caller needs an
6510 : * append subpath, requiring all the subpaths to share a compatible
6511 : * parameterization would be unduly limiting.
6512 : *
6513 : * We interpret tuple_fraction the same way as grouping_planner.
6514 : *
6515 : * We assume set_cheapest() has been run on the given rel.
6516 : */
6517 : Path *
6518 492638 : get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6519 : {
6520 492638 : Path *best_path = rel->cheapest_total_path;
6521 : ListCell *l;
6522 :
6523 : /* If all tuples will be retrieved, just return the cheapest-total path */
6524 492638 : if (tuple_fraction <= 0.0)
6525 483122 : return best_path;
6526 :
6527 : /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6528 9516 : if (tuple_fraction >= 1.0 && best_path->rows > 0)
6529 3968 : tuple_fraction /= best_path->rows;
6530 :
6531 25134 : foreach(l, rel->pathlist)
6532 : {
6533 15618 : Path *path = (Path *) lfirst(l);
6534 :
6535 15618 : if (path->param_info)
6536 200 : continue;
6537 :
6538 21320 : if (path == rel->cheapest_total_path ||
6539 5902 : compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6540 14902 : continue;
6541 :
6542 516 : best_path = path;
6543 : }
6544 :
6545 9516 : return best_path;
6546 : }
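To make the fraction rule concrete, here is a minimal standalone sketch (plain C; the two-field struct stands in for Path) of the linear interpolation used by compare_fractional_path_costs, ignoring the clamping the real function applies at the fraction boundaries:

#include <stdio.h>

typedef struct
{
	double		startup_cost;
	double		total_cost;
} DemoPath;

/* Estimated cost of fetching the first 'fraction' of a path's rows */
static double
fractional_cost(const DemoPath *p, double fraction)
{
	return p->startup_cost + fraction * (p->total_cost - p->startup_cost);
}

int
main(void)
{
	DemoPath	seqscan_sort = {100.0, 120.0};	/* high startup, cheap rest */
	DemoPath	indexscan = {0.5, 500.0};	/* cheap startup, pricey total */

	/* For 1% of the rows the indexscan wins; for all rows the sort does. */
	printf("1%%:   %.2f vs %.2f\n",
		   fractional_cost(&seqscan_sort, 0.01),
		   fractional_cost(&indexscan, 0.01));
	printf("100%%: %.2f vs %.2f\n",
		   fractional_cost(&seqscan_sort, 1.0),
		   fractional_cost(&indexscan, 1.0));
	return 0;
}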
6547 :
6548 : /*
6549 : * adjust_paths_for_srfs
6550 : * Fix up the Paths of the given upperrel to handle tSRFs properly.
6551 : *
6552 : * The executor can only handle set-returning functions that appear at the
6553 : * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6554 : * that are not at top level, we need to split up the evaluation into multiple
6555 : * plan levels in which each level satisfies this constraint. This function
6556 : * modifies each Path of an upperrel that (might) compute any SRFs in its
6557 : * output tlist to insert appropriate projection steps.
6558 : *
6559 : * The given targets and targets_contain_srfs lists are from
6560 : * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6561 : * target in targets.
6562 : */
6563 : static void
6564 12682 : adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
6565 : List *targets, List *targets_contain_srfs)
6566 : {
6567 : ListCell *lc;
6568 :
6569 : Assert(list_length(targets) == list_length(targets_contain_srfs));
6570 : Assert(!linitial_int(targets_contain_srfs));
6571 :
6572 : /* If no SRFs appear at this plan level, nothing to do */
6573 12682 : if (list_length(targets) == 1)
6574 634 : return;
6575 :
6576 : /*
6577 : * Stack SRF-evaluation nodes atop each path for the rel.
6578 : *
6579 : * In principle we should re-run set_cheapest() here to identify the
6580 : * cheapest path, but it seems unlikely that adding the same tlist eval
6581 : * costs to all the paths would change that, so we don't bother. Instead,
6582 : * just assume that the cheapest-startup and cheapest-total paths remain
6583 : * so. (There should be no parameterized paths anymore, so we needn't
6584 : * worry about updating cheapest_parameterized_paths.)
6585 : */
6586 24120 : foreach(lc, rel->pathlist)
6587 : {
6588 12072 : Path *subpath = (Path *) lfirst(lc);
6589 12072 : Path *newpath = subpath;
6590 : ListCell *lc1,
6591 : *lc2;
6592 :
6593 : Assert(subpath->param_info == NULL);
6594 37398 : forboth(lc1, targets, lc2, targets_contain_srfs)
6595 : {
6596 25326 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6597 25326 : bool contains_srfs = (bool) lfirst_int(lc2);
6598 :
6599 : /* If this level doesn't contain SRFs, do regular projection */
6600 25326 : if (contains_srfs)
6601 12132 : newpath = (Path *) create_set_projection_path(root,
6602 : rel,
6603 : newpath,
6604 : thistarget);
6605 : else
6606 13194 : newpath = (Path *) apply_projection_to_path(root,
6607 : rel,
6608 : newpath,
6609 : thistarget);
6610 : }
6611 12072 : lfirst(lc) = newpath;
6612 12072 : if (subpath == rel->cheapest_startup_path)
6613 378 : rel->cheapest_startup_path = newpath;
6614 12072 : if (subpath == rel->cheapest_total_path)
6615 378 : rel->cheapest_total_path = newpath;
6616 : }
6617 :
6618 : /* Likewise for partial paths, if any */
6619 12054 : foreach(lc, rel->partial_pathlist)
6620 : {
6621 6 : Path *subpath = (Path *) lfirst(lc);
6622 6 : Path *newpath = subpath;
6623 : ListCell *lc1,
6624 : *lc2;
6625 :
6626 : Assert(subpath->param_info == NULL);
6627 24 : forboth(lc1, targets, lc2, targets_contain_srfs)
6628 : {
6629 18 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6630 18 : bool contains_srfs = (bool) lfirst_int(lc2);
6631 :
6632 : /* If this level doesn't contain SRFs, do regular projection */
6633 18 : if (contains_srfs)
6634 6 : newpath = (Path *) create_set_projection_path(root,
6635 : rel,
6636 : newpath,
6637 : thistarget);
6638 : else
6639 : {
6640 : /* avoid apply_projection_to_path, in case of multiple refs */
6641 12 : newpath = (Path *) create_projection_path(root,
6642 : rel,
6643 : newpath,
6644 : thistarget);
6645 : }
6646 : }
6647 6 : lfirst(lc) = newpath;
6648 : }
6649 : }
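A shape example may help (hypothetical query):

/*
 * For
 *
 *     SELECT generate_series(1, x) + 1 FROM t;
 *
 * split_pathtarget_at_srfs() produces three levels: {x} (SRF-free, as the
 * initial Assert requires), {generate_series(1, x)}, and {srf_output + 1}.
 * The loop above therefore stacks a ProjectSet node for the middle level
 * and an ordinary projection for the top one onto each path of the rel.
 */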
6650 :
6651 : /*
6652 : * expression_planner
6653 : * Perform planner's transformations on a standalone expression.
6654 : *
6655 : * Various utility commands need to evaluate expressions that are not part
6656 : * of a plannable query. They can do so using the executor's regular
6657 : * expression-execution machinery, but first the expression has to be fed
6658 : * through here to transform it from parser output to something executable.
6659 : *
6660 : * Currently, we disallow sublinks in standalone expressions, so there's no
6661 : * real "planning" involved here. (That might not always be true though.)
6662 : * What we must do is run eval_const_expressions to ensure that any function
6663 : * calls are converted to positional notation and function default arguments
6664 : * get inserted. The fact that constant subexpressions get simplified is a
6665 : * side-effect that is useful when the expression will get evaluated more than
6666 : * once. Also, we must fix operator function IDs.
6667 : *
6668 : * This does not return any information about dependencies of the expression.
6669 : * Hence callers should use the results only for the duration of the current
6670 : * query. Callers that would like to cache the results for longer should use
6671 : * expression_planner_with_deps, probably via the plancache.
6672 : *
6673 : * Note: this must not make any damaging changes to the passed-in expression
6674 : * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6675 : * we first do an expression_tree_mutator-based walk, what is returned will
6676 : * be a new node tree.) The result is constructed in the current memory
6677 : * context; beware that this can leak a lot of additional stuff there, too.
6678 : */
6679 : Expr *
6680 245008 : expression_planner(Expr *expr)
6681 : {
6682 : Node *result;
6683 :
6684 : /*
6685 : * Convert named-argument function calls, insert default arguments and
6686 : * simplify constant subexprs
6687 : */
6688 245008 : result = eval_const_expressions(NULL, (Node *) expr);
6689 :
6690 : /* Fill in opfuncid values if missing */
6691 244990 : fix_opfuncids(result);
6692 :
6693 244990 : return (Expr *) result;
6694 : }
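A hedged sketch of a typical caller, assuming a Var-free expression (CreateStandaloneExprContext, ExecInitExpr, and ExecEvalExpr are the regular executor entry points; error handling and cleanup are elided):

	Expr	   *expr = ...;		/* parser output, e.g. a DEFAULT expression */
	ExprContext *econtext = CreateStandaloneExprContext();
	ExprState  *exprstate;
	Datum		value;
	bool		isnull;

	expr = expression_planner(expr);	/* simplify, insert defaults, fix opfuncids */
	exprstate = ExecInitExpr(expr, NULL);	/* compile for execution */
	value = ExecEvalExpr(exprstate, econtext, &isnull);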
6695 :
6696 : /*
6697 : * expression_planner_with_deps
6698 : * Perform planner's transformations on a standalone expression,
6699 : * returning expression dependency information along with the result.
6700 : *
6701 : * This is identical to expression_planner() except that it also returns
6702 : * information about possible dependencies of the expression, ie identities of
6703 : * objects whose definitions affect the result. As in a PlannedStmt, these
6704 : * are expressed as a list of relation Oids and a list of PlanInvalItems.
6705 : */
6706 : Expr *
6707 376 : expression_planner_with_deps(Expr *expr,
6708 : List **relationOids,
6709 : List **invalItems)
6710 : {
6711 : Node *result;
6712 : PlannerGlobal glob;
6713 : PlannerInfo root;
6714 :
6715 : /* Make up dummy planner state so we can use setrefs machinery */
6716 8648 : MemSet(&glob, 0, sizeof(glob));
6717 376 : glob.type = T_PlannerGlobal;
6718 376 : glob.relationOids = NIL;
6719 376 : glob.invalItems = NIL;
6720 :
6721 33464 : MemSet(&root, 0, sizeof(root));
6722 376 : root.type = T_PlannerInfo;
6723 376 : root.glob = &glob;
6724 :
6725 : /*
6726 : * Convert named-argument function calls, insert default arguments and
6727 : * simplify constant subexprs. Collect identities of inlined functions
6728 : * and elided domains, too.
6729 : */
6730 376 : result = eval_const_expressions(&root, (Node *) expr);
6731 :
6732 : /* Fill in opfuncid values if missing */
6733 376 : fix_opfuncids(result);
6734 :
6735 : /*
6736 : * Now walk the finished expression to find anything else we ought to
6737 : * record as an expression dependency.
6738 : */
6739 376 : (void) extract_query_dependencies_walker(result, &root);
6740 :
6741 376 : *relationOids = glob.relationOids;
6742 376 : *invalItems = glob.invalItems;
6743 :
6744 376 : return (Expr *) result;
6745 : }
6746 :
6747 :
6748 : /*
6749 : * plan_cluster_use_sort
6750 : * Use the planner to decide how CLUSTER should implement sorting
6751 : *
6752 : * tableOid is the OID of a table to be clustered on its index indexOid
6753 : * (which is already known to be a btree index). Decide whether it's
6754 : * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6755 : * Return true to use sorting, false to use an indexscan.
6756 : *
6757 : * Note: caller had better already hold some type of lock on the table.
6758 : */
6759 : bool
6760 188 : plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6761 : {
6762 : PlannerInfo *root;
6763 : Query *query;
6764 : PlannerGlobal *glob;
6765 : RangeTblEntry *rte;
6766 : RelOptInfo *rel;
6767 : IndexOptInfo *indexInfo;
6768 : QualCost indexExprCost;
6769 : Cost comparisonCost;
6770 : Path *seqScanPath;
6771 : Path seqScanAndSortPath;
6772 : IndexPath *indexScanPath;
6773 : ListCell *lc;
6774 :
6775 : /* We can short-circuit the cost comparison if indexscans are disabled */
6776 188 : if (!enable_indexscan)
6777 30 : return true; /* use sort */
6778 :
6779 : /* Set up mostly-dummy planner state */
6780 158 : query = makeNode(Query);
6781 158 : query->commandType = CMD_SELECT;
6782 :
6783 158 : glob = makeNode(PlannerGlobal);
6784 :
6785 158 : root = makeNode(PlannerInfo);
6786 158 : root->parse = query;
6787 158 : root->glob = glob;
6788 158 : root->query_level = 1;
6789 158 : root->planner_cxt = CurrentMemoryContext;
6790 158 : root->wt_param_id = -1;
6791 158 : root->join_domains = list_make1(makeNode(JoinDomain));
6792 :
6793 : /* Build a minimal RTE for the rel */
6794 158 : rte = makeNode(RangeTblEntry);
6795 158 : rte->rtekind = RTE_RELATION;
6796 158 : rte->relid = tableOid;
6797 158 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6798 158 : rte->rellockmode = AccessShareLock;
6799 158 : rte->lateral = false;
6800 158 : rte->inh = false;
6801 158 : rte->inFromCl = true;
6802 158 : query->rtable = list_make1(rte);
6803 158 : addRTEPermissionInfo(&query->rteperminfos, rte);
6804 :
6805 : /* Set up RTE/RelOptInfo arrays */
6806 158 : setup_simple_rel_arrays(root);
6807 :
6808 : /* Build RelOptInfo */
6809 158 : rel = build_simple_rel(root, 1, NULL);
6810 :
6811 : /* Locate IndexOptInfo for the target index */
6812 158 : indexInfo = NULL;
6813 196 : foreach(lc, rel->indexlist)
6814 : {
6815 196 : indexInfo = lfirst_node(IndexOptInfo, lc);
6816 196 : if (indexInfo->indexoid == indexOid)
6817 158 : break;
6818 : }
6819 :
6820 : /*
6821 : * It's possible that get_relation_info did not generate an IndexOptInfo
6822 : * for the desired index; this could happen if it's not yet reached its
6823 : * indcheckxmin usability horizon, or if it's a system index and we're
6824 : * ignoring system indexes. In such cases we should tell CLUSTER to not
6825 : * trust the index contents but use seqscan-and-sort.
6826 : */
6827 158 : if (lc == NULL) /* not in the list? */
6828 0 : return true; /* use sort */
6829 :
6830 : /*
6831 : * Rather than doing all the pushups that would be needed to use
6832 : * set_baserel_size_estimates, just do a quick hack for rows and width.
6833 : */
6834 158 : rel->rows = rel->tuples;
6835 158 : rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6836 :
6837 158 : root->total_table_pages = rel->pages;
6838 :
6839 : /*
6840 : * Determine eval cost of the index expressions, if any. We need to
6841 : * charge twice that amount for each tuple comparison that happens during
6842 : * the sort, since tuplesort.c will have to re-evaluate the index
6843 : * expressions each time. (XXX that's pretty inefficient...)
6844 : */
6845 158 : cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6846 158 : comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6847 :
6848 : /* Estimate the cost of seq scan + sort */
6849 158 : seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6850 158 : cost_sort(&seqScanAndSortPath, root, NIL,
6851 : seqScanPath->disabled_nodes,
6852 158 : seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6853 : comparisonCost, maintenance_work_mem, -1.0);
6854 :
6855 : /* Estimate the cost of index scan */
6856 158 : indexScanPath = create_index_path(root, indexInfo,
6857 : NIL, NIL, NIL, NIL,
6858 : ForwardScanDirection, false,
6859 : NULL, 1.0, false);
6860 :
6861 158 : return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6862 : }
6863 :
6864 : /*
6865 : * plan_create_index_workers
6866 : * Use the planner to decide how many parallel worker processes
6867 : * CREATE INDEX should request for use
6868 : *
6869 : * tableOid is the table on which the index is to be built. indexOid is the
6870 : * OID of an index to be created or reindexed (which must be an index with
6871 : * support for parallel builds - currently btree, GIN, or BRIN).
6872 : *
6873 : * Return value is the number of parallel worker processes to request. It
6874 : * may be unsafe to proceed if this is 0. Note that this does not include the
6875 : * leader participating as a worker (value is always a number of parallel
6876 : * worker processes).
6877 : *
6878 : * Note: caller had better already hold some type of lock on the table and
6879 : * index.
6880 : */
6881 : int
6882 36386 : plan_create_index_workers(Oid tableOid, Oid indexOid)
6883 : {
6884 : PlannerInfo *root;
6885 : Query *query;
6886 : PlannerGlobal *glob;
6887 : RangeTblEntry *rte;
6888 : Relation heap;
6889 : Relation index;
6890 : RelOptInfo *rel;
6891 : int parallel_workers;
6892 : BlockNumber heap_blocks;
6893 : double reltuples;
6894 : double allvisfrac;
6895 :
6896 : /*
6897 : * We don't allow performing parallel operation in standalone backend or
6898 : * when parallelism is disabled.
6899 : */
6900 36386 : if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
6901 514 : return 0;
6902 :
6903 : /* Set up largely-dummy planner state */
6904 35872 : query = makeNode(Query);
6905 35872 : query->commandType = CMD_SELECT;
6906 :
6907 35872 : glob = makeNode(PlannerGlobal);
6908 :
6909 35872 : root = makeNode(PlannerInfo);
6910 35872 : root->parse = query;
6911 35872 : root->glob = glob;
6912 35872 : root->query_level = 1;
6913 35872 : root->planner_cxt = CurrentMemoryContext;
6914 35872 : root->wt_param_id = -1;
6915 35872 : root->join_domains = list_make1(makeNode(JoinDomain));
6916 :
6917 : /*
6918 : * Build a minimal RTE.
6919 : *
6920 : * Mark the RTE with inh = true. This is a kludge to prevent
6921 : * get_relation_info() from fetching index info, which is necessary
6922 : * because that code does not expect to find an IndexOptInfo for an
6923 : * index that is currently undergoing REINDEX.
6924 : */
6925 35872 : rte = makeNode(RangeTblEntry);
6926 35872 : rte->rtekind = RTE_RELATION;
6927 35872 : rte->relid = tableOid;
6928 35872 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6929 35872 : rte->rellockmode = AccessShareLock;
6930 35872 : rte->lateral = false;
6931 35872 : rte->inh = true;
6932 35872 : rte->inFromCl = true;
6933 35872 : query->rtable = list_make1(rte);
6934 35872 : addRTEPermissionInfo(&query->rteperminfos, rte);
6935 :
6936 : /* Set up RTE/RelOptInfo arrays */
6937 35872 : setup_simple_rel_arrays(root);
6938 :
6939 : /* Build RelOptInfo */
6940 35872 : rel = build_simple_rel(root, 1, NULL);
6941 :
6942 : /* Rels are assumed already locked by the caller */
6943 35872 : heap = table_open(tableOid, NoLock);
6944 35872 : index = index_open(indexOid, NoLock);
6945 :
6946 : /*
6947 : * Determine if it's safe to proceed.
6948 : *
6949 : * Currently, parallel workers can't access the leader's temporary tables.
6950 : * Furthermore, any index predicate or index expressions must be parallel
6951 : * safe.
6952 : */
6953 35872 : if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
6954 33838 : !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
6955 33718 : !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
6956 : {
6957 2154 : parallel_workers = 0;
6958 2154 : goto done;
6959 : }
6960 :
6961 : /*
6962 : * If parallel_workers storage parameter is set for the table, accept that
6963 : * as the number of parallel worker processes to launch (though still cap
6964 : * at max_parallel_maintenance_workers). Note that we deliberately do not
6965 : * consider any other factor (e.g., memory use by workers) when
6966 : * parallel_workers is set.
6967 : */
6968 33718 : if (rel->rel_parallel_workers != -1)
6969 : {
6970 24 : parallel_workers = Min(rel->rel_parallel_workers,
6971 : max_parallel_maintenance_workers);
6972 24 : goto done;
6973 : }
6974 :
6975 : /*
6976 : * Estimate heap relation size ourselves, since rel->pages cannot be
6977 : * trusted (heap RTE was marked as inheritance parent)
6978 : */
6979 33694 : estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
6980 :
6981 : /*
6982 : * Determine number of workers to scan the heap relation using generic
6983 : * model
6984 : */
6985 33694 : parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
6986 : max_parallel_maintenance_workers);
6987 :
6988 : /*
6989 : * Cap workers based on available maintenance_work_mem as needed.
6990 : *
6991 : * Note that each tuplesort participant receives an even share of the
6992 : * total maintenance_work_mem budget. Aim to leave participants
6993 : * (including the leader as a participant) with no less than 32MB of
6994 : * memory. This puts a maintenance_work_mem setting of 64MB just past
6995 : * the threshold at which a single parallel worker can be launched to
6996 : * sort.
6997 : */
6998 33850 : while (parallel_workers > 0 &&
6999 314 : maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7000 156 : parallel_workers--;
7001 :
7002 33694 : done:
7003 35872 : index_close(index, NoLock);
7004 35872 : table_close(heap, NoLock);
7005 :
7006 35872 : return parallel_workers;
7007 : }
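A minimal standalone sketch of the capping rule above (maintenance_work_mem is a kilobyte-valued GUC, hence the 32 * 1024 floor):

#include <stdio.h>

/* Returns the worker count after applying the 32MB-per-participant floor. */
static int
cap_workers_by_mem(int parallel_workers, int maintenance_work_mem_kb)
{
	while (parallel_workers > 0 &&
		   maintenance_work_mem_kb / (parallel_workers + 1) < 32 * 1024)
		parallel_workers--;
	return parallel_workers;
}

int
main(void)
{
	/* 64MB: 65536/2 == 32768, so exactly one worker survives the cap */
	printf("%d\n", cap_workers_by_mem(4, 64 * 1024));	/* -> 1 */
	/* 96MB: 98304/3 == 32768, so two workers survive */
	printf("%d\n", cap_workers_by_mem(4, 96 * 1024));	/* -> 2 */
	return 0;
}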
7008 :
7009 : /*
7010 : * add_paths_to_grouping_rel
7011 : *
7012 : * Add non-partial paths to grouping relation.
7013 : */
7014 : static void
7015 39996 : add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
7016 : RelOptInfo *grouped_rel,
7017 : RelOptInfo *partially_grouped_rel,
7018 : const AggClauseCosts *agg_costs,
7019 : grouping_sets_data *gd, double dNumGroups,
7020 : GroupPathExtraData *extra)
7021 : {
7022 39996 : Query *parse = root->parse;
7023 39996 : Path *cheapest_path = input_rel->cheapest_total_path;
7024 : ListCell *lc;
7025 39996 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7026 39996 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7027 39996 : List *havingQual = (List *) extra->havingQual;
7028 39996 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7029 :
7030 39996 : if (can_sort)
7031 : {
7032 : /*
7033 : * Use any available suitably-sorted path as input, and also consider
7034 : * sorting the cheapest-total path and incremental sort on any paths
7035 : * with presorted keys.
7036 : */
7037 82656 : foreach(lc, input_rel->pathlist)
7038 : {
7039 : ListCell *lc2;
7040 42666 : Path *path = (Path *) lfirst(lc);
7041 42666 : Path *path_save = path;
7042 42666 : List *pathkey_orderings = NIL;
7043 :
7044 : /* generate alternative group orderings that might be useful */
7045 42666 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7046 :
7047 : Assert(list_length(pathkey_orderings) > 0);
7048 :
7049 85476 : foreach(lc2, pathkey_orderings)
7050 : {
7051 42810 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7052 :
7053 : /* restore the path (we replace it in the loop) */
7054 42810 : path = path_save;
7055 :
7056 42810 : path = make_ordered_path(root,
7057 : grouped_rel,
7058 : path,
7059 : cheapest_path,
7060 : info->pathkeys,
7061 : -1.0);
7062 42810 : if (path == NULL)
7063 368 : continue;
7064 :
7065 : /* Now decide what to stick atop it */
7066 42442 : if (parse->groupingSets)
7067 : {
7068 938 : consider_groupingsets_paths(root, grouped_rel,
7069 : path, true, can_hash,
7070 : gd, agg_costs, dNumGroups);
7071 : }
7072 41504 : else if (parse->hasAggs)
7073 : {
7074 : /*
7075 : * We have aggregation, possibly with plain GROUP BY. Make
7076 : * an AggPath.
7077 : */
7078 40732 : add_path(grouped_rel, (Path *)
7079 40732 : create_agg_path(root,
7080 : grouped_rel,
7081 : path,
7082 40732 : grouped_rel->reltarget,
7083 40732 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7084 : AGGSPLIT_SIMPLE,
7085 : info->clauses,
7086 : havingQual,
7087 : agg_costs,
7088 : dNumGroups));
7089 : }
7090 772 : else if (parse->groupClause)
7091 : {
7092 : /*
7093 : * We have GROUP BY without aggregation or grouping sets.
7094 : * Make a GroupPath.
7095 : */
7096 772 : add_path(grouped_rel, (Path *)
7097 772 : create_group_path(root,
7098 : grouped_rel,
7099 : path,
7100 : info->clauses,
7101 : havingQual,
7102 : dNumGroups));
7103 : }
7104 : else
7105 : {
7106 : /* Other cases should have been handled above */
7107 : Assert(false);
7108 : }
7109 : }
7110 : }
7111 :
7112 : /*
7113 : * Instead of operating directly on the input relation, we can
7114 : * consider finalizing a partially aggregated path.
7115 : */
7116 39990 : if (partially_grouped_rel != NULL)
7117 : {
7118 3994 : foreach(lc, partially_grouped_rel->pathlist)
7119 : {
7120 : ListCell *lc2;
7121 2412 : Path *path = (Path *) lfirst(lc);
7122 2412 : Path *path_save = path;
7123 2412 : List *pathkey_orderings = NIL;
7124 :
7125 : /* generate alternative group orderings that might be useful */
7126 2412 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7127 :
7128 : Assert(list_length(pathkey_orderings) > 0);
7129 :
7130 : /* process all potentially interesting grouping reorderings */
7131 4824 : foreach(lc2, pathkey_orderings)
7132 : {
7133 2412 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7134 :
7135 : /* restore the path (we replace it in the loop) */
7136 2412 : path = path_save;
7137 :
7138 2412 : path = make_ordered_path(root,
7139 : grouped_rel,
7140 : path,
7141 2412 : partially_grouped_rel->cheapest_total_path,
7142 : info->pathkeys,
7143 : -1.0);
7144 :
7145 2412 : if (path == NULL)
7146 108 : continue;
7147 :
7148 2304 : if (parse->hasAggs)
7149 2056 : add_path(grouped_rel, (Path *)
7150 2056 : create_agg_path(root,
7151 : grouped_rel,
7152 : path,
7153 2056 : grouped_rel->reltarget,
7154 2056 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7155 : AGGSPLIT_FINAL_DESERIAL,
7156 : info->clauses,
7157 : havingQual,
7158 : agg_final_costs,
7159 : dNumGroups));
7160 : else
7161 248 : add_path(grouped_rel, (Path *)
7162 248 : create_group_path(root,
7163 : grouped_rel,
7164 : path,
7165 : info->clauses,
7166 : havingQual,
7167 : dNumGroups));
7168 :
7169 : }
7170 : }
7171 : }
7172 : }
7173 :
7174 39996 : if (can_hash)
7175 : {
7176 4982 : if (parse->groupingSets)
7177 : {
7178 : /*
7179 : * Try for a hash-only groupingsets path over unsorted input.
7180 : */
7181 794 : consider_groupingsets_paths(root, grouped_rel,
7182 : cheapest_path, false, true,
7183 : gd, agg_costs, dNumGroups);
7184 : }
7185 : else
7186 : {
7187 : /*
7188 : * Generate a HashAgg Path. We just need an Agg over the
7189 : * cheapest-total input path, since input order won't matter.
7190 : */
7191 4188 : add_path(grouped_rel, (Path *)
7192 4188 : create_agg_path(root, grouped_rel,
7193 : cheapest_path,
7194 4188 : grouped_rel->reltarget,
7195 : AGG_HASHED,
7196 : AGGSPLIT_SIMPLE,
7197 : root->processed_groupClause,
7198 : havingQual,
7199 : agg_costs,
7200 : dNumGroups));
7201 : }
7202 :
7203 : /*
7204 : * Generate a Finalize HashAgg Path atop of the cheapest partially
7205 : * grouped path, assuming there is one
7206 : */
7207 4982 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7208 : {
7209 784 : Path *path = partially_grouped_rel->cheapest_total_path;
7210 :
7211 784 : add_path(grouped_rel, (Path *)
7212 784 : create_agg_path(root,
7213 : grouped_rel,
7214 : path,
7215 784 : grouped_rel->reltarget,
7216 : AGG_HASHED,
7217 : AGGSPLIT_FINAL_DESERIAL,
7218 : root->processed_groupClause,
7219 : havingQual,
7220 : agg_final_costs,
7221 : dNumGroups));
7222 : }
7223 : }
7224 :
7225 : /*
7226 : * When partitionwise aggregate is used, we might have fully aggregated
7227 : * paths in the partial pathlist, because add_paths_to_append_rel() will
7228 : * consider a path for grouped_rel consisting of a Parallel Append of
7229 : * non-partial paths from each child.
7230 : */
7231 39996 : if (grouped_rel->partial_pathlist != NIL)
7232 162 : gather_grouping_paths(root, grouped_rel);
7233 39996 : }
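In plan-shape terms, the alternatives added above look roughly like this (illustrative):

/*
 * sorted grouping:                      hashed grouping:
 *
 *     GroupAggregate (or Group)             HashAggregate
 *       -> Sort / Incremental Sort            -> cheapest input path
 *            -> input path
 *
 * and, when a partially grouped rel exists:
 *
 *     Finalize GroupAggregate               Finalize HashAggregate
 *       -> Sort                               -> partial-agg path
 *            -> partial-agg path
 *
 * add_path() keeps whichever of these survive cost comparison.
 */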
7234 :
7235 : /*
7236 : * create_partial_grouping_paths
7237 : *
7238 : * Create a new upper relation representing the result of partial aggregation
7239 : * and populate it with appropriate paths. Note that we don't finalize the
7240 : * lists of paths here, so the caller can add additional partial or non-partial
7241 : * paths and must afterward call gather_grouping_paths and set_cheapest on
7242 : * the returned upper relation.
7243 : *
7244 : * All paths for this new upper relation -- both partial and non-partial --
7245 : * have been partially aggregated but require a subsequent FinalizeAggregate
7246 : * step.
7247 : *
7248 : * NB: This function is allowed to return NULL if it determines that there is
7249 : * no real need to create a new RelOptInfo.
7250 : */
7251 : static RelOptInfo *
7252 35584 : create_partial_grouping_paths(PlannerInfo *root,
7253 : RelOptInfo *grouped_rel,
7254 : RelOptInfo *input_rel,
7255 : grouping_sets_data *gd,
7256 : GroupPathExtraData *extra,
7257 : bool force_rel_creation)
7258 : {
7259 35584 : Query *parse = root->parse;
7260 : RelOptInfo *partially_grouped_rel;
7261 35584 : AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7262 35584 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7263 35584 : Path *cheapest_partial_path = NULL;
7264 35584 : Path *cheapest_total_path = NULL;
7265 35584 : double dNumPartialGroups = 0;
7266 35584 : double dNumPartialPartialGroups = 0;
7267 : ListCell *lc;
7268 35584 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7269 35584 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7270 :
7271 : /*
7272 : * Consider whether we should generate partially aggregated non-partial
7273 : * paths. We can only do this if we have a non-partial path, and only if
7274 : * the parent of the input rel is performing partial partitionwise
7275 : * aggregation. (Note that extra->patype is the type of partitionwise
7276 : * aggregation being used at the parent level, not this level.)
7277 : */
7278 35584 : if (input_rel->pathlist != NIL &&
7279 35584 : extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7280 618 : cheapest_total_path = input_rel->cheapest_total_path;
7281 :
7282 : /*
7283 : * If parallelism is possible for grouped_rel, then we should consider
7284 : * generating partially-grouped partial paths. However, if the input rel
7285 : * has no partial paths, then we can't.
7286 : */
7287 35584 : if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7288 1784 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
7289 :
7290 : /*
7291 : * If we can't partially aggregate partial paths, and we can't partially
7292 : * aggregate non-partial paths, then don't bother creating the new
7293 : * RelOptInfo at all, unless the caller specified force_rel_creation.
7294 : */
7295 35584 : if (cheapest_total_path == NULL &&
7296 33482 : cheapest_partial_path == NULL &&
7297 33482 : !force_rel_creation)
7298 33384 : return NULL;
7299 :
7300 : /*
7301 : * Build a new upper relation to represent the result of partially
7302 : * aggregating the rows from the input relation.
7303 : */
7304 2200 : partially_grouped_rel = fetch_upper_rel(root,
7305 : UPPERREL_PARTIAL_GROUP_AGG,
7306 : grouped_rel->relids);
7307 2200 : partially_grouped_rel->consider_parallel =
7308 2200 : grouped_rel->consider_parallel;
7309 2200 : partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7310 2200 : partially_grouped_rel->serverid = grouped_rel->serverid;
7311 2200 : partially_grouped_rel->userid = grouped_rel->userid;
7312 2200 : partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7313 2200 : partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7314 :
7315 : /*
7316 : * Build target list for partial aggregate paths. These paths cannot just
7317 : * emit the same tlist as regular aggregate paths, because (1) we must
7318 : * include Vars and Aggrefs needed in HAVING, which might not appear in
7319 : * the result tlist, and (2) the Aggrefs must be set in partial mode.
7320 : */
7321 2200 : partially_grouped_rel->reltarget =
7322 2200 : make_partial_grouping_target(root, grouped_rel->reltarget,
7323 : extra->havingQual);
7324 :
7325 2200 : if (!extra->partial_costs_set)
7326 : {
7327 : /*
7328 : * Collect statistics about aggregates for estimating costs of
7329 : * performing aggregation in parallel.
7330 : */
7331 7764 : MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7332 7764 : MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7333 1294 : if (parse->hasAggs)
7334 : {
7335 : /* partial phase */
7336 1160 : get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7337 : agg_partial_costs);
7338 :
7339 : /* final phase */
7340 1160 : get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7341 : agg_final_costs);
7342 : }
7343 :
7344 1294 : extra->partial_costs_set = true;
7345 : }
7346 :
7347 : /* Estimate number of partial groups. */
7348 2200 : if (cheapest_total_path != NULL)
7349 : dNumPartialGroups =
7350 618 : get_number_of_groups(root,
7351 : cheapest_total_path->rows,
7352 : gd,
7353 : extra->targetList);
7354 2200 : if (cheapest_partial_path != NULL)
7355 : dNumPartialPartialGroups =
7356 1784 : get_number_of_groups(root,
7357 : cheapest_partial_path->rows,
7358 : gd,
7359 : extra->targetList);
7360 :
7361 2200 : if (can_sort && cheapest_total_path != NULL)
7362 : {
7363 : /* This should have been checked previously */
7364 : Assert(parse->hasAggs || parse->groupClause);
7365 :
7366 : /*
7367 : * Use any available suitably-sorted path as input, and also consider
7368 : * sorting the cheapest partial path.
7369 : */
7370 1236 : foreach(lc, input_rel->pathlist)
7371 : {
7372 : ListCell *lc2;
7373 618 : Path *path = (Path *) lfirst(lc);
7374 618 : Path *path_save = path;
7375 618 : List *pathkey_orderings = NIL;
7376 :
7377 : /* generate alternative group orderings that might be useful */
7378 618 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7379 :
7380 : Assert(list_length(pathkey_orderings) > 0);
7381 :
7382 : /* process all potentially interesting grouping reorderings */
7383 1236 : foreach(lc2, pathkey_orderings)
7384 : {
7385 618 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7386 :
7387 : /* restore the path (we replace it in the loop) */
7388 618 : path = path_save;
7389 :
7390 618 : path = make_ordered_path(root,
7391 : partially_grouped_rel,
7392 : path,
7393 : cheapest_total_path,
7394 : info->pathkeys,
7395 : -1.0);
7396 :
7397 618 : if (path == NULL)
7398 0 : continue;
7399 :
7400 618 : if (parse->hasAggs)
7401 546 : add_path(partially_grouped_rel, (Path *)
7402 546 : create_agg_path(root,
7403 : partially_grouped_rel,
7404 : path,
7405 546 : partially_grouped_rel->reltarget,
7406 546 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7407 : AGGSPLIT_INITIAL_SERIAL,
7408 : info->clauses,
7409 : NIL,
7410 : agg_partial_costs,
7411 : dNumPartialGroups));
7412 : else
7413 72 : add_path(partially_grouped_rel, (Path *)
7414 72 : create_group_path(root,
7415 : partially_grouped_rel,
7416 : path,
7417 : info->clauses,
7418 : NIL,
7419 : dNumPartialGroups));
7420 : }
7421 : }
7422 : }
7423 :
7424 2200 : if (can_sort && cheapest_partial_path != NULL)
7425 : {
7426 : /* Similar to above logic, but for partial paths. */
7427 3580 : foreach(lc, input_rel->partial_pathlist)
7428 : {
7429 : ListCell *lc2;
7430 1796 : Path *path = (Path *) lfirst(lc);
7431 1796 : Path *path_save = path;
7432 1796 : List *pathkey_orderings = NIL;
7433 :
7434 : /* generate alternative group orderings that might be useful */
7435 1796 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7436 :
7437 : Assert(list_length(pathkey_orderings) > 0);
7438 :
7439 : /* process all potentially interesting grouping reorderings */
7440 3592 : foreach(lc2, pathkey_orderings)
7441 : {
7442 1796 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7443 :
7445 : /* restore the path (we replace it in the loop) */
7446 1796 : path = path_save;
7447 :
7448 1796 : path = make_ordered_path(root,
7449 : partially_grouped_rel,
7450 : path,
7451 : cheapest_partial_path,
7452 : info->pathkeys,
7453 : -1.0);
7454 :
7455 1796 : if (path == NULL)
7456 6 : continue;
7457 :
7458 1790 : if (parse->hasAggs)
7459 1668 : add_partial_path(partially_grouped_rel, (Path *)
7460 1668 : create_agg_path(root,
7461 : partially_grouped_rel,
7462 : path,
7463 1668 : partially_grouped_rel->reltarget,
7464 1668 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7465 : AGGSPLIT_INITIAL_SERIAL,
7466 : info->clauses,
7467 : NIL,
7468 : agg_partial_costs,
7469 : dNumPartialPartialGroups));
7470 : else
7471 122 : add_partial_path(partially_grouped_rel, (Path *)
7472 122 : create_group_path(root,
7473 : partially_grouped_rel,
7474 : path,
7475 : info->clauses,
7476 : NIL,
7477 : dNumPartialPartialGroups));
7478 : }
7479 : }
7480 : }
7481 :
7482 : /*
7483 : * Add a partially-grouped HashAgg Path where possible
7484 : */
7485 2200 : if (can_hash && cheapest_total_path != NULL)
7486 : {
7487 : /* Checked above */
7488 : Assert(parse->hasAggs || parse->groupClause);
7489 :
7490 618 : add_path(partially_grouped_rel, (Path *)
7491 618 : create_agg_path(root,
7492 : partially_grouped_rel,
7493 : cheapest_total_path,
7494 618 : partially_grouped_rel->reltarget,
7495 : AGG_HASHED,
7496 : AGGSPLIT_INITIAL_SERIAL,
7497 : root->processed_groupClause,
7498 : NIL,
7499 : agg_partial_costs,
7500 : dNumPartialGroups));
7501 : }
7502 :
7503 : /*
7504 : * Now add a partially-grouped HashAgg partial Path where possible
7505 : */
7506 2200 : if (can_hash && cheapest_partial_path != NULL)
7507 : {
7508 986 : add_partial_path(partially_grouped_rel, (Path *)
7509 986 : create_agg_path(root,
7510 : partially_grouped_rel,
7511 : cheapest_partial_path,
7512 986 : partially_grouped_rel->reltarget,
7513 : AGG_HASHED,
7514 : AGGSPLIT_INITIAL_SERIAL,
7515 : root->processed_groupClause,
7516 : NIL,
7517 : agg_partial_costs,
7518 : dNumPartialPartialGroups));
7519 : }
7520 :
7521 : /*
7522 : * If there is an FDW that's responsible for all baserels of the query,
7523 : * let it consider adding partially grouped ForeignPaths.
7524 : */
7525 2200 : if (partially_grouped_rel->fdwroutine &&
7526 6 : partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7527 : {
7528 6 : FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7529 :
7530 6 : fdwroutine->GetForeignUpperPaths(root,
7531 : UPPERREL_PARTIAL_GROUP_AGG,
7532 : input_rel, partially_grouped_rel,
7533 : extra);
7534 : }
7535 :
7536 2200 : return partially_grouped_rel;
7537 : }
7538 :
7539 : /*
7540 : * make_ordered_path
7541 : * Return a path ordered by 'pathkeys' based on the given 'path'. May
7542 : * return NULL if it doesn't make sense to generate an ordered path in
7543 : * this case.
7544 : */
7545 : static Path *
7546 52696 : make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
7547 : Path *cheapest_path, List *pathkeys, double limit_tuples)
7548 : {
7549 : bool is_sorted;
7550 : int presorted_keys;
7551 :
7552 52696 : is_sorted = pathkeys_count_contained_in(pathkeys,
7553 : path->pathkeys,
7554 : &presorted_keys);
7555 :
7556 52696 : if (!is_sorted)
7557 : {
7558 : /*
7559 : * Try at least sorting the cheapest path and also try incrementally
7560 : * sorting any path which is partially sorted already (no need to deal
7561 : * with paths which have presorted keys when incremental sort is
7562 : * disabled unless it's the cheapest input path).
7563 : */
7564 12946 : if (path != cheapest_path &&
7565 2052 : (presorted_keys == 0 || !enable_incremental_sort))
7566 1044 : return NULL;
7567 :
7568 : /*
7569 : * We've no need to consider both a sort and incremental sort. We'll
7570 : * just do a sort if there are no presorted keys and an incremental
7571 : * sort when there are presorted keys.
7572 : */
7573 11902 : if (presorted_keys == 0 || !enable_incremental_sort)
7574 10726 : path = (Path *) create_sort_path(root,
7575 : rel,
7576 : path,
7577 : pathkeys,
7578 : limit_tuples);
7579 : else
7580 1176 : path = (Path *) create_incremental_sort_path(root,
7581 : rel,
7582 : path,
7583 : pathkeys,
7584 : presorted_keys,
7585 : limit_tuples);
7586 : }
7587 :
7588 51652 : return path;
7589 : }
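The branches above reduce to a small decision table (restating the code, nothing more):

/*
 * input already sorted by 'pathkeys' ............. return path unchanged
 * unsorted, path is not the cheapest, and it has
 *   no presorted keys (or incremental sort is
 *   disabled) ..................................... return NULL
 * unsorted, no presorted keys (or incremental
 *   sort disabled) ................................ full Sort
 * unsorted, some presorted keys, incremental
 *   sort enabled .................................. Incremental Sort
 */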
7590 :
7591 : /*
7592 : * Generate Gather and Gather Merge paths for a grouping relation or partial
7593 : * grouping relation.
7594 : *
7595 : * generate_useful_gather_paths does most of the work, but we also consider a
7596 : * special case: we could try sorting the data by the group_pathkeys and then
7597 : * applying Gather Merge.
7598 : *
7599 : * NB: This function shouldn't be used for anything other than a grouped or
7600 : * partially grouped relation, not only because it explicitly references
7601 : * group_pathkeys but also because we pass "true" as the third argument to
7602 : * generate_useful_gather_paths().
7603 : */
7604 : static void
7605 1646 : gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7606 : {
7607 : ListCell *lc;
7608 : Path *cheapest_partial_path;
7609 : List *groupby_pathkeys;
7610 :
7611 : /*
7612 : * This occurs after any partial aggregation has taken place, so trim off
7613 : * any pathkeys added for ORDER BY / DISTINCT aggregates.
7614 : */
7615 1646 : if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7616 18 : groupby_pathkeys = list_copy_head(root->group_pathkeys,
7617 : root->num_groupby_pathkeys);
7618 : else
7619 1628 : groupby_pathkeys = root->group_pathkeys;
7620 :
7621 : /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7622 1646 : generate_useful_gather_paths(root, rel, true);
7623 :
7624 1646 : cheapest_partial_path = linitial(rel->partial_pathlist);
7625 :
7626 : /* XXX Shouldn't this also consider the group-key-reordering? */
7627 3898 : foreach(lc, rel->partial_pathlist)
7628 : {
7629 2252 : Path *path = (Path *) lfirst(lc);
7630 : bool is_sorted;
7631 : int presorted_keys;
7632 : double total_groups;
7633 :
7634 2252 : is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7635 : path->pathkeys,
7636 : &presorted_keys);
7637 :
7638 2252 : if (is_sorted)
7639 1472 : continue;
7640 :
7641 : /*
7642 : * Try at least sorting the cheapest path and also try incrementally
7643 : * sorting any path which is partially sorted already (no need to deal
7644 : * with paths which have presorted keys when incremental sort is
7645 : * disabled unless it's the cheapest input path).
7646 : */
7647 780 : if (path != cheapest_partial_path &&
7648 0 : (presorted_keys == 0 || !enable_incremental_sort))
7649 0 : continue;
7650 :
7651 : /*
7652 : * We've no need to consider both a sort and incremental sort. We'll
7653 : * just do a sort if there are no presorted keys and an incremental
7654 : * sort when there are presorted keys.
7655 : */
7656 780 : if (presorted_keys == 0 || !enable_incremental_sort)
7657 780 : path = (Path *) create_sort_path(root, rel, path,
7658 : groupby_pathkeys,
7659 : -1.0);
7660 : else
7661 0 : path = (Path *) create_incremental_sort_path(root,
7662 : rel,
7663 : path,
7664 : groupby_pathkeys,
7665 : presorted_keys,
7666 : -1.0);
7667 780 : total_groups = compute_gather_rows(path);
7668 : path = (Path *)
7669 780 : create_gather_merge_path(root,
7670 : rel,
7671 : path,
7672 780 : rel->reltarget,
7673 : groupby_pathkeys,
7674 : NULL,
7675 : &total_groups);
7676 :
7677 780 : add_path(rel, path);
7678 : }
7679 1646 : }
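
The list_copy_head() trim at the top of gather_grouping_paths() simply keeps the leading GROUP BY pathkeys and drops any trailing ones that were appended for ORDER BY / DISTINCT aggregates. A minimal standalone model (trim_group_pathkeys is a hypothetical stand-in for the List manipulation):

    #include <stdio.h>

    /* Keep at most the first num_groupby_pathkeys entries. */
    static int
    trim_group_pathkeys(int npathkeys, int num_groupby_pathkeys)
    {
        return npathkeys > num_groupby_pathkeys ? num_groupby_pathkeys : npathkeys;
    }

    int
    main(void)
    {
        /* group_pathkeys = GROUP BY keys plus one aggregate-ordering key */
        const char *group_pathkeys[] = {"a", "b", "ord_by_agg_col"};
        int         n = trim_group_pathkeys(3, 2);

        for (int i = 0; i < n; i++)
            printf("%s\n", group_pathkeys[i]);  /* prints "a" then "b" */
        return 0;
    }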
7680 :
7681 : /*
7682 : * can_partial_agg
7683 : *
7684 : * Determines whether or not partial grouping and/or aggregation is possible.
7685 : * Returns true when possible, false otherwise.
7686 : */
7687 : static bool
7688 39120 : can_partial_agg(PlannerInfo *root)
7689 : {
7690 39120 : Query *parse = root->parse;
7691 :
7692 39120 : if (!parse->hasAggs && parse->groupClause == NIL)
7693 : {
7694 : /*
7695 : * We don't know how to do parallel aggregation unless we have either
7696 : * some aggregates or a grouping clause.
7697 : */
7698 0 : return false;
7699 : }
7700 39120 : else if (parse->groupingSets)
7701 : {
7702 : /* We don't know how to do grouping sets in parallel. */
7703 872 : return false;
7704 : }
7705 38248 : else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7706 : {
7707 : /* Insufficient support for partial mode. */
7708 4086 : return false;
7709 : }
7710 :
7711 : /* Everything looks good. */
7712 34162 : return true;
7713 : }
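
The three rejection cases above can be read as a pure predicate over the query's properties. A standalone model (can_partial_agg_model and its flag parameters are hypothetical stand-ins for the Query/PlannerInfo fields):

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    can_partial_agg_model(bool hasAggs, bool hasGroupClause,
                          bool hasGroupingSets,
                          bool hasNonPartialAggs, bool hasNonSerialAggs)
    {
        if (!hasAggs && !hasGroupClause)
            return false;       /* nothing to aggregate in parallel */
        if (hasGroupingSets)
            return false;       /* grouping sets can't be done in parallel */
        if (hasNonPartialAggs || hasNonSerialAggs)
            return false;       /* e.g. an aggregate lacking a combine or
                                 * serialization function */
        return true;
    }

    int
    main(void)
    {
        /* e.g. SELECT count(*) FROM t GROUP BY a;  -> partial agg possible */
        printf("%d\n", can_partial_agg_model(true, true, false, false, false));
        /* e.g. GROUP BY GROUPING SETS ((a), (b));  -> rejected */
        printf("%d\n", can_partial_agg_model(true, true, true, false, false));
        return 0;
    }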
7714 :
7715 : /*
7716 : * apply_scanjoin_target_to_paths
7717 : *
7718 : * Adjust the final scan/join relation, and recursively all of its children,
7719 : * to generate the final scan/join target. It would be more correct to model
7720 : * this as a separate planning step with a new RelOptInfo at the toplevel and
7721 : * for each child relation, but doing it this way is noticeably cheaper.
7722 : * Maybe that problem can be solved at some point, but for now we do this.
7723 : *
7724 : * If tlist_same_exprs is true, then the scan/join target to be applied has
7725 : * the same expressions as the existing reltarget, so we need only insert the
7726 : * appropriate sortgroupref information. By avoiding the creation of
7727 : * projection paths we save effort both immediately and at plan creation time.
7728 : */
7729 : static void
7730 540030 : apply_scanjoin_target_to_paths(PlannerInfo *root,
7731 : RelOptInfo *rel,
7732 : List *scanjoin_targets,
7733 : List *scanjoin_targets_contain_srfs,
7734 : bool scanjoin_target_parallel_safe,
7735 : bool tlist_same_exprs)
7736 : {
7737 540030 : bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7738 : PathTarget *scanjoin_target;
7739 : ListCell *lc;
7740 :
7741 : /* This recurses, so be paranoid. */
7742 540030 : check_stack_depth();
7743 :
7744 : /*
7745 : * If the rel is partitioned, we want to drop its existing paths and
7746 : * generate new ones. This function would still be correct if we kept the
7747 : * existing paths: we'd modify them to generate the correct target above
7748 : * the partitioning Append, and then they'd compete on cost with paths
7749 : * generating the target below the Append. However, in our current cost
7750 : * model the latter way is always the same or cheaper cost, so modifying
7751 : * the existing paths would just be useless work. Moreover, when the cost
7752 : * is the same, varying roundoff errors might sometimes allow an existing
7753 : * path to be picked, resulting in undesirable cross-platform plan
7754 : * variations. So we drop old paths and thereby force the work to be done
7755 : * below the Append, except in the case of a non-parallel-safe target.
7756 : *
7757 : * Some care is needed, because we have to allow
7758 : * generate_useful_gather_paths to see the old partial paths in the next
7759 : * stanza. Hence, zap the main pathlist here, then allow
7760 : * generate_useful_gather_paths to add path(s) to the main list, and
7761 : * finally zap the partial pathlist.
7762 : */
7763 540030 : if (rel_is_partitioned)
7764 12568 : rel->pathlist = NIL;
7765 :
7766 : /*
7767 : * If the scan/join target is not parallel-safe, partial paths cannot
7768 : * generate it.
7769 : */
7770 540030 : if (!scanjoin_target_parallel_safe)
7771 : {
7772 : /*
7773 : * Since we can't generate the final scan/join target in parallel
7774 : * workers, this is our last opportunity to use any partial paths that
7775 : * exist; so build Gather path(s) that use them and emit whatever the
7776 : * current reltarget is. We don't do this in the case where the
7777 : * target is parallel-safe, since we will be able to generate superior
7778 : * paths by doing it after the final scan/join target has been
7779 : * applied.
7780 : */
7781 80412 : generate_useful_gather_paths(root, rel, false);
7782 :
7783 : /* Can't use parallel query above this level. */
7784 80412 : rel->partial_pathlist = NIL;
7785 80412 : rel->consider_parallel = false;
7786 : }
7787 :
7788 : /* Finish dropping old paths for a partitioned rel, per comment above */
7789 540030 : if (rel_is_partitioned)
7790 12568 : rel->partial_pathlist = NIL;
7791 :
7792 : /* Extract SRF-free scan/join target. */
7793 540030 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7794 :
7795 : /*
7796 : * Apply the SRF-free scan/join target to each existing path.
7797 : *
7798 : * If the tlist exprs are the same, we can just inject the sortgroupref
7799 : * information into the existing pathtargets. Otherwise, replace each
7800 : * path with a projection path that generates the SRF-free scan/join
7801 : * target. This can't change the ordering of paths within rel->pathlist,
7802 : * so we just modify the list in place.
7803 : */
7804 1121152 : foreach(lc, rel->pathlist)
7805 : {
7806 581122 : Path *subpath = (Path *) lfirst(lc);
7807 :
7808 : /* Shouldn't have any parameterized paths anymore */
7809 : Assert(subpath->param_info == NULL);
7810 :
7811 581122 : if (tlist_same_exprs)
7812 206806 : subpath->pathtarget->sortgrouprefs =
7813 206806 : scanjoin_target->sortgrouprefs;
7814 : else
7815 : {
7816 : Path *newpath;
7817 :
7818 374316 : newpath = (Path *) create_projection_path(root, rel, subpath,
7819 : scanjoin_target);
7820 374316 : lfirst(lc) = newpath;
7821 : }
7822 : }
7823 :
7824 : /* Likewise adjust the targets for any partial paths. */
7825 559660 : foreach(lc, rel->partial_pathlist)
7826 : {
7827 19630 : Path *subpath = (Path *) lfirst(lc);
7828 :
7829 : /* Shouldn't have any parameterized paths anymore */
7830 : Assert(subpath->param_info == NULL);
7831 :
7832 19630 : if (tlist_same_exprs)
7833 15990 : subpath->pathtarget->sortgrouprefs =
7834 15990 : scanjoin_target->sortgrouprefs;
7835 : else
7836 : {
7837 : Path *newpath;
7838 :
7839 3640 : newpath = (Path *) create_projection_path(root, rel, subpath,
7840 : scanjoin_target);
7841 3640 : lfirst(lc) = newpath;
7842 : }
7843 : }
7844 :
7845 : /*
7846 : * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7847 : * atop each existing path. (Note that this function doesn't look at the
7848 : * cheapest-path fields, which is a good thing because they're bogus right
7849 : * now.)
7850 : */
7851 540030 : if (root->parse->hasTargetSRFs)
7852 12048 : adjust_paths_for_srfs(root, rel,
7853 : scanjoin_targets,
7854 : scanjoin_targets_contain_srfs);
7855 :
7856 : /*
7857 : * Update the rel's target to be the final (with SRFs) scan/join target.
7858 : * This now matches the actual output of all the paths, and we might get
7859 : * confused in createplan.c if they don't agree. We must do this now so
7860 : * that any append paths made in the next part will use the correct
7861 : * pathtarget (cf. create_append_path).
7862 : *
7863 : * Note that this is also necessary if GetForeignUpperPaths() gets called
7864 : * on the final scan/join relation or on any of its children, since the
7865 : * FDW might look at the rel's target to create ForeignPaths.
7866 : */
7867 540030 : rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7868 :
7869 : /*
7870 : * If the relation is partitioned, recursively apply the scan/join target
7871 : * to all partitions, and generate brand-new Append paths in which the
7872 : * scan/join target is computed below the Append rather than above it.
7873 : * Since Append is not projection-capable, that might save a separate
7874 : * Result node, and it also is important for partitionwise aggregate.
7875 : */
7876 540030 : if (rel_is_partitioned)
7877 : {
7878 12568 : List *live_children = NIL;
7879 : int i;
7880 :
7881 : /* Adjust each partition. */
7882 12568 : i = -1;
7883 35504 : while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7884 : {
7885 22936 : RelOptInfo *child_rel = rel->part_rels[i];
7886 : AppendRelInfo **appinfos;
7887 : int nappinfos;
7888 22936 : List *child_scanjoin_targets = NIL;
7889 :
7890 : Assert(child_rel != NULL);
7891 :
7892 : /* Dummy children can be ignored. */
7893 22936 : if (IS_DUMMY_REL(child_rel))
7894 42 : continue;
7895 :
7896 : /* Translate scan/join targets for this child. */
7897 22894 : appinfos = find_appinfos_by_relids(root, child_rel->relids,
7898 : &nappinfos);
7899 45788 : foreach(lc, scanjoin_targets)
7900 : {
7901 22894 : PathTarget *target = lfirst_node(PathTarget, lc);
7902 :
7903 22894 : target = copy_pathtarget(target);
7904 22894 : target->exprs = (List *)
7905 22894 : adjust_appendrel_attrs(root,
7906 22894 : (Node *) target->exprs,
7907 : nappinfos, appinfos);
7908 22894 : child_scanjoin_targets = lappend(child_scanjoin_targets,
7909 : target);
7910 : }
7911 22894 : pfree(appinfos);
7912 :
7913 : /* Recursion does the real work. */
7914 22894 : apply_scanjoin_target_to_paths(root, child_rel,
7915 : child_scanjoin_targets,
7916 : scanjoin_targets_contain_srfs,
7917 : scanjoin_target_parallel_safe,
7918 : tlist_same_exprs);
7919 :
7920 : /* Save non-dummy children for Append paths. */
7921 22894 : if (!IS_DUMMY_REL(child_rel))
7922 22894 : live_children = lappend(live_children, child_rel);
7923 : }
7924 :
7925 : /* Build new paths for this relation by appending child paths. */
7926 12568 : add_paths_to_append_rel(root, rel, live_children);
7927 : }
7928 :
7929 : /*
7930 : * Consider generating Gather or Gather Merge paths. We must only do this
7931 : * if the relation is parallel safe, and we don't do it for child rels to
7932 : * avoid creating multiple Gather nodes within the same plan. We must do
7933 : * this after all paths have been generated and before set_cheapest, since
7934 : * one of the generated paths may turn out to be the cheapest one.
7935 : */
7936 540030 : if (rel->consider_parallel && !IS_OTHER_REL(rel))
7937 176058 : generate_useful_gather_paths(root, rel, false);
7938 :
7939 : /*
7940 : * Reassess which paths are the cheapest, now that we've potentially added
7941 : * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7942 : * this relation.
7943 : */
7944 540030 : set_cheapest(rel);
7945 540030 : }
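
The per-path choice made in the two loops above (share the sortgroupref array when the target expressions are identical, otherwise interpose a projection) can be modelled in isolation. In this sketch ToyTarget and ToyPath are hypothetical stand-ins for PathTarget and Path:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct ToyTarget
    {
        int        *sortgrouprefs;
    } ToyTarget;

    typedef struct ToyPath
    {
        ToyTarget  *pathtarget;
        bool        projected;
    } ToyPath;

    static void
    apply_target(ToyPath *path, ToyTarget *scanjoin_target, bool tlist_same_exprs)
    {
        if (tlist_same_exprs)
            /* cheap: just inject the sortgroupref markings */
            path->pathtarget->sortgrouprefs = scanjoin_target->sortgrouprefs;
        else
            /* create_projection_path() in the real function */
            path->projected = true;
    }

    int
    main(void)
    {
        int         refs[] = {1, 0};
        ToyTarget   newtarget = {refs};
        ToyTarget   oldtarget = {NULL};
        ToyPath     path = {&oldtarget, false};

        apply_target(&path, &newtarget, true);
        printf("shared: %d projected: %d\n",
               path.pathtarget->sortgrouprefs == refs, path.projected);
        return 0;
    }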
7946 :
7947 : /*
7948 : * create_partitionwise_grouping_paths
7949 : *
7950 : * If the partition keys of the input relation are part of the GROUP BY clause, all
7951 : * the rows belonging to a given group come from a single partition. This
7952 : * allows aggregation/grouping over a partitioned relation to be broken down
7953 : * into aggregation/grouping on each partition. This should be no worse, and
7954 : * often better, than the normal approach.
7955 : *
7956 : * However, if the GROUP BY clause does not contain all the partition keys,
7957 : * rows from a given group may be spread across multiple partitions. In that
7958 : * case, we perform partial aggregation for each group, append the results,
7959 : * and then finalize aggregation. This is less certain to win than the
7960 : * previous case. It may win if the PartialAggregate stage greatly reduces
7961 : * the number of groups, because fewer rows will pass through the Append node.
7962 : * It may lose if we have lots of small groups.
7963 : */
7964 : static void
7965 562 : create_partitionwise_grouping_paths(PlannerInfo *root,
7966 : RelOptInfo *input_rel,
7967 : RelOptInfo *grouped_rel,
7968 : RelOptInfo *partially_grouped_rel,
7969 : const AggClauseCosts *agg_costs,
7970 : grouping_sets_data *gd,
7971 : PartitionwiseAggregateType patype,
7972 : GroupPathExtraData *extra)
7973 : {
7974 562 : List *grouped_live_children = NIL;
7975 562 : List *partially_grouped_live_children = NIL;
7976 562 : PathTarget *target = grouped_rel->reltarget;
7977 562 : bool partial_grouping_valid = true;
7978 : int i;
7979 :
7980 : Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
7981 : Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
7982 : partially_grouped_rel != NULL);
7983 :
7984 : /* Add paths for partitionwise aggregation/grouping. */
7985 562 : i = -1;
7986 2056 : while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7987 : {
7988 1494 : RelOptInfo *child_input_rel = input_rel->part_rels[i];
7989 : PathTarget *child_target;
7990 : AppendRelInfo **appinfos;
7991 : int nappinfos;
7992 : GroupPathExtraData child_extra;
7993 : RelOptInfo *child_grouped_rel;
7994 : RelOptInfo *child_partially_grouped_rel;
7995 :
7996 : Assert(child_input_rel != NULL);
7997 :
7998 : /* Dummy children can be ignored. */
7999 1494 : if (IS_DUMMY_REL(child_input_rel))
8000 0 : continue;
8001 :
8002 1494 : child_target = copy_pathtarget(target);
8003 :
8004 : /*
8005 : * Copy the given "extra" structure as is and then override the
8006 : * members specific to this child.
8007 : */
8008 1494 : memcpy(&child_extra, extra, sizeof(child_extra));
8009 :
8010 1494 : appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8011 : &nappinfos);
8012 :
8013 1494 : child_target->exprs = (List *)
8014 1494 : adjust_appendrel_attrs(root,
8015 1494 : (Node *) target->exprs,
8016 : nappinfos, appinfos);
8017 :
8018 : /* Translate havingQual and targetList. */
8019 1494 : child_extra.havingQual = (Node *)
8020 : adjust_appendrel_attrs(root,
8021 : extra->havingQual,
8022 : nappinfos, appinfos);
8023 1494 : child_extra.targetList = (List *)
8024 1494 : adjust_appendrel_attrs(root,
8025 1494 : (Node *) extra->targetList,
8026 : nappinfos, appinfos);
8027 :
8028 : /*
8029 : * extra->patype was the value computed for our parent rel; patype is
8030 : * the value for this relation. For the child, our value is its
8031 : * parent rel's value.
8032 : */
8033 1494 : child_extra.patype = patype;
8034 :
8035 : /*
8036 : * Create grouping relation to hold fully aggregated grouping and/or
8037 : * aggregation paths for the child.
8038 : */
8039 1494 : child_grouped_rel = make_grouping_rel(root, child_input_rel,
8040 : child_target,
8041 1494 : extra->target_parallel_safe,
8042 : child_extra.havingQual);
8043 :
8044 : /* Create grouping paths for this child relation. */
8045 1494 : create_ordinary_grouping_paths(root, child_input_rel,
8046 : child_grouped_rel,
8047 : agg_costs, gd, &child_extra,
8048 : &child_partially_grouped_rel);
8049 :
8050 1494 : if (child_partially_grouped_rel)
8051 : {
8052 : partially_grouped_live_children =
8053 906 : lappend(partially_grouped_live_children,
8054 : child_partially_grouped_rel);
8055 : }
8056 : else
8057 588 : partial_grouping_valid = false;
8058 :
8059 1494 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8060 : {
8061 876 : set_cheapest(child_grouped_rel);
8062 876 : grouped_live_children = lappend(grouped_live_children,
8063 : child_grouped_rel);
8064 : }
8065 :
8066 1494 : pfree(appinfos);
8067 : }
8068 :
8069 : /*
8070 : * Try to create append paths for partially grouped children. For full
8071 : * partitionwise aggregation, we might have paths in the partial_pathlist
8072 : * if parallel aggregation is possible. For partial partitionwise
8073 : * aggregation, we may have paths in both pathlist and partial_pathlist.
8074 : *
8075 : * NB: We must have a partially grouped path for every child in order to
8076 : * generate a partially grouped path for this relation.
8077 : */
8078 562 : if (partially_grouped_rel && partial_grouping_valid)
8079 : {
8080 : Assert(partially_grouped_live_children != NIL);
8081 :
8082 350 : add_paths_to_append_rel(root, partially_grouped_rel,
8083 : partially_grouped_live_children);
8084 :
8085 : /*
8086 : * We need to call set_cheapest, since the finalization step will use the
8087 : * cheapest path from the rel.
8088 : */
8089 350 : if (partially_grouped_rel->pathlist)
8090 350 : set_cheapest(partially_grouped_rel);
8091 : }
8092 :
8093 : /* If possible, create append paths for fully grouped children. */
8094 562 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8095 : {
8096 : Assert(grouped_live_children != NIL);
8097 :
8098 320 : add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8099 : }
8100 562 : }
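
The full-versus-partial distinction described in the header comment reduces to one question: does the GROUP BY cover every partition key? A standalone sketch (choose_partitionwise_mode and the PWAGG_* values are hypothetical names; the real enum is PartitionwiseAggregateType):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum
    {
        PWAGG_NONE,             /* no partitionwise aggregation */
        PWAGG_FULL,             /* finish aggregation inside each partition */
        PWAGG_PARTIAL           /* partial agg per partition, finalize above */
    } PartitionwiseAggMode;

    static PartitionwiseAggMode
    choose_partitionwise_mode(bool partitionwise_enabled,
                              bool groupby_has_partkeys)
    {
        if (!partitionwise_enabled)
            return PWAGG_NONE;
        /* all keys grouped on: each group lives in a single partition */
        return groupby_has_partkeys ? PWAGG_FULL : PWAGG_PARTIAL;
    }

    int
    main(void)
    {
        /* table partitioned by (a): GROUP BY a, b -> FULL; GROUP BY b -> PARTIAL */
        printf("%d\n", choose_partitionwise_mode(true, true));   /* 1 */
        printf("%d\n", choose_partitionwise_mode(true, false));  /* 2 */
        return 0;
    }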
8101 :
8102 : /*
8103 : * group_by_has_partkey
8104 : *
8105 : * Returns true if all the partition keys of the given relation appear in
8106 : * the GROUP BY clause with matching collations, false otherwise.
8107 : */
8108 : static bool
8109 556 : group_by_has_partkey(RelOptInfo *input_rel,
8110 : List *targetList,
8111 : List *groupClause)
8112 : {
8113 556 : List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8114 556 : int cnt = 0;
8115 : int partnatts;
8116 :
8117 : /* Input relation should be partitioned. */
8118 : Assert(input_rel->part_scheme);
8119 :
8120 : /* Rule this case out early if there are no partition keys present. */
8121 556 : if (!input_rel->partexprs)
8122 0 : return false;
8123 :
8124 556 : partnatts = input_rel->part_scheme->partnatts;
8125 :
8126 912 : for (cnt = 0; cnt < partnatts; cnt++)
8127 : {
8128 592 : List *partexprs = input_rel->partexprs[cnt];
8129 : ListCell *lc;
8130 592 : bool found = false;
8131 :
8132 810 : foreach(lc, partexprs)
8133 : {
8134 : ListCell *lg;
8135 586 : Expr *partexpr = lfirst(lc);
8136 586 : Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8137 :
8138 924 : foreach(lg, groupexprs)
8139 : {
8140 706 : Expr *groupexpr = lfirst(lg);
8141 706 : Oid groupcoll = exprCollation((Node *) groupexpr);
8142 :
8143 : /*
8144 : * Note: we can assume there is at most one RelabelType node;
8145 : * eval_const_expressions() will have simplified if more than
8146 : * one.
8147 : */
8148 706 : if (IsA(groupexpr, RelabelType))
8149 24 : groupexpr = ((RelabelType *) groupexpr)->arg;
8150 :
8151 706 : if (equal(groupexpr, partexpr))
8152 : {
8153 : /*
8154 : * Reject a match if the grouping collation does not match
8155 : * the partitioning collation.
8156 : */
8157 368 : if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8158 : partcoll != groupcoll)
8159 12 : return false;
8160 :
8161 356 : found = true;
8162 356 : break;
8163 : }
8164 : }
8165 :
8166 574 : if (found)
8167 356 : break;
8168 : }
8169 :
8170 : /*
8171 : * If none of the expressions for this partition key matched any of the
8172 : * GROUP BY expressions, return false.
8173 : */
8174 580 : if (!found)
8175 224 : return false;
8176 : }
8177 :
8178 320 : return true;
8179 : }
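
The nested-loop matching with the collation veto can be modelled on plain arrays. This is a standalone sketch under the assumptions that expression equality is string equality and that collation 0 means "none"; group_by_has_partkey_model and Key are hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct Key
    {
        const char *expr;       /* stand-in for the expression tree */
        unsigned    coll;       /* collation id, 0 = none */
    } Key;

    static bool
    group_by_has_partkey_model(const Key *partkeys, int nparts,
                               const Key *groupkeys, int ngroup)
    {
        for (int i = 0; i < nparts; i++)
        {
            bool    found = false;

            for (int j = 0; j < ngroup; j++)
            {
                if (strcmp(partkeys[i].expr, groupkeys[j].expr) != 0)
                    continue;
                /* a collation mismatch rejects the whole thing outright */
                if (partkeys[i].coll != 0 && groupkeys[j].coll != 0 &&
                    partkeys[i].coll != groupkeys[j].coll)
                    return false;
                found = true;
                break;
            }
            if (!found)
                return false;   /* some partition key is not grouped on */
        }
        return true;
    }

    int
    main(void)
    {
        Key     parts[] = {{"a", 100}};
        Key     group_ok[] = {{"a", 100}, {"b", 0}};
        Key     group_badcoll[] = {{"a", 200}};

        printf("%d\n", group_by_has_partkey_model(parts, 1, group_ok, 2));      /* 1 */
        printf("%d\n", group_by_has_partkey_model(parts, 1, group_badcoll, 1)); /* 0 */
        return 0;
    }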
8180 :
8181 : /*
8182 : * generate_setop_child_grouplist
8183 : * Build a SortGroupClause list defining the sort/grouping properties
8184 : * of the child of a set operation.
8185 : *
8186 : * This is similar to generate_setop_grouplist() but differs in that the setop
8187 : * child query's targetlist entries may already have a tleSortGroupRef
8188 : * assigned for other purposes, such as GROUP BYs. Here we keep the
8189 : * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8190 : * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8191 : * any column in the targetlist doesn't match the setop's colTypes
8192 : * then we return an empty list. This may leave some TLEs with unreferenced
8193 : * ressortgroupref markings, but that's harmless.
8194 : */
8195 : static List *
8196 12278 : generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
8197 : {
8198 12278 : List *grouplist = copyObject(op->groupClauses);
8199 : ListCell *lg;
8200 : ListCell *lt;
8201 : ListCell *ct;
8202 :
8203 12278 : lg = list_head(grouplist);
8204 12278 : ct = list_head(op->colTypes);
8205 47742 : foreach(lt, targetlist)
8206 : {
8207 35878 : TargetEntry *tle = (TargetEntry *) lfirst(lt);
8208 : SortGroupClause *sgc;
8209 : Oid coltype;
8210 :
8211 : /* resjunk columns could have sortgrouprefs. Leave these alone */
8212 35878 : if (tle->resjunk)
8213 0 : continue;
8214 :
8215 : /*
8216 : * We expect every non-resjunk target to have a corresponding
8217 : * SortGroupClause and colTypes entry.
8218 : */
8219 : Assert(lg != NULL);
8220 : Assert(ct != NULL);
8221 35878 : sgc = (SortGroupClause *) lfirst(lg);
8222 35878 : coltype = lfirst_oid(ct);
8223 :
8224 : /* reject if target type isn't the same as the setop target type */
8225 35878 : if (coltype != exprType((Node *) tle->expr))
8226 414 : return NIL;
8227 :
8228 35464 : lg = lnext(grouplist, lg);
8229 35464 : ct = lnext(op->colTypes, ct);
8230 :
8231 : /* assign a tleSortGroupRef, or reuse the existing one */
8232 35464 : sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8233 : }
8234 :
8235 : Assert(lg == NULL);
8236 : Assert(ct == NULL);
8237 :
8238 11864 : return grouplist;
8239 : }
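
The lockstep walk over the targetlist and the setop's colTypes, with an early NIL return on the first type mismatch, can be sketched standalone. Here assign_setop_grouprefs is a hypothetical stand-in, plain arrays replace the List cells, and the resjunk skipping and sortgroupref assignment are reduced to comments:

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    assign_setop_grouprefs(const int *tle_types, int ntles,
                           const int *col_types, int ncols)
    {
        int     ct = 0;

        for (int lt = 0; lt < ntles; lt++)
        {
            /* resjunk entries would be skipped here without advancing ct */
            if (ct >= ncols)
                return false;   /* lists failed to pair up */
            if (tle_types[lt] != col_types[ct])
                return false;   /* "return NIL" in the real function */
            ct++;               /* advance colTypes alongside the tlist */
            /* assignSortGroupRef(tle, targetlist) would run here */
        }
        return ct == ncols;
    }

    int
    main(void)
    {
        int     tles[] = {23, 25};          /* e.g. int4, text type OIDs */
        int     cols_match[] = {23, 25};
        int     cols_mismatch[] = {23, 23};

        printf("%d\n", assign_setop_grouprefs(tles, 2, cols_match, 2));    /* 1 */
        printf("%d\n", assign_setop_grouprefs(tles, 2, cols_mismatch, 2)); /* 0 */
        return 0;
    }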
|