Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * planner.c
4 : * The query optimizer external interface.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/optimizer/plan/planner.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include <limits.h>
19 : #include <math.h>
20 :
21 : #include "access/genam.h"
22 : #include "access/parallel.h"
23 : #include "access/sysattr.h"
24 : #include "access/table.h"
25 : #include "catalog/pg_aggregate.h"
26 : #include "catalog/pg_inherits.h"
27 : #include "catalog/pg_proc.h"
28 : #include "catalog/pg_type.h"
29 : #include "executor/executor.h"
30 : #include "foreign/fdwapi.h"
31 : #include "jit/jit.h"
32 : #include "lib/bipartite_match.h"
33 : #include "lib/knapsack.h"
34 : #include "miscadmin.h"
35 : #include "nodes/makefuncs.h"
36 : #include "nodes/nodeFuncs.h"
37 : #ifdef OPTIMIZER_DEBUG
38 : #include "nodes/print.h"
39 : #endif
40 : #include "nodes/supportnodes.h"
41 : #include "optimizer/appendinfo.h"
42 : #include "optimizer/clauses.h"
43 : #include "optimizer/cost.h"
44 : #include "optimizer/optimizer.h"
45 : #include "optimizer/paramassign.h"
46 : #include "optimizer/pathnode.h"
47 : #include "optimizer/paths.h"
48 : #include "optimizer/plancat.h"
49 : #include "optimizer/planmain.h"
50 : #include "optimizer/planner.h"
51 : #include "optimizer/prep.h"
52 : #include "optimizer/subselect.h"
53 : #include "optimizer/tlist.h"
54 : #include "parser/analyze.h"
55 : #include "parser/parse_agg.h"
56 : #include "parser/parse_clause.h"
57 : #include "parser/parse_relation.h"
58 : #include "parser/parsetree.h"
59 : #include "partitioning/partdesc.h"
60 : #include "rewrite/rewriteManip.h"
61 : #include "utils/backend_status.h"
62 : #include "utils/lsyscache.h"
63 : #include "utils/rel.h"
64 : #include "utils/selfuncs.h"
65 :
66 : /* GUC parameters */
67 : double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
68 : int debug_parallel_query = DEBUG_PARALLEL_OFF;
69 : bool parallel_leader_participation = true;
70 : bool enable_distinct_reordering = true;
71 :
72 : /* Hook for plugins to get control in planner() */
73 : planner_hook_type planner_hook = NULL;
74 :
75 : /* Hook for plugins to get control when grouping_planner() plans upper rels */
76 : create_upper_paths_hook_type create_upper_paths_hook = NULL;
77 :
78 :
79 : /* Expression kind codes for preprocess_expression */
80 : #define EXPRKIND_QUAL 0
81 : #define EXPRKIND_TARGET 1
82 : #define EXPRKIND_RTFUNC 2
83 : #define EXPRKIND_RTFUNC_LATERAL 3
84 : #define EXPRKIND_VALUES 4
85 : #define EXPRKIND_VALUES_LATERAL 5
86 : #define EXPRKIND_LIMIT 6
87 : #define EXPRKIND_APPINFO 7
88 : #define EXPRKIND_PHV 8
89 : #define EXPRKIND_TABLESAMPLE 9
90 : #define EXPRKIND_ARBITER_ELEM 10
91 : #define EXPRKIND_TABLEFUNC 11
92 : #define EXPRKIND_TABLEFUNC_LATERAL 12
93 : #define EXPRKIND_GROUPEXPR 13
94 :
95 : /*
96 : * Data specific to grouping sets
97 : */
98 : typedef struct
99 : {
100 : List *rollups;
101 : List *hash_sets_idx;
102 : double dNumHashGroups;
103 : bool any_hashable;
104 : Bitmapset *unsortable_refs;
105 : Bitmapset *unhashable_refs;
106 : List *unsortable_sets;
107 : int *tleref_to_colnum_map;
108 : } grouping_sets_data;
109 :
110 : /*
111 : * Temporary structure for use during WindowClause reordering in order to be
112 : * able to sort WindowClauses on partitioning/ordering prefix.
113 : */
114 : typedef struct
115 : {
116 : WindowClause *wc;
117 : List *uniqueOrder; /* A List of unique ordering/partitioning
118 : * clauses per Window */
119 : } WindowClauseSortData;
120 :
121 : /* Passthrough data for standard_qp_callback */
122 : typedef struct
123 : {
124 : List *activeWindows; /* active windows, if any */
125 : grouping_sets_data *gset_data; /* grouping sets data, if any */
126 : SetOperationStmt *setop; /* parent set operation or NULL if not a
127 : * subquery belonging to a set operation */
128 : } standard_qp_extra;
129 :
130 : /* Local functions */
131 : static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
132 : static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
133 : static void grouping_planner(PlannerInfo *root, double tuple_fraction,
134 : SetOperationStmt *setops);
135 : static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
136 : static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
137 : int *tleref_to_colnum_map);
138 : static void preprocess_rowmarks(PlannerInfo *root);
139 : static double preprocess_limit(PlannerInfo *root,
140 : double tuple_fraction,
141 : int64 *offset_est, int64 *count_est);
142 : static List *preprocess_groupclause(PlannerInfo *root, List *force);
143 : static List *extract_rollup_sets(List *groupingSets);
144 : static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
145 : static void standard_qp_callback(PlannerInfo *root, void *extra);
146 : static double get_number_of_groups(PlannerInfo *root,
147 : double path_rows,
148 : grouping_sets_data *gd,
149 : List *target_list);
150 : static RelOptInfo *create_grouping_paths(PlannerInfo *root,
151 : RelOptInfo *input_rel,
152 : PathTarget *target,
153 : bool target_parallel_safe,
154 : grouping_sets_data *gd);
155 : static bool is_degenerate_grouping(PlannerInfo *root);
156 : static void create_degenerate_grouping_paths(PlannerInfo *root,
157 : RelOptInfo *input_rel,
158 : RelOptInfo *grouped_rel);
159 : static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
160 : PathTarget *target, bool target_parallel_safe,
161 : Node *havingQual);
162 : static void create_ordinary_grouping_paths(PlannerInfo *root,
163 : RelOptInfo *input_rel,
164 : RelOptInfo *grouped_rel,
165 : const AggClauseCosts *agg_costs,
166 : grouping_sets_data *gd,
167 : GroupPathExtraData *extra,
168 : RelOptInfo **partially_grouped_rel_p);
169 : static void consider_groupingsets_paths(PlannerInfo *root,
170 : RelOptInfo *grouped_rel,
171 : Path *path,
172 : bool is_sorted,
173 : bool can_hash,
174 : grouping_sets_data *gd,
175 : const AggClauseCosts *agg_costs,
176 : double dNumGroups);
177 : static RelOptInfo *create_window_paths(PlannerInfo *root,
178 : RelOptInfo *input_rel,
179 : PathTarget *input_target,
180 : PathTarget *output_target,
181 : bool output_target_parallel_safe,
182 : WindowFuncLists *wflists,
183 : List *activeWindows);
184 : static void create_one_window_path(PlannerInfo *root,
185 : RelOptInfo *window_rel,
186 : Path *path,
187 : PathTarget *input_target,
188 : PathTarget *output_target,
189 : WindowFuncLists *wflists,
190 : List *activeWindows);
191 : static RelOptInfo *create_distinct_paths(PlannerInfo *root,
192 : RelOptInfo *input_rel,
193 : PathTarget *target);
194 : static void create_partial_distinct_paths(PlannerInfo *root,
195 : RelOptInfo *input_rel,
196 : RelOptInfo *final_distinct_rel,
197 : PathTarget *target);
198 : static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
199 : RelOptInfo *input_rel,
200 : RelOptInfo *distinct_rel);
201 : static List *get_useful_pathkeys_for_distinct(PlannerInfo *root,
202 : List *needed_pathkeys,
203 : List *path_pathkeys);
204 : static RelOptInfo *create_ordered_paths(PlannerInfo *root,
205 : RelOptInfo *input_rel,
206 : PathTarget *target,
207 : bool target_parallel_safe,
208 : double limit_tuples);
209 : static PathTarget *make_group_input_target(PlannerInfo *root,
210 : PathTarget *final_target);
211 : static PathTarget *make_partial_grouping_target(PlannerInfo *root,
212 : PathTarget *grouping_target,
213 : Node *havingQual);
214 : static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
215 : static void optimize_window_clauses(PlannerInfo *root,
216 : WindowFuncLists *wflists);
217 : static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
218 : static void name_active_windows(List *activeWindows);
219 : static PathTarget *make_window_input_target(PlannerInfo *root,
220 : PathTarget *final_target,
221 : List *activeWindows);
222 : static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
223 : List *tlist);
224 : static PathTarget *make_sort_input_target(PlannerInfo *root,
225 : PathTarget *final_target,
226 : bool *have_postponed_srfs);
227 : static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
228 : List *targets, List *targets_contain_srfs);
229 : static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
230 : RelOptInfo *grouped_rel,
231 : RelOptInfo *partially_grouped_rel,
232 : const AggClauseCosts *agg_costs,
233 : grouping_sets_data *gd,
234 : double dNumGroups,
235 : GroupPathExtraData *extra);
236 : static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
237 : RelOptInfo *grouped_rel,
238 : RelOptInfo *input_rel,
239 : grouping_sets_data *gd,
240 : GroupPathExtraData *extra,
241 : bool force_rel_creation);
242 : static Path *make_ordered_path(PlannerInfo *root,
243 : RelOptInfo *rel,
244 : Path *path,
245 : Path *cheapest_path,
246 : List *pathkeys,
247 : double limit_tuples);
248 : static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
249 : static bool can_partial_agg(PlannerInfo *root);
250 : static void apply_scanjoin_target_to_paths(PlannerInfo *root,
251 : RelOptInfo *rel,
252 : List *scanjoin_targets,
253 : List *scanjoin_targets_contain_srfs,
254 : bool scanjoin_target_parallel_safe,
255 : bool tlist_same_exprs);
256 : static void create_partitionwise_grouping_paths(PlannerInfo *root,
257 : RelOptInfo *input_rel,
258 : RelOptInfo *grouped_rel,
259 : RelOptInfo *partially_grouped_rel,
260 : const AggClauseCosts *agg_costs,
261 : grouping_sets_data *gd,
262 : PartitionwiseAggregateType patype,
263 : GroupPathExtraData *extra);
264 : static bool group_by_has_partkey(RelOptInfo *input_rel,
265 : List *targetList,
266 : List *groupClause);
267 : static int common_prefix_cmp(const void *a, const void *b);
268 : static List *generate_setop_child_grouplist(SetOperationStmt *op,
269 : List *targetlist);
270 :
271 :
272 : /*****************************************************************************
273 : *
274 : * Query optimizer entry point
275 : *
276 : * To support loadable plugins that monitor or modify planner behavior,
277 : * we provide a hook variable that lets a plugin get control before and
278 : * after the standard planning process. The plugin would normally call
279 : * standard_planner().
280 : *
281 : * Note to plugin authors: standard_planner() scribbles on its Query input,
282 : * so you'd better copy that data structure if you want to plan more than once.
283 : *
284 : *****************************************************************************/
285 : PlannedStmt *
286 474572 : planner(Query *parse, const char *query_string, int cursorOptions,
287 : ParamListInfo boundParams)
288 : {
289 : PlannedStmt *result;
290 :
291 474572 : if (planner_hook)
292 94108 : result = (*planner_hook) (parse, query_string, cursorOptions, boundParams);
293 : else
294 380464 : result = standard_planner(parse, query_string, cursorOptions, boundParams);
295 :
296 470346 : pgstat_report_plan_id(result->planId, false);
297 :
298 470346 : return result;
299 : }
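/*
 * Editor's note, not part of planner.c: the comment above describes the
 * planner_hook plugin mechanism, so here is a minimal sketch of how a
 * loadable module might install it.  The names my_planner and
 * prev_planner_hook are hypothetical; the only assumptions are the hook
 * signature visible in planner() above and the usual _PG_init() convention
 * for hook-installing libraries.
 */
#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

static PlannedStmt *
my_planner(Query *parse, const char *query_string,
		   int cursorOptions, ParamListInfo boundParams)
{
	PlannedStmt *result;

	/* ... "before" work could go here (remember: planning scribbles on parse) ... */

	/* Chain to any previously installed hook, else to the standard planner */
	if (prev_planner_hook)
		result = prev_planner_hook(parse, query_string,
								   cursorOptions, boundParams);
	else
		result = standard_planner(parse, query_string,
								  cursorOptions, boundParams);

	/* ... "after" work could go here, e.g. inspect or log the finished plan ... */

	return result;
}

void
_PG_init(void)
{
	prev_planner_hook = planner_hook;
	planner_hook = my_planner;
}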
300 :
301 : PlannedStmt *
302 474572 : standard_planner(Query *parse, const char *query_string, int cursorOptions,
303 : ParamListInfo boundParams)
304 : {
305 : PlannedStmt *result;
306 : PlannerGlobal *glob;
307 : double tuple_fraction;
308 : PlannerInfo *root;
309 : RelOptInfo *final_rel;
310 : Path *best_path;
311 : Plan *top_plan;
312 : ListCell *lp,
313 : *lr;
314 :
315 : /*
316 : * Set up global state for this planner invocation. This data is needed
317 : * across all levels of sub-Query that might exist in the given command,
318 : * so we keep it in a separate struct that's linked to by each per-Query
319 : * PlannerInfo.
320 : */
321 474572 : glob = makeNode(PlannerGlobal);
322 :
323 474572 : glob->boundParams = boundParams;
324 474572 : glob->subplans = NIL;
325 474572 : glob->subpaths = NIL;
326 474572 : glob->subroots = NIL;
327 474572 : glob->rewindPlanIDs = NULL;
328 474572 : glob->finalrtable = NIL;
329 474572 : glob->finalrteperminfos = NIL;
330 474572 : glob->finalrowmarks = NIL;
331 474572 : glob->resultRelations = NIL;
332 474572 : glob->appendRelations = NIL;
333 474572 : glob->relationOids = NIL;
334 474572 : glob->invalItems = NIL;
335 474572 : glob->paramExecTypes = NIL;
336 474572 : glob->lastPHId = 0;
337 474572 : glob->lastRowMarkId = 0;
338 474572 : glob->lastPlanNodeId = 0;
339 474572 : glob->transientPlan = false;
340 474572 : glob->dependsOnRole = false;
341 :
342 : /*
343 : * Assess whether it's feasible to use parallel mode for this query. We
344 : * can't do this in a standalone backend, or if the command will try to
345 : * modify any data, or if this is a cursor operation, or if GUCs are set
346 : * to values that don't permit parallelism, or if parallel-unsafe
347 : * functions are present in the query tree.
348 : *
349 : * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
350 : * MATERIALIZED VIEW to use parallel plans, but this is safe only because
351 : * the command is writing into a completely new table which workers won't
352 : * be able to see. If the workers could see the table, the fact that
353 : * group locking would cause them to ignore the leader's heavyweight GIN
354 : * page locks would make this unsafe. We'll have to fix that somehow if
355 : * we want to allow parallel inserts in general; updates and deletes have
356 : * additional problems especially around combo CIDs.)
357 : *
358 : * For now, we don't try to use parallel mode if we're running inside a
359 : * parallel worker. We might eventually be able to relax this
360 : * restriction, but for now it seems best not to have parallel workers
361 : * trying to create their own parallel workers.
362 : */
363 474572 : if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
364 446836 : IsUnderPostmaster &&
365 446836 : parse->commandType == CMD_SELECT &&
366 360968 : !parse->hasModifyingCTE &&
367 360828 : max_parallel_workers_per_gather > 0 &&
368 360224 : !IsParallelWorker())
369 : {
370 : /* all the cheap tests pass, so scan the query tree */
371 360148 : glob->maxParallelHazard = max_parallel_hazard(parse);
372 360148 : glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
373 : }
374 : else
375 : {
376 : /* skip the query tree scan, just assume it's unsafe */
377 114424 : glob->maxParallelHazard = PROPARALLEL_UNSAFE;
378 114424 : glob->parallelModeOK = false;
379 : }
380 :
381 : /*
382 : * glob->parallelModeNeeded is normally set to false here and changed to
383 : * true during plan creation if a Gather or Gather Merge plan is actually
384 : * created (cf. create_gather_plan, create_gather_merge_plan).
385 : *
386 : * However, if debug_parallel_query = on or debug_parallel_query =
387 : * regress, then we impose parallel mode whenever it's safe to do so, even
388 : * if the final plan doesn't use parallelism. It's not safe to do so if
389 : * the query contains anything parallel-unsafe; parallelModeOK will be
390 : * false in that case. Note that parallelModeOK can't change after this
391 : * point. Otherwise, everything in the query is either parallel-safe or
392 : * parallel-restricted, and in either case it should be OK to impose
393 : * parallel-mode restrictions. If that ends up breaking something, then
394 : * either some function the user included in the query is incorrectly
395 : * labeled as parallel-safe or parallel-restricted when in reality it's
396 : * parallel-unsafe, or else the query planner itself has a bug.
397 : */
398 769976 : glob->parallelModeNeeded = glob->parallelModeOK &&
399 295404 : (debug_parallel_query != DEBUG_PARALLEL_OFF);
400 :
401 : /* Determine what fraction of the plan is likely to be scanned */
402 474572 : if (cursorOptions & CURSOR_OPT_FAST_PLAN)
403 : {
404 : /*
405 : * We have no real idea how many tuples the user will ultimately FETCH
406 : * from a cursor, but it is often the case that he doesn't want 'em
407 : * all, or would prefer a fast-start plan anyway so that he can
408 : * process some of the tuples sooner. Use a GUC parameter to decide
409 : * what fraction to optimize for.
410 : */
411 4644 : tuple_fraction = cursor_tuple_fraction;
412 :
413 : /*
414 : * We document cursor_tuple_fraction as simply being a fraction, which
415 : * means the edge cases 0 and 1 have to be treated specially here. We
416 : * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
417 : */
418 4644 : if (tuple_fraction >= 1.0)
419 0 : tuple_fraction = 0.0;
420 4644 : else if (tuple_fraction <= 0.0)
421 0 : tuple_fraction = 1e-10;
422 : }
423 : else
424 : {
425 : /* Default assumption is we need all the tuples */
426 469928 : tuple_fraction = 0.0;
427 : }
428 :
429 : /* primary planning entry point (may recurse for subqueries) */
430 474572 : root = subquery_planner(glob, parse, NULL, false, tuple_fraction, NULL);
431 :
432 : /* Select best Path and turn it into a Plan */
433 470742 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
434 470742 : best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
435 :
436 470742 : top_plan = create_plan(root, best_path);
437 :
438 : /*
439 : * If creating a plan for a scrollable cursor, make sure it can run
440 : * backwards on demand. Add a Material node at the top at need.
441 : */
442 470346 : if (cursorOptions & CURSOR_OPT_SCROLL)
443 : {
444 266 : if (!ExecSupportsBackwardScan(top_plan))
445 32 : top_plan = materialize_finished_plan(top_plan);
446 : }
447 :
448 : /*
449 : * Optionally add a Gather node for testing purposes, provided this is
450 : * actually a safe thing to do.
451 : *
452 : * We can add Gather even when top_plan has parallel-safe initPlans, but
453 : * then we have to move the initPlans to the Gather node because of
454 : * SS_finalize_plan's limitations. That would cause cosmetic breakage of
455 : * regression tests when debug_parallel_query = regress, because initPlans
456 : * that would normally appear on the top_plan move to the Gather, causing
457 : * them to disappear from EXPLAIN output. That doesn't seem worth kluging
458 : * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
459 : */
460 470346 : if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
461 192 : top_plan->parallel_safe &&
462 92 : (top_plan->initPlan == NIL ||
463 0 : debug_parallel_query != DEBUG_PARALLEL_REGRESS))
464 : {
465 92 : Gather *gather = makeNode(Gather);
466 : Cost initplan_cost;
467 : bool unsafe_initplans;
468 :
469 92 : gather->plan.targetlist = top_plan->targetlist;
470 92 : gather->plan.qual = NIL;
471 92 : gather->plan.lefttree = top_plan;
472 92 : gather->plan.righttree = NULL;
473 92 : gather->num_workers = 1;
474 92 : gather->single_copy = true;
475 92 : gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
476 :
477 : /* Transfer any initPlans to the new top node */
478 92 : gather->plan.initPlan = top_plan->initPlan;
479 92 : top_plan->initPlan = NIL;
480 :
481 : /*
482 : * Since this Gather has no parallel-aware descendants to signal to,
483 : * we don't need a rescan Param.
484 : */
485 92 : gather->rescan_param = -1;
486 :
487 : /*
488 : * Ideally we'd use cost_gather here, but setting up dummy path data
489 : * to satisfy it doesn't seem much cleaner than knowing what it does.
490 : */
491 92 : gather->plan.startup_cost = top_plan->startup_cost +
492 : parallel_setup_cost;
493 92 : gather->plan.total_cost = top_plan->total_cost +
494 92 : parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
495 92 : gather->plan.plan_rows = top_plan->plan_rows;
496 92 : gather->plan.plan_width = top_plan->plan_width;
497 92 : gather->plan.parallel_aware = false;
498 92 : gather->plan.parallel_safe = false;
499 :
500 : /*
501 : * Delete the initplans' cost from top_plan. We needn't add it to the
502 : * Gather node, since the above coding already included it there.
503 : */
504 92 : SS_compute_initplan_cost(gather->plan.initPlan,
505 : &initplan_cost, &unsafe_initplans);
506 92 : top_plan->startup_cost -= initplan_cost;
507 92 : top_plan->total_cost -= initplan_cost;
508 :
509 : /* use parallel mode for parallel plans. */
510 92 : root->glob->parallelModeNeeded = true;
511 :
512 92 : top_plan = &gather->plan;
513 : }
514 :
515 : /*
516 : * If any Params were generated, run through the plan tree and compute
517 : * each plan node's extParam/allParam sets. Ideally we'd merge this into
518 : * set_plan_references' tree traversal, but for now it has to be separate
519 : * because we need to visit subplans before, not after, the main plan.
520 : */
521 470346 : if (glob->paramExecTypes != NIL)
522 : {
523 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
524 198434 : forboth(lp, glob->subplans, lr, glob->subroots)
525 : {
526 42480 : Plan *subplan = (Plan *) lfirst(lp);
527 42480 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
528 :
529 42480 : SS_finalize_plan(subroot, subplan);
530 : }
531 155954 : SS_finalize_plan(root, top_plan);
532 : }
533 :
534 : /* final cleanup of the plan */
535 : Assert(glob->finalrtable == NIL);
536 : Assert(glob->finalrteperminfos == NIL);
537 : Assert(glob->finalrowmarks == NIL);
538 : Assert(glob->resultRelations == NIL);
539 : Assert(glob->appendRelations == NIL);
540 470346 : top_plan = set_plan_references(root, top_plan);
541 : /* ... and the subplans (both regular subplans and initplans) */
542 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
543 512826 : forboth(lp, glob->subplans, lr, glob->subroots)
544 : {
545 42480 : Plan *subplan = (Plan *) lfirst(lp);
546 42480 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
547 :
548 42480 : lfirst(lp) = set_plan_references(subroot, subplan);
549 : }
550 :
551 : /* build the PlannedStmt result */
552 470346 : result = makeNode(PlannedStmt);
553 :
554 470346 : result->commandType = parse->commandType;
555 470346 : result->queryId = parse->queryId;
556 470346 : result->hasReturning = (parse->returningList != NIL);
557 470346 : result->hasModifyingCTE = parse->hasModifyingCTE;
558 470346 : result->canSetTag = parse->canSetTag;
559 470346 : result->transientPlan = glob->transientPlan;
560 470346 : result->dependsOnRole = glob->dependsOnRole;
561 470346 : result->parallelModeNeeded = glob->parallelModeNeeded;
562 470346 : result->planTree = top_plan;
563 470346 : result->partPruneInfos = glob->partPruneInfos;
564 470346 : result->rtable = glob->finalrtable;
565 940692 : result->unprunableRelids = bms_difference(glob->allRelids,
566 470346 : glob->prunableRelids);
567 470346 : result->permInfos = glob->finalrteperminfos;
568 470346 : result->resultRelations = glob->resultRelations;
569 470346 : result->firstResultRels = glob->firstResultRels;
570 470346 : result->appendRelations = glob->appendRelations;
571 470346 : result->subplans = glob->subplans;
572 470346 : result->rewindPlanIDs = glob->rewindPlanIDs;
573 470346 : result->rowMarks = glob->finalrowmarks;
574 470346 : result->relationOids = glob->relationOids;
575 470346 : result->invalItems = glob->invalItems;
576 470346 : result->paramExecTypes = glob->paramExecTypes;
577 : /* utilityStmt should be null, but we might as well copy it */
578 470346 : result->utilityStmt = parse->utilityStmt;
579 470346 : result->stmt_location = parse->stmt_location;
580 470346 : result->stmt_len = parse->stmt_len;
581 :
582 470346 : result->jitFlags = PGJIT_NONE;
583 470346 : if (jit_enabled && jit_above_cost >= 0 &&
584 469708 : top_plan->total_cost > jit_above_cost)
585 : {
586 934 : result->jitFlags |= PGJIT_PERFORM;
587 :
588 : /*
589 : * Decide how much effort should be put into generating better code.
590 : */
591 934 : if (jit_optimize_above_cost >= 0 &&
592 934 : top_plan->total_cost > jit_optimize_above_cost)
593 432 : result->jitFlags |= PGJIT_OPT3;
594 934 : if (jit_inline_above_cost >= 0 &&
595 934 : top_plan->total_cost > jit_inline_above_cost)
596 432 : result->jitFlags |= PGJIT_INLINE;
597 :
598 : /*
599 : * Decide which operations should be JITed.
600 : */
601 934 : if (jit_expressions)
602 934 : result->jitFlags |= PGJIT_EXPR;
603 934 : if (jit_tuple_deforming)
604 934 : result->jitFlags |= PGJIT_DEFORM;
605 : }
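/*
 * Editor's illustration, not part of planner.c: with the documented default
 * settings (jit = on, jit_above_cost = 100000, jit_optimize_above_cost =
 * 500000, jit_inline_above_cost = 500000, jit_expressions = on,
 * jit_tuple_deforming = on), a plan whose total_cost is 600000 leaves the
 * block above with PGJIT_PERFORM | PGJIT_OPT3 | PGJIT_INLINE | PGJIT_EXPR |
 * PGJIT_DEFORM set, while a plan costing 50000 keeps jitFlags = PGJIT_NONE.
 */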
606 :
607 470346 : if (glob->partition_directory != NULL)
608 11420 : DestroyPartitionDirectory(glob->partition_directory);
609 :
610 470346 : return result;
611 : }
612 :
613 :
614 : /*--------------------
615 : * subquery_planner
616 : * Invokes the planner on a subquery. We recurse to here for each
617 : * sub-SELECT found in the query tree.
618 : *
619 : * glob is the global state for the current planner run.
620 : * parse is the querytree produced by the parser & rewriter.
621 : * parent_root is the immediate parent Query's info (NULL at the top level).
622 : * hasRecursion is true if this is a recursive WITH query.
623 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
624 : * tuple_fraction is interpreted as explained for grouping_planner, below.
625 : * setops is used for set operation subqueries to provide the subquery with
626 : * the context in which it's being used so that Paths correctly sorted for the
627 : * set operation can be generated. NULL when not planning a set operation
628 : * child, or when a child of a set op that isn't interested in sorted input.
629 : *
630 : * Basically, this routine does the stuff that should only be done once
631 : * per Query object. It then calls grouping_planner. At one time,
632 : * grouping_planner could be invoked recursively on the same Query object;
633 : * that's not currently true, but we keep the separation between the two
634 : * routines anyway, in case we need it again someday.
635 : *
636 : * subquery_planner will be called recursively to handle sub-Query nodes
637 : * found within the query's expressions and rangetable.
638 : *
639 : * Returns the PlannerInfo struct ("root") that contains all data generated
640 : * while planning the subquery. In particular, the Path(s) attached to
641 : * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
642 : * cheapest way(s) to implement the query. The top level will select the
643 : * best Path and pass it through createplan.c to produce a finished Plan.
644 : *--------------------
645 : */
646 : PlannerInfo *
647 542046 : subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root,
648 : bool hasRecursion, double tuple_fraction,
649 : SetOperationStmt *setops)
650 : {
651 : PlannerInfo *root;
652 : List *newWithCheckOptions;
653 : List *newHaving;
654 : bool hasOuterJoins;
655 : bool hasResultRTEs;
656 : RelOptInfo *final_rel;
657 : ListCell *l;
658 :
659 : /* Create a PlannerInfo data structure for this subquery */
660 542046 : root = makeNode(PlannerInfo);
661 542046 : root->parse = parse;
662 542046 : root->glob = glob;
663 542046 : root->query_level = parent_root ? parent_root->query_level + 1 : 1;
664 542046 : root->parent_root = parent_root;
665 542046 : root->plan_params = NIL;
666 542046 : root->outer_params = NULL;
667 542046 : root->planner_cxt = CurrentMemoryContext;
668 542046 : root->init_plans = NIL;
669 542046 : root->cte_plan_ids = NIL;
670 542046 : root->multiexpr_params = NIL;
671 542046 : root->join_domains = NIL;
672 542046 : root->eq_classes = NIL;
673 542046 : root->ec_merging_done = false;
674 542046 : root->last_rinfo_serial = 0;
675 542046 : root->all_result_relids =
676 542046 : parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
677 542046 : root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
678 542046 : root->append_rel_list = NIL;
679 542046 : root->row_identity_vars = NIL;
680 542046 : root->rowMarks = NIL;
681 542046 : memset(root->upper_rels, 0, sizeof(root->upper_rels));
682 542046 : memset(root->upper_targets, 0, sizeof(root->upper_targets));
683 542046 : root->processed_groupClause = NIL;
684 542046 : root->processed_distinctClause = NIL;
685 542046 : root->processed_tlist = NIL;
686 542046 : root->update_colnos = NIL;
687 542046 : root->grouping_map = NULL;
688 542046 : root->minmax_aggs = NIL;
689 542046 : root->qual_security_level = 0;
690 542046 : root->hasPseudoConstantQuals = false;
691 542046 : root->hasAlternativeSubPlans = false;
692 542046 : root->placeholdersFrozen = false;
693 542046 : root->hasRecursion = hasRecursion;
694 542046 : if (hasRecursion)
695 902 : root->wt_param_id = assign_special_exec_param(root);
696 : else
697 541144 : root->wt_param_id = -1;
698 542046 : root->non_recursive_path = NULL;
699 542046 : root->partColsUpdated = false;
700 :
701 : /*
702 : * Create the top-level join domain. This won't have valid contents until
703 : * deconstruct_jointree fills it in, but the node needs to exist before
704 : * that so we can build EquivalenceClasses referencing it.
705 : */
706 542046 : root->join_domains = list_make1(makeNode(JoinDomain));
707 :
708 : /*
709 : * If there is a WITH list, process each WITH query and either convert it
710 : * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
711 : */
712 542046 : if (parse->cteList)
713 2768 : SS_process_ctes(root);
714 :
715 : /*
716 : * If it's a MERGE command, transform the joinlist as appropriate.
717 : */
718 542040 : transform_MERGE_to_join(parse);
719 :
720 : /*
721 : * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
722 : * that we don't need so many special cases to deal with that situation.
723 : */
724 542040 : replace_empty_jointree(parse);
725 :
726 : /*
727 : * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
728 : * to transform them into joins. Note that this step does not descend
729 : * into subqueries; if we pull up any subqueries below, their SubLinks are
730 : * processed just before pulling them up.
731 : */
732 542040 : if (parse->hasSubLinks)
733 32762 : pull_up_sublinks(root);
734 :
735 : /*
736 : * Scan the rangetable for function RTEs, do const-simplification on them,
737 : * and then inline them if possible (producing subqueries that might get
738 : * pulled up next). Recursion issues here are handled in the same way as
739 : * for SubLinks.
740 : */
741 542040 : preprocess_function_rtes(root);
742 :
743 : /*
744 : * Scan the rangetable for relations with virtual generated columns, and
745 : * replace all Var nodes in the query that reference these columns with
746 : * the generation expressions. Recursion issues here are handled in the
747 : * same way as for SubLinks.
748 : */
749 542034 : parse = root->parse = expand_virtual_generated_columns(root);
750 :
751 : /*
752 : * Check to see if any subqueries in the jointree can be merged into this
753 : * query.
754 : */
755 542034 : pull_up_subqueries(root);
756 :
757 : /*
758 : * If this is a simple UNION ALL query, flatten it into an appendrel. We
759 : * do this now because it requires applying pull_up_subqueries to the leaf
760 : * queries of the UNION ALL, which weren't touched above because they
761 : * weren't referenced by the jointree (they will be after we do this).
762 : */
763 542028 : if (parse->setOperations)
764 6332 : flatten_simple_union_all(root);
765 :
766 : /*
767 : * Survey the rangetable to see what kinds of entries are present. We can
768 : * skip some later processing if relevant SQL features are not used; for
769 : * example if there are no JOIN RTEs we can avoid the expense of doing
770 : * flatten_join_alias_vars(). This must be done after we have finished
771 : * adding rangetable entries, of course. (Note: actually, processing of
772 : * inherited or partitioned rels can cause RTEs for their child tables to
773 : * get added later; but those must all be RTE_RELATION entries, so they
774 : * don't invalidate the conclusions drawn here.)
775 : */
776 542028 : root->hasJoinRTEs = false;
777 542028 : root->hasLateralRTEs = false;
778 542028 : root->group_rtindex = 0;
779 542028 : hasOuterJoins = false;
780 542028 : hasResultRTEs = false;
781 1442678 : foreach(l, parse->rtable)
782 : {
783 900650 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
784 :
785 900650 : switch (rte->rtekind)
786 : {
787 458844 : case RTE_RELATION:
788 458844 : if (rte->inh)
789 : {
790 : /*
791 : * Check to see if the relation actually has any children;
792 : * if not, clear the inh flag so we can treat it as a
793 : * plain base relation.
794 : *
795 : * Note: this could give a false-positive result, if the
796 : * rel once had children but no longer does. We used to
797 : * be able to clear rte->inh later on when we discovered
798 : * that, but no more; we have to handle such cases as
799 : * full-fledged inheritance.
800 : */
801 372714 : rte->inh = has_subclass(rte->relid);
802 : }
803 458844 : break;
804 85604 : case RTE_JOIN:
805 85604 : root->hasJoinRTEs = true;
806 85604 : if (IS_OUTER_JOIN(rte->jointype))
807 47292 : hasOuterJoins = true;
808 85604 : break;
809 225534 : case RTE_RESULT:
810 225534 : hasResultRTEs = true;
811 225534 : break;
812 4466 : case RTE_GROUP:
813 : Assert(parse->hasGroupRTE);
814 4466 : root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
815 4466 : break;
816 126202 : default:
817 : /* No work here for other RTE types */
818 126202 : break;
819 : }
820 :
821 900650 : if (rte->lateral)
822 10750 : root->hasLateralRTEs = true;
823 :
824 : /*
825 : * We can also determine the maximum security level required for any
826 : * securityQuals now. Addition of inheritance-child RTEs won't affect
827 : * this, because child tables don't have their own securityQuals; see
828 : * expand_single_inheritance_child().
829 : */
830 900650 : if (rte->securityQuals)
831 2472 : root->qual_security_level = Max(root->qual_security_level,
832 : list_length(rte->securityQuals));
833 : }
834 :
835 : /*
836 : * If we have now verified that the query target relation is
837 : * non-inheriting, mark it as a leaf target.
838 : */
839 542028 : if (parse->resultRelation)
840 : {
841 92540 : RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
842 :
843 92540 : if (!rte->inh)
844 89748 : root->leaf_result_relids =
845 89748 : bms_make_singleton(parse->resultRelation);
846 : }
847 :
848 : /*
849 : * Preprocess RowMark information. We need to do this after subquery
850 : * pullup, so that all base relations are present.
851 : */
852 542028 : preprocess_rowmarks(root);
853 :
854 : /*
855 : * Set hasHavingQual to remember if HAVING clause is present. Needed
856 : * because preprocess_expression will reduce a constant-true condition to
857 : * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
858 : */
859 542028 : root->hasHavingQual = (parse->havingQual != NULL);
860 :
861 : /*
862 : * Do expression preprocessing on targetlist and quals, as well as other
863 : * random expressions in the querytree. Note that we do not need to
864 : * handle sort/group expressions explicitly, because they are actually
865 : * part of the targetlist.
866 : */
867 538276 : parse->targetList = (List *)
868 542028 : preprocess_expression(root, (Node *) parse->targetList,
869 : EXPRKIND_TARGET);
870 :
871 538276 : newWithCheckOptions = NIL;
872 540692 : foreach(l, parse->withCheckOptions)
873 : {
874 2416 : WithCheckOption *wco = lfirst_node(WithCheckOption, l);
875 :
876 2416 : wco->qual = preprocess_expression(root, wco->qual,
877 : EXPRKIND_QUAL);
878 2416 : if (wco->qual != NULL)
879 2016 : newWithCheckOptions = lappend(newWithCheckOptions, wco);
880 : }
881 538276 : parse->withCheckOptions = newWithCheckOptions;
882 :
883 538276 : parse->returningList = (List *)
884 538276 : preprocess_expression(root, (Node *) parse->returningList,
885 : EXPRKIND_TARGET);
886 :
887 538276 : preprocess_qual_conditions(root, (Node *) parse->jointree);
888 :
889 538276 : parse->havingQual = preprocess_expression(root, parse->havingQual,
890 : EXPRKIND_QUAL);
891 :
892 540894 : foreach(l, parse->windowClause)
893 : {
894 2618 : WindowClause *wc = lfirst_node(WindowClause, l);
895 :
896 : /* partitionClause/orderClause are sort/group expressions */
897 2618 : wc->startOffset = preprocess_expression(root, wc->startOffset,
898 : EXPRKIND_LIMIT);
899 2618 : wc->endOffset = preprocess_expression(root, wc->endOffset,
900 : EXPRKIND_LIMIT);
901 : }
902 :
903 538276 : parse->limitOffset = preprocess_expression(root, parse->limitOffset,
904 : EXPRKIND_LIMIT);
905 538276 : parse->limitCount = preprocess_expression(root, parse->limitCount,
906 : EXPRKIND_LIMIT);
907 :
908 538276 : if (parse->onConflict)
909 : {
910 3628 : parse->onConflict->arbiterElems = (List *)
911 1814 : preprocess_expression(root,
912 1814 : (Node *) parse->onConflict->arbiterElems,
913 : EXPRKIND_ARBITER_ELEM);
914 3628 : parse->onConflict->arbiterWhere =
915 1814 : preprocess_expression(root,
916 1814 : parse->onConflict->arbiterWhere,
917 : EXPRKIND_QUAL);
918 3628 : parse->onConflict->onConflictSet = (List *)
919 1814 : preprocess_expression(root,
920 1814 : (Node *) parse->onConflict->onConflictSet,
921 : EXPRKIND_TARGET);
922 1814 : parse->onConflict->onConflictWhere =
923 1814 : preprocess_expression(root,
924 1814 : parse->onConflict->onConflictWhere,
925 : EXPRKIND_QUAL);
926 : /* exclRelTlist contains only Vars, so no preprocessing needed */
927 : }
928 :
929 541054 : foreach(l, parse->mergeActionList)
930 : {
931 2778 : MergeAction *action = (MergeAction *) lfirst(l);
932 :
933 2778 : action->targetList = (List *)
934 2778 : preprocess_expression(root,
935 2778 : (Node *) action->targetList,
936 : EXPRKIND_TARGET);
937 2778 : action->qual =
938 2778 : preprocess_expression(root,
939 : (Node *) action->qual,
940 : EXPRKIND_QUAL);
941 : }
942 :
943 538276 : parse->mergeJoinCondition =
944 538276 : preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
945 :
946 538276 : root->append_rel_list = (List *)
947 538276 : preprocess_expression(root, (Node *) root->append_rel_list,
948 : EXPRKIND_APPINFO);
949 :
950 : /* Also need to preprocess expressions within RTEs */
951 1434896 : foreach(l, parse->rtable)
952 : {
953 896620 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
954 : int kind;
955 : ListCell *lcsq;
956 :
957 896620 : if (rte->rtekind == RTE_RELATION)
958 : {
959 458578 : if (rte->tablesample)
960 228 : rte->tablesample = (TableSampleClause *)
961 228 : preprocess_expression(root,
962 228 : (Node *) rte->tablesample,
963 : EXPRKIND_TABLESAMPLE);
964 : }
965 438042 : else if (rte->rtekind == RTE_SUBQUERY)
966 : {
967 : /*
968 : * We don't want to do all preprocessing yet on the subquery's
969 : * expressions, since that will happen when we plan it. But if it
970 : * contains any join aliases of our level, those have to get
971 : * expanded now, because planning of the subquery won't do it.
972 : * That's only possible if the subquery is LATERAL.
973 : */
974 59926 : if (rte->lateral && root->hasJoinRTEs)
975 1228 : rte->subquery = (Query *)
976 1228 : flatten_join_alias_vars(root, root->parse,
977 1228 : (Node *) rte->subquery);
978 : }
979 378116 : else if (rte->rtekind == RTE_FUNCTION)
980 : {
981 : /* Preprocess the function expression(s) fully */
982 52090 : kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
983 52090 : rte->functions = (List *)
984 52090 : preprocess_expression(root, (Node *) rte->functions, kind);
985 : }
986 326026 : else if (rte->rtekind == RTE_TABLEFUNC)
987 : {
988 : /* Preprocess the function expression(s) fully */
989 626 : kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
990 626 : rte->tablefunc = (TableFunc *)
991 626 : preprocess_expression(root, (Node *) rte->tablefunc, kind);
992 : }
993 325400 : else if (rte->rtekind == RTE_VALUES)
994 : {
995 : /* Preprocess the values lists fully */
996 8172 : kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
997 8172 : rte->values_lists = (List *)
998 8172 : preprocess_expression(root, (Node *) rte->values_lists, kind);
999 : }
1000 317228 : else if (rte->rtekind == RTE_GROUP)
1001 : {
1002 : /* Preprocess the groupexprs list fully */
1003 4466 : rte->groupexprs = (List *)
1004 4466 : preprocess_expression(root, (Node *) rte->groupexprs,
1005 : EXPRKIND_GROUPEXPR);
1006 : }
1007 :
1008 : /*
1009 : * Process each element of the securityQuals list as if it were a
1010 : * separate qual expression (as indeed it is). We need to do it this
1011 : * way to get proper canonicalization of AND/OR structure. Note that
1012 : * this converts each element into an implicit-AND sublist.
1013 : */
1014 899450 : foreach(lcsq, rte->securityQuals)
1015 : {
1016 2830 : lfirst(lcsq) = preprocess_expression(root,
1017 2830 : (Node *) lfirst(lcsq),
1018 : EXPRKIND_QUAL);
1019 : }
1020 : }
1021 :
1022 : /*
1023 : * Now that we are done preprocessing expressions, and in particular done
1024 : * flattening join alias variables, get rid of the joinaliasvars lists.
1025 : * They no longer match what expressions in the rest of the tree look
1026 : * like, because we have not preprocessed expressions in those lists (and
1027 : * do not want to; for example, expanding a SubLink there would result in
1028 : * a useless unreferenced subplan). Leaving them in place simply creates
1029 : * a hazard for later scans of the tree. We could try to prevent that by
1030 : * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1031 : * but that doesn't sound very reliable.
1032 : */
1033 538276 : if (root->hasJoinRTEs)
1034 : {
1035 294600 : foreach(l, parse->rtable)
1036 : {
1037 243198 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1038 :
1039 243198 : rte->joinaliasvars = NIL;
1040 : }
1041 : }
1042 :
1043 : /*
1044 : * Replace any Vars in the subquery's targetlist and havingQual that
1045 : * reference GROUP outputs with the underlying grouping expressions.
1046 : *
1047 : * Note that we need to perform this replacement after we've preprocessed
1048 : * the grouping expressions. This is to ensure that there is only one
1049 : * instance of SubPlan for each SubLink contained within the grouping
1050 : * expressions.
1051 : */
1052 538276 : if (parse->hasGroupRTE)
1053 : {
1054 4466 : parse->targetList = (List *)
1055 4466 : flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1056 4466 : parse->havingQual =
1057 4466 : flatten_group_exprs(root, root->parse, parse->havingQual);
1058 : }
1059 :
1060 : /* Constant-folding might have removed all set-returning functions */
1061 538276 : if (parse->hasTargetSRFs)
1062 9066 : parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1063 :
1064 : /*
1065 : * In some cases we may want to transfer a HAVING clause into WHERE. We
1066 : * cannot do so if the HAVING clause contains aggregates (obviously) or
1067 : * volatile functions (since a HAVING clause is supposed to be executed
1068 : * only once per group). We also can't do this if there are any nonempty
1069 : * grouping sets and the clause references any columns that are nullable
1070 : * by the grouping sets; moving such a clause into WHERE would potentially
1071 : * change the results. (If there are only empty grouping sets, then the
1072 : * HAVING clause must be degenerate as discussed below.)
1073 : *
1074 : * Also, it may be that the clause is so expensive to execute that we're
1075 : * better off doing it only once per group, despite the loss of
1076 : * selectivity. This is hard to estimate short of doing the entire
1077 : * planning process twice, so we use a heuristic: clauses containing
1078 : * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1079 : * clause into WHERE, in hopes of eliminating tuples before aggregation
1080 : * instead of after.
1081 : *
1082 : * If the query has explicit grouping then we can simply move such a
1083 : * clause into WHERE; any group that fails the clause will not be in the
1084 : * output because none of its tuples will reach the grouping or
1085 : * aggregation stage. Otherwise we must have a degenerate (variable-free)
1086 : * HAVING clause, which we put in WHERE so that query_planner() can use it
1087 : * in a gating Result node, but also keep in HAVING to ensure that we
1088 : * don't emit a bogus aggregated row. (This could be done better, but it
1089 : * seems not worth optimizing.)
1090 : *
1091 : * Note that a HAVING clause may contain expressions that are not fully
1092 : * preprocessed. This can happen if these expressions are part of
1093 : * grouping items. In such cases, they are replaced with GROUP Vars in
1094 : * the parser and then replaced back after we've done with expression
1095 : * preprocessing on havingQual. This is not an issue if the clause
1096 : * remains in HAVING, because these expressions will be matched to lower
1097 : * target items in setrefs.c. However, if the clause is moved or copied
1098 : * into WHERE, we need to ensure that these expressions are fully
1099 : * preprocessed.
1100 : *
1101 : * Note that both havingQual and parse->jointree->quals are in
1102 : * implicitly-ANDed-list form at this point, even though they are declared
1103 : * as Node *.
1104 : */
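/*
 * Editor's illustration, not part of planner.c: for a query such as
 *		SELECT a, sum(b) FROM t GROUP BY a HAVING a > 0 AND sum(b) > 10
 * the loop below keeps "sum(b) > 10" in HAVING (it contains an aggregate)
 * but moves "a > 0" into WHERE, since the query has a groupClause and that
 * clause is free of aggregates, volatile functions, and subplans.
 */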
1105 538276 : newHaving = NIL;
1106 539432 : foreach(l, (List *) parse->havingQual)
1107 : {
1108 1156 : Node *havingclause = (Node *) lfirst(l);
1109 :
1110 1490 : if (contain_agg_clause(havingclause) ||
1111 668 : contain_volatile_functions(havingclause) ||
1112 334 : contain_subplans(havingclause) ||
1113 418 : (parse->groupClause && parse->groupingSets &&
1114 84 : bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1115 : {
1116 : /* keep it in HAVING */
1117 894 : newHaving = lappend(newHaving, havingclause);
1118 : }
1119 262 : else if (parse->groupClause)
1120 : {
1121 : Node *whereclause;
1122 :
1123 : /* Preprocess the HAVING clause fully */
1124 244 : whereclause = preprocess_expression(root, havingclause,
1125 : EXPRKIND_QUAL);
1126 : /* ... and move it to WHERE */
1127 244 : parse->jointree->quals = (Node *)
1128 244 : list_concat((List *) parse->jointree->quals,
1129 : (List *) whereclause);
1130 : }
1131 : else
1132 : {
1133 : Node *whereclause;
1134 :
1135 : /* Preprocess the HAVING clause fully */
1136 18 : whereclause = preprocess_expression(root, copyObject(havingclause),
1137 : EXPRKIND_QUAL);
1138 : /* ... and put a copy in WHERE */
1139 36 : parse->jointree->quals = (Node *)
1140 18 : list_concat((List *) parse->jointree->quals,
1141 : (List *) whereclause);
1142 : /* ... and also keep it in HAVING */
1143 18 : newHaving = lappend(newHaving, havingclause);
1144 : }
1145 : }
1146 538276 : parse->havingQual = (Node *) newHaving;
1147 :
1148 : /*
1149 : * If we have any outer joins, try to reduce them to plain inner joins.
1150 : * This step is most easily done after we've done expression
1151 : * preprocessing.
1152 : */
1153 538276 : if (hasOuterJoins)
1154 33594 : reduce_outer_joins(root);
1155 :
1156 : /*
1157 : * If we have any RTE_RESULT relations, see if they can be deleted from
1158 : * the jointree. We also rely on this processing to flatten single-child
1159 : * FromExprs underneath outer joins. This step is most effectively done
1160 : * after we've done expression preprocessing and outer join reduction.
1161 : */
1162 538276 : if (hasResultRTEs || hasOuterJoins)
1163 254088 : remove_useless_result_rtes(root);
1164 :
1165 : /*
1166 : * Do the main planning.
1167 : */
1168 538276 : grouping_planner(root, tuple_fraction, setops);
1169 :
1170 : /*
1171 : * Capture the set of outer-level param IDs we have access to, for use in
1172 : * extParam/allParam calculations later.
1173 : */
1174 538210 : SS_identify_outer_params(root);
1175 :
1176 : /*
1177 : * If any initPlans were created in this query level, adjust the surviving
1178 : * Paths' costs and parallel-safety flags to account for them. The
1179 : * initPlans won't actually get attached to the plan tree till
1180 : * create_plan() runs, but we must include their effects now.
1181 : */
1182 538210 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1183 538210 : SS_charge_for_initplans(root, final_rel);
1184 :
1185 : /*
1186 : * Make sure we've identified the cheapest Path for the final rel. (By
1187 : * doing this here not in grouping_planner, we include initPlan costs in
1188 : * the decision, though it's unlikely that will change anything.)
1189 : */
1190 538210 : set_cheapest(final_rel);
1191 :
1192 538210 : return root;
1193 : }
1194 :
1195 : /*
1196 : * preprocess_expression
1197 : * Do subquery_planner's preprocessing work for an expression,
1198 : * which can be a targetlist, a WHERE clause (including JOIN/ON
1199 : * conditions), a HAVING clause, or a few other things.
1200 : */
1201 : static Node *
1202 4501542 : preprocess_expression(PlannerInfo *root, Node *expr, int kind)
1203 : {
1204 : /*
1205 : * Fall out quickly if expression is empty. This occurs often enough to
1206 : * be worth checking. Note that null->null is the correct conversion for
1207 : * implicit-AND result format, too.
1208 : */
1209 4501542 : if (expr == NULL)
1210 3583202 : return NULL;
1211 :
1212 : /*
1213 : * If the query has any join RTEs, replace join alias variables with
1214 : * base-relation variables. We must do this first, since any expressions
1215 : * we may extract from the joinaliasvars lists have not been preprocessed.
1216 : * For example, if we did this after sublink processing, sublinks expanded
1217 : * out from join aliases would not get processed. But we can skip this in
1218 : * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1219 : * they can't contain any Vars of the current query level.
1220 : */
1221 918340 : if (root->hasJoinRTEs &&
1222 372620 : !(kind == EXPRKIND_RTFUNC ||
1223 186112 : kind == EXPRKIND_VALUES ||
1224 : kind == EXPRKIND_TABLESAMPLE ||
1225 : kind == EXPRKIND_TABLEFUNC))
1226 186094 : expr = flatten_join_alias_vars(root, root->parse, expr);
1227 :
1228 : /*
1229 : * Simplify constant expressions. For function RTEs, this was already
1230 : * done by preprocess_function_rtes. (But note we must do it again for
1231 : * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1232 : * un-simplified subexpressions inserted by flattening of subqueries or
1233 : * join alias variables.)
1234 : *
1235 : * Note: an essential effect of this is to convert named-argument function
1236 : * calls to positional notation and insert the current actual values of
1237 : * any default arguments for functions. To ensure that happens, we *must*
1238 : * process all expressions here. Previous PG versions sometimes skipped
1239 : * const-simplification if it didn't seem worth the trouble, but we can't
1240 : * do that anymore.
1241 : *
1242 : * Note: this also flattens nested AND and OR expressions into N-argument
1243 : * form. All processing of a qual expression after this point must be
1244 : * careful to maintain AND/OR flatness --- that is, do not generate a tree
1245 : * with AND directly under AND, nor OR directly under OR.
1246 : */
1247 918340 : if (kind != EXPRKIND_RTFUNC)
1248 874902 : expr = eval_const_expressions(root, expr);
1249 :
1250 : /*
1251 : * If it's a qual or havingQual, canonicalize it.
1252 : */
1253 914588 : if (kind == EXPRKIND_QUAL)
1254 : {
1255 312948 : expr = (Node *) canonicalize_qual((Expr *) expr, false);
1256 :
1257 : #ifdef OPTIMIZER_DEBUG
1258 : printf("After canonicalize_qual()\n");
1259 : pprint(expr);
1260 : #endif
1261 : }
1262 :
1263 : /*
1264 : * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1265 : * hashfuncid of any that might execute more quickly by using hash lookups
1266 : * instead of a linear search.
1267 : */
1268 914588 : if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1269 : {
1270 838554 : convert_saop_to_hashed_saop(expr);
1271 : }
1272 :
1273 : /* Expand SubLinks to SubPlans */
1274 914588 : if (root->parse->hasSubLinks)
1275 91484 : expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1276 :
1277 : /*
1278 : * XXX do not insert anything here unless you have grokked the comments in
1279 : * SS_replace_correlation_vars ...
1280 : */
1281 :
1282 : /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1283 914588 : if (root->query_level > 1)
1284 153084 : expr = SS_replace_correlation_vars(root, expr);
1285 :
1286 : /*
1287 : * If it's a qual or havingQual, convert it to implicit-AND format. (We
1288 : * don't want to do this before eval_const_expressions, since the latter
1289 : * would be unable to simplify a top-level AND correctly. Also,
1290 : * SS_process_sublinks expects explicit-AND format.)
1291 : */
1292 914588 : if (kind == EXPRKIND_QUAL)
1293 312948 : expr = (Node *) make_ands_implicit((Expr *) expr);
1294 :
1295 914588 : return expr;
1296 : }
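/*
 * Editor's illustration, not part of planner.c: a qual passed in as the
 * expression "a = 1 AND b = 2" (EXPRKIND_QUAL) comes back from
 * preprocess_expression() as the implicit-AND list (a = 1, b = 2), while a
 * NULL qual comes back as NULL, the empty implicit-AND list.
 */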
1297 :
1298 : /*
1299 : * preprocess_qual_conditions
1300 : * Recursively scan the query's jointree and do subquery_planner's
1301 : * preprocessing work on each qual condition found therein.
1302 : */
1303 : static void
1304 1313386 : preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1305 : {
1306 1313386 : if (jtnode == NULL)
1307 0 : return;
1308 1313386 : if (IsA(jtnode, RangeTblRef))
1309 : {
1310 : /* nothing to do here */
1311 : }
1312 640648 : else if (IsA(jtnode, FromExpr))
1313 : {
1314 550100 : FromExpr *f = (FromExpr *) jtnode;
1315 : ListCell *l;
1316 :
1317 1144114 : foreach(l, f->fromlist)
1318 594014 : preprocess_qual_conditions(root, lfirst(l));
1319 :
1320 550100 : f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1321 : }
1322 90548 : else if (IsA(jtnode, JoinExpr))
1323 : {
1324 90548 : JoinExpr *j = (JoinExpr *) jtnode;
1325 :
1326 90548 : preprocess_qual_conditions(root, j->larg);
1327 90548 : preprocess_qual_conditions(root, j->rarg);
1328 :
1329 90548 : j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1330 : }
1331 : else
1332 0 : elog(ERROR, "unrecognized node type: %d",
1333 : (int) nodeTag(jtnode));
1334 : }
1335 :
1336 : /*
1337 : * preprocess_phv_expression
1338 : * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1339 : *
1340 : * If a LATERAL subquery references an output of another subquery, and that
1341 : * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1342 : * join, then we'll push the PlaceHolderVar expression down into the subquery
1343 : * and later pull it back up during find_lateral_references, which runs after
1344 : * subquery_planner has preprocessed all the expressions that were in the
1345 : * current query level to start with. So we need to preprocess it then.
1346 : */
1347 : Expr *
1348 72 : preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1349 : {
1350 72 : return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1351 : }
1352 :
1353 : /*--------------------
1354 : * grouping_planner
1355 : * Perform planning steps related to grouping, aggregation, etc.
1356 : *
1357 : * This function adds all required top-level processing to the scan/join
1358 : * Path(s) produced by query_planner.
1359 : *
1360 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
1361 : * tuple_fraction is interpreted as follows:
1362 : * 0: expect all tuples to be retrieved (normal case)
1363 : * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1364 : * from the plan to be retrieved
1365 : * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1366 : * expected to be retrieved (ie, a LIMIT specification).
1367 : * setops is used for set operation subqueries to provide the subquery with
1368 : * the context in which it's being used so that Paths correctly sorted for the
1369 : * set operation can be generated. NULL when not planning a set operation
1370 : * child, or when a child of a set op that isn't interested in sorted input.
1371 : *
1372 : * Returns nothing; the useful output is in the Paths we attach to the
1373 : * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1374 : * root->processed_tlist contains the final processed targetlist.
1375 : *
1376 : * Note that we have not done set_cheapest() on the final rel; it's convenient
1377 : * to leave this to the caller.
1378 : *--------------------
1379 : */
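/*
 * Editor's illustration, not part of planner.c: a query planned for a
 * fast-start cursor with cursor_tuple_fraction = 0.1 arrives here with
 * tuple_fraction = 0.1 ("expect 10% of the tuples to be fetched"), whereas
 * "SELECT ... LIMIT 10" arrives with tuple_fraction = 0 and
 * preprocess_limit() below converts it to the absolute count 10.
 */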
1380 : static void
1381 538276 : grouping_planner(PlannerInfo *root, double tuple_fraction,
1382 : SetOperationStmt *setops)
1383 : {
1384 538276 : Query *parse = root->parse;
1385 538276 : int64 offset_est = 0;
1386 538276 : int64 count_est = 0;
1387 538276 : double limit_tuples = -1.0;
1388 538276 : bool have_postponed_srfs = false;
1389 : PathTarget *final_target;
1390 : List *final_targets;
1391 : List *final_targets_contain_srfs;
1392 : bool final_target_parallel_safe;
1393 : RelOptInfo *current_rel;
1394 : RelOptInfo *final_rel;
1395 : FinalPathExtraData extra;
1396 : ListCell *lc;
1397 :
1398 : /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1399 538276 : if (parse->limitCount || parse->limitOffset)
1400 : {
1401 4852 : tuple_fraction = preprocess_limit(root, tuple_fraction,
1402 : &offset_est, &count_est);
1403 :
1404 : /*
1405 : * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1406 : * estimate the effects of using a bounded sort.
1407 : */
1408 4852 : if (count_est > 0 && offset_est >= 0)
1409 4336 : limit_tuples = (double) count_est + (double) offset_est;
1410 : }
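
	/*
	 * Worked example: with LIMIT 100 OFFSET 20 and both values known,
	 * preprocess_limit() sets count_est = 100 and offset_est = 20, so
	 * limit_tuples becomes 120.0 and (absent a smaller caller request)
	 * tuple_fraction likewise comes back as 120, an absolute row count
	 * since it is >= 1.  An unknown OFFSET (offset_est = -1) leaves
	 * limit_tuples at -1.0, disabling the bounded-sort estimate.
	 */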
1411 :
1412 : /* Make tuple_fraction accessible to lower-level routines */
1413 538276 : root->tuple_fraction = tuple_fraction;
1414 :
1415 538276 : if (parse->setOperations)
1416 : {
1417 : /*
1418 : * Construct Paths for set operations. The results will not need any
1419 : * work except perhaps a top-level sort and/or LIMIT. Note that any
1420 : * special work for recursive unions is the responsibility of
1421 : * plan_set_operations.
1422 : */
1423 5896 : current_rel = plan_set_operations(root);
1424 :
1425 : /*
1426 : * We should not need to call preprocess_targetlist, since we must be
1427 : * in a SELECT query node. Instead, use the processed_tlist returned
1428 : * by plan_set_operations (since this tells whether it returned any
1429 : * resjunk columns!), and transfer any sort key information from the
1430 : * original tlist.
1431 : */
1432 : Assert(parse->commandType == CMD_SELECT);
1433 :
1434 : /* for safety, copy processed_tlist instead of modifying in-place */
1435 5890 : root->processed_tlist =
1436 5890 : postprocess_setop_tlist(copyObject(root->processed_tlist),
1437 : parse->targetList);
1438 :
1439 : /* Also extract the PathTarget form of the setop result tlist */
1440 5890 : final_target = current_rel->cheapest_total_path->pathtarget;
1441 :
1442 : /* And check whether it's parallel safe */
1443 : final_target_parallel_safe =
1444 5890 : is_parallel_safe(root, (Node *) final_target->exprs);
1445 :
1446 : /* The setop result tlist couldn't contain any SRFs */
1447 : Assert(!parse->hasTargetSRFs);
1448 5890 : final_targets = final_targets_contain_srfs = NIL;
1449 :
1450 : /*
1451 : * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1452 : * checked already, but let's make sure).
1453 : */
1454 5890 : if (parse->rowMarks)
1455 0 : ereport(ERROR,
1456 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1457 : /*------
1458 : translator: %s is a SQL row locking clause such as FOR UPDATE */
1459 : errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1460 : LCS_asString(linitial_node(RowMarkClause,
1461 : parse->rowMarks)->strength))));
1462 :
1463 : /*
1464 : * Calculate pathkeys that represent result ordering requirements
1465 : */
1466 : Assert(parse->distinctClause == NIL);
1467 5890 : root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1468 : parse->sortClause,
1469 : root->processed_tlist);
1470 : }
1471 : else
1472 : {
1473 : /* No set operations, do regular planning */
1474 : PathTarget *sort_input_target;
1475 : List *sort_input_targets;
1476 : List *sort_input_targets_contain_srfs;
1477 : bool sort_input_target_parallel_safe;
1478 : PathTarget *grouping_target;
1479 : List *grouping_targets;
1480 : List *grouping_targets_contain_srfs;
1481 : bool grouping_target_parallel_safe;
1482 : PathTarget *scanjoin_target;
1483 : List *scanjoin_targets;
1484 : List *scanjoin_targets_contain_srfs;
1485 : bool scanjoin_target_parallel_safe;
1486 : bool scanjoin_target_same_exprs;
1487 : bool have_grouping;
1488 532380 : WindowFuncLists *wflists = NULL;
1489 532380 : List *activeWindows = NIL;
1490 532380 : grouping_sets_data *gset_data = NULL;
1491 : standard_qp_extra qp_extra;
1492 :
1493 : /* A recursive query should always have setOperations */
1494 : Assert(!root->hasRecursion);
1495 :
1496 : /* Preprocess grouping sets and GROUP BY clause, if any */
1497 532380 : if (parse->groupingSets)
1498 : {
1499 878 : gset_data = preprocess_grouping_sets(root);
1500 : }
1501 531502 : else if (parse->groupClause)
1502 : {
1503 : /* Preprocess regular GROUP BY clause, if any */
1504 3630 : root->processed_groupClause = preprocess_groupclause(root, NIL);
1505 : }
1506 :
1507 : /*
1508 : * Preprocess targetlist. Note that much of the remaining planning
1509 : * work will be done with the PathTarget representation of tlists, but
1510 : * we must also maintain the full representation of the final tlist so
1511 : * that we can transfer its decoration (resnames etc) to the topmost
1512 : * tlist of the finished Plan. This is kept in processed_tlist.
1513 : */
1514 532374 : preprocess_targetlist(root);
1515 :
1516 : /*
1517 : * Mark all the aggregates with resolved aggtranstypes, and detect
1518 : * aggregates that are duplicates or can share transition state. We
1519 : * must do this before slicing and dicing the tlist into various
1520 : * pathtargets, else some copies of the Aggref nodes might escape
1521 : * being marked.
1522 : */
1523 532374 : if (parse->hasAggs)
1524 : {
1525 37662 : preprocess_aggrefs(root, (Node *) root->processed_tlist);
1526 37662 : preprocess_aggrefs(root, (Node *) parse->havingQual);
1527 : }
1528 :
1529 : /*
1530 : * Locate any window functions in the tlist. (We don't need to look
1531 : * anywhere else, since expressions used in ORDER BY will be in there
1532 : * too.) Note that they could all have been eliminated by constant
1533 : * folding, in which case we don't need to do any more work.
1534 : */
1535 532374 : if (parse->hasWindowFuncs)
1536 : {
1537 2384 : wflists = find_window_functions((Node *) root->processed_tlist,
1538 2384 : list_length(parse->windowClause));
1539 2384 : if (wflists->numWindowFuncs > 0)
1540 : {
1541 : /*
1542 : * See if any modifications can be made to each WindowClause
1543 : * to allow the executor to execute the WindowFuncs more
1544 : * quickly.
1545 : */
1546 2378 : optimize_window_clauses(root, wflists);
1547 :
1548 : /* Extract the list of windows actually in use. */
1549 2378 : activeWindows = select_active_windows(root, wflists);
1550 :
1551 : /* Make sure they all have names, for EXPLAIN's use. */
1552 2378 : name_active_windows(activeWindows);
1553 : }
1554 : else
1555 6 : parse->hasWindowFuncs = false;
1556 : }
1557 :
1558 : /*
1559 : * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1560 : * adding logic between here and the query_planner() call. Anything
1561 : * that is needed in MIN/MAX-optimizable cases will have to be
1562 : * duplicated in planagg.c.
1563 : */
1564 532374 : if (parse->hasAggs)
1565 37662 : preprocess_minmax_aggregates(root);
1566 :
1567 : /*
1568 : * Figure out whether there's a hard limit on the number of rows that
1569 : * query_planner's result subplan needs to return. Even if we know a
1570 : * hard limit overall, it doesn't apply if the query has any
1571 : * grouping/aggregation operations, or SRFs in the tlist.
1572 : */
1573 532374 : if (parse->groupClause ||
1574 527914 : parse->groupingSets ||
1575 527872 : parse->distinctClause ||
1576 525346 : parse->hasAggs ||
1577 491574 : parse->hasWindowFuncs ||
1578 489334 : parse->hasTargetSRFs ||
1579 480728 : root->hasHavingQual)
1580 51664 : root->limit_tuples = -1.0;
1581 : else
1582 480710 : root->limit_tuples = limit_tuples;
1583 :
1584 : /* Set up data needed by standard_qp_callback */
1585 532374 : qp_extra.activeWindows = activeWindows;
1586 532374 : qp_extra.gset_data = gset_data;
1587 :
1588 : /*
1589 : * If we're a subquery for a set operation, store the SetOperationStmt
1590 : * in qp_extra.
1591 : */
1592 532374 : qp_extra.setop = setops;
1593 :
1594 : /*
1595 : * Generate the best unsorted and presorted paths for the scan/join
1596 : * portion of this Query, ie the processing represented by the
1597 : * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1598 : * We also generate (in standard_qp_callback) pathkey representations
1599 : * of the query's sort clause, distinct clause, etc.
1600 : */
1601 532374 : current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1602 :
1603 : /*
1604 : * Convert the query's result tlist into PathTarget format.
1605 : *
1606 : * Note: this cannot be done before query_planner() has performed
1607 : * appendrel expansion, because that might add resjunk entries to
1608 : * root->processed_tlist. Waiting till afterwards is also helpful
1609 : * because the target width estimates can use per-Var width numbers
1610 : * that were obtained within query_planner().
1611 : */
1612 532326 : final_target = create_pathtarget(root, root->processed_tlist);
1613 : final_target_parallel_safe =
1614 532326 : is_parallel_safe(root, (Node *) final_target->exprs);
1615 :
1616 : /*
1617 : * If ORDER BY was given, consider whether we should use a post-sort
1618 : * projection, and compute the adjusted target for preceding steps if
1619 : * so.
1620 : */
1621 532326 : if (parse->sortClause)
1622 : {
1623 64510 : sort_input_target = make_sort_input_target(root,
1624 : final_target,
1625 : &have_postponed_srfs);
1626 : sort_input_target_parallel_safe =
1627 64510 : is_parallel_safe(root, (Node *) sort_input_target->exprs);
1628 : }
1629 : else
1630 : {
1631 467816 : sort_input_target = final_target;
1632 467816 : sort_input_target_parallel_safe = final_target_parallel_safe;
1633 : }
1634 :
1635 : /*
1636 : * If we have window functions to deal with, the output from any
1637 : * grouping step needs to be what the window functions want;
1638 : * otherwise, it should be sort_input_target.
1639 : */
1640 532326 : if (activeWindows)
1641 : {
1642 2378 : grouping_target = make_window_input_target(root,
1643 : final_target,
1644 : activeWindows);
1645 : grouping_target_parallel_safe =
1646 2378 : is_parallel_safe(root, (Node *) grouping_target->exprs);
1647 : }
1648 : else
1649 : {
1650 529948 : grouping_target = sort_input_target;
1651 529948 : grouping_target_parallel_safe = sort_input_target_parallel_safe;
1652 : }
1653 :
1654 : /*
1655 : * If we have grouping or aggregation to do, the topmost scan/join
1656 : * plan node must emit what the grouping step wants; otherwise, it
1657 : * should emit grouping_target.
1658 : */
1659 527866 : have_grouping = (parse->groupClause || parse->groupingSets ||
1660 1060192 : parse->hasAggs || root->hasHavingQual);
1661 532326 : if (have_grouping)
1662 : {
1663 38330 : scanjoin_target = make_group_input_target(root, final_target);
1664 : scanjoin_target_parallel_safe =
1665 38330 : is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1666 : }
1667 : else
1668 : {
1669 493996 : scanjoin_target = grouping_target;
1670 493996 : scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1671 : }
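
		/*
		 * For example, in "SELECT a, sum(b) FROM t GROUP BY a ORDER BY 2",
		 * make_group_input_target() yields a scanjoin_target computing just
		 * a and b, while grouping_target, sort_input_target and final_target
		 * all compute (a, sum(b)); the aggregation step bridges the two.
		 */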
1672 :
1673 : /*
1674 : * If there are any SRFs in the targetlist, we must separate each of
1675 : * these PathTargets into SRF-computing and SRF-free targets. Replace
1676 : * each of the named targets with a SRF-free version, and remember the
1677 : * list of additional projection steps we need to add afterwards.
1678 : */
1679 532326 : if (parse->hasTargetSRFs)
1680 : {
1681 : /* final_target doesn't recompute any SRFs in sort_input_target */
1682 9066 : split_pathtarget_at_srfs(root, final_target, sort_input_target,
1683 : &final_targets,
1684 : &final_targets_contain_srfs);
1685 9066 : final_target = linitial_node(PathTarget, final_targets);
1686 : Assert(!linitial_int(final_targets_contain_srfs));
1687 : /* likewise for sort_input_target vs. grouping_target */
1688 9066 : split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1689 : &sort_input_targets,
1690 : &sort_input_targets_contain_srfs);
1691 9066 : sort_input_target = linitial_node(PathTarget, sort_input_targets);
1692 : Assert(!linitial_int(sort_input_targets_contain_srfs));
1693 : /* likewise for grouping_target vs. scanjoin_target */
1694 9066 : split_pathtarget_at_srfs(root, grouping_target, scanjoin_target,
1695 : &grouping_targets,
1696 : &grouping_targets_contain_srfs);
1697 9066 : grouping_target = linitial_node(PathTarget, grouping_targets);
1698 : Assert(!linitial_int(grouping_targets_contain_srfs));
1699 : /* scanjoin_target will not have any SRFs precomputed for it */
1700 9066 : split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1701 : &scanjoin_targets,
1702 : &scanjoin_targets_contain_srfs);
1703 9066 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1704 : Assert(!linitial_int(scanjoin_targets_contain_srfs));
1705 : }
1706 : else
1707 : {
1708 : /* initialize lists; for most of these, dummy values are OK */
1709 523260 : final_targets = final_targets_contain_srfs = NIL;
1710 523260 : sort_input_targets = sort_input_targets_contain_srfs = NIL;
1711 523260 : grouping_targets = grouping_targets_contain_srfs = NIL;
1712 523260 : scanjoin_targets = list_make1(scanjoin_target);
1713 523260 : scanjoin_targets_contain_srfs = NIL;
1714 : }
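
		/*
		 * For instance, with "SELECT x, generate_series(1, y) FROM t", the
		 * SRF-free scanjoin_target computes just x and y; the SRF itself is
		 * evaluated later in a separate ProjectSet projection step, added
		 * when the split target lists are applied to the paths below.
		 */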
1715 :
1716 : /* Apply scan/join target. */
1717 532326 : scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1718 532326 : && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1719 532326 : apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1720 : scanjoin_targets_contain_srfs,
1721 : scanjoin_target_parallel_safe,
1722 : scanjoin_target_same_exprs);
1723 :
1724 : /*
1725 : * Save the various upper-rel PathTargets we just computed into
1726 : * root->upper_targets[]. The core code doesn't use this, but it
1727 : * provides a convenient place for extensions to get at the info. For
1728 : * consistency, we save all the intermediate targets, even though some
1729 : * of the corresponding upperrels might not be needed for this query.
1730 : */
1731 532326 : root->upper_targets[UPPERREL_FINAL] = final_target;
1732 532326 : root->upper_targets[UPPERREL_ORDERED] = final_target;
1733 532326 : root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1734 532326 : root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1735 532326 : root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1736 532326 : root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1737 :
1738 : /*
1739 : * If we have grouping and/or aggregation, consider ways to implement
1740 : * that. We build a new upperrel representing the output of this
1741 : * phase.
1742 : */
1743 532326 : if (have_grouping)
1744 : {
1745 38330 : current_rel = create_grouping_paths(root,
1746 : current_rel,
1747 : grouping_target,
1748 : grouping_target_parallel_safe,
1749 : gset_data);
1750 : /* Fix things up if grouping_target contains SRFs */
1751 38324 : if (parse->hasTargetSRFs)
1752 418 : adjust_paths_for_srfs(root, current_rel,
1753 : grouping_targets,
1754 : grouping_targets_contain_srfs);
1755 : }
1756 :
1757 : /*
1758 : * If we have window functions, consider ways to implement those. We
1759 : * build a new upperrel representing the output of this phase.
1760 : */
1761 532320 : if (activeWindows)
1762 : {
1763 2378 : current_rel = create_window_paths(root,
1764 : current_rel,
1765 : grouping_target,
1766 : sort_input_target,
1767 : sort_input_target_parallel_safe,
1768 : wflists,
1769 : activeWindows);
1770 : /* Fix things up if sort_input_target contains SRFs */
1771 2378 : if (parse->hasTargetSRFs)
1772 12 : adjust_paths_for_srfs(root, current_rel,
1773 : sort_input_targets,
1774 : sort_input_targets_contain_srfs);
1775 : }
1776 :
1777 : /*
1778 : * If there is a DISTINCT clause, consider ways to implement that. We
1779 : * build a new upperrel representing the output of this phase.
1780 : */
1781 532320 : if (parse->distinctClause)
1782 : {
1783 2560 : current_rel = create_distinct_paths(root,
1784 : current_rel,
1785 : sort_input_target);
1786 : }
1787 : } /* end of if (setOperations) */
1788 :
1789 : /*
1790 : * If ORDER BY was given, consider ways to implement that, and generate a
1791 : * new upperrel containing only paths that emit the correct ordering and
1792 : * project the correct final_target. We can apply the original
1793 : * limit_tuples limit in sort costing here, but only if there are no
1794 : * postponed SRFs.
1795 : */
1796 538210 : if (parse->sortClause)
1797 : {
1798 68304 : current_rel = create_ordered_paths(root,
1799 : current_rel,
1800 : final_target,
1801 : final_target_parallel_safe,
1802 : have_postponed_srfs ? -1.0 :
1803 : limit_tuples);
1804 : /* Fix things up if final_target contains SRFs */
1805 68304 : if (parse->hasTargetSRFs)
1806 196 : adjust_paths_for_srfs(root, current_rel,
1807 : final_targets,
1808 : final_targets_contain_srfs);
1809 : }
1810 :
1811 : /*
1812 : * Now we are prepared to build the final-output upperrel.
1813 : */
1814 538210 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1815 :
1816 : /*
1817 : * If the input rel is marked consider_parallel and there's nothing that's
1818 : * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1819 : * consider_parallel as well. Note that if the query has rowMarks or is
1820 : * not a SELECT, consider_parallel will be false for every relation in the
1821 : * query.
1822 : */
1823 699654 : if (current_rel->consider_parallel &&
1824 322864 : is_parallel_safe(root, parse->limitOffset) &&
1825 161420 : is_parallel_safe(root, parse->limitCount))
1826 161414 : final_rel->consider_parallel = true;
1827 :
1828 : /*
1829 : * If the current_rel belongs to a single FDW, so does the final_rel.
1830 : */
1831 538210 : final_rel->serverid = current_rel->serverid;
1832 538210 : final_rel->userid = current_rel->userid;
1833 538210 : final_rel->useridiscurrent = current_rel->useridiscurrent;
1834 538210 : final_rel->fdwroutine = current_rel->fdwroutine;
1835 :
1836 : /*
1837 : * Generate paths for the final_rel. Insert all surviving paths, with
1838 : * LockRows, Limit, and/or ModifyTable steps added if needed.
1839 : */
1840 1095704 : foreach(lc, current_rel->pathlist)
1841 : {
1842 557494 : Path *path = (Path *) lfirst(lc);
1843 :
1844 : /*
1845 : * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1846 : * (Note: we intentionally test parse->rowMarks not root->rowMarks
1847 : * here. If there are only non-locking rowmarks, they should be
1848 : * handled by the ModifyTable node instead. However, root->rowMarks
1849 : * is what goes into the LockRows node.)
1850 : */
1851 557494 : if (parse->rowMarks)
1852 : {
1853 8120 : path = (Path *) create_lockrows_path(root, final_rel, path,
1854 : root->rowMarks,
1855 : assign_special_exec_param(root));
1856 : }
1857 :
1858 : /*
1859 : * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1860 : */
1861 557494 : if (limit_needed(parse))
1862 : {
1863 5734 : path = (Path *) create_limit_path(root, final_rel, path,
1864 : parse->limitOffset,
1865 : parse->limitCount,
1866 : parse->limitOption,
1867 : offset_est, count_est);
1868 : }
1869 :
1870 : /*
1871 : * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
1872 : */
1873 557494 : if (parse->commandType != CMD_SELECT)
1874 : {
1875 : Index rootRelation;
1876 92292 : List *resultRelations = NIL;
1877 92292 : List *updateColnosLists = NIL;
1878 92292 : List *withCheckOptionLists = NIL;
1879 92292 : List *returningLists = NIL;
1880 92292 : List *mergeActionLists = NIL;
1881 92292 : List *mergeJoinConditions = NIL;
1882 : List *rowMarks;
1883 :
1884 92292 : if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
1885 : {
1886 : /* Inherited UPDATE/DELETE/MERGE */
1887 2762 : RelOptInfo *top_result_rel = find_base_rel(root,
1888 : parse->resultRelation);
1889 2762 : int resultRelation = -1;
1890 :
1891 : /* Pass the root result rel forward to the executor. */
1892 2762 : rootRelation = parse->resultRelation;
1893 :
1894 : /* Add only leaf children to ModifyTable. */
1895 8020 : while ((resultRelation = bms_next_member(root->leaf_result_relids,
1896 : resultRelation)) >= 0)
1897 : {
1898 5258 : RelOptInfo *this_result_rel = find_base_rel(root,
1899 : resultRelation);
1900 :
1901 : /*
1902 : * Also exclude any leaf rels that have turned dummy since
1903 : * being added to the list, for example, by being excluded
1904 : * by constraint exclusion.
1905 : */
1906 5258 : if (IS_DUMMY_REL(this_result_rel))
1907 84 : continue;
1908 :
1909 : /* Build per-target-rel lists needed by ModifyTable */
1910 5174 : resultRelations = lappend_int(resultRelations,
1911 : resultRelation);
1912 5174 : if (parse->commandType == CMD_UPDATE)
1913 : {
1914 3606 : List *update_colnos = root->update_colnos;
1915 :
1916 3606 : if (this_result_rel != top_result_rel)
1917 : update_colnos =
1918 3606 : adjust_inherited_attnums_multilevel(root,
1919 : update_colnos,
1920 : this_result_rel->relid,
1921 : top_result_rel->relid);
1922 3606 : updateColnosLists = lappend(updateColnosLists,
1923 : update_colnos);
1924 : }
1925 5174 : if (parse->withCheckOptions)
1926 : {
1927 486 : List *withCheckOptions = parse->withCheckOptions;
1928 :
1929 486 : if (this_result_rel != top_result_rel)
1930 : withCheckOptions = (List *)
1931 486 : adjust_appendrel_attrs_multilevel(root,
1932 : (Node *) withCheckOptions,
1933 : this_result_rel,
1934 : top_result_rel);
1935 486 : withCheckOptionLists = lappend(withCheckOptionLists,
1936 : withCheckOptions);
1937 : }
1938 5174 : if (parse->returningList)
1939 : {
1940 816 : List *returningList = parse->returningList;
1941 :
1942 816 : if (this_result_rel != top_result_rel)
1943 : returningList = (List *)
1944 816 : adjust_appendrel_attrs_multilevel(root,
1945 : (Node *) returningList,
1946 : this_result_rel,
1947 : top_result_rel);
1948 816 : returningLists = lappend(returningLists,
1949 : returningList);
1950 : }
1951 5174 : if (parse->mergeActionList)
1952 : {
1953 : ListCell *l;
1954 492 : List *mergeActionList = NIL;
1955 :
1956 : /*
1957 : * Copy MergeActions and translate stuff that
1958 : * references attribute numbers.
1959 : */
1960 1554 : foreach(l, parse->mergeActionList)
1961 : {
1962 1062 : MergeAction *action = lfirst(l),
1963 1062 : *leaf_action = copyObject(action);
1964 :
1965 1062 : leaf_action->qual =
1966 1062 : adjust_appendrel_attrs_multilevel(root,
1967 : (Node *) action->qual,
1968 : this_result_rel,
1969 : top_result_rel);
1970 1062 : leaf_action->targetList = (List *)
1971 1062 : adjust_appendrel_attrs_multilevel(root,
1972 1062 : (Node *) action->targetList,
1973 : this_result_rel,
1974 : top_result_rel);
1975 1062 : if (leaf_action->commandType == CMD_UPDATE)
1976 592 : leaf_action->updateColnos =
1977 592 : adjust_inherited_attnums_multilevel(root,
1978 : action->updateColnos,
1979 : this_result_rel->relid,
1980 : top_result_rel->relid);
1981 1062 : mergeActionList = lappend(mergeActionList,
1982 : leaf_action);
1983 : }
1984 :
1985 492 : mergeActionLists = lappend(mergeActionLists,
1986 : mergeActionList);
1987 : }
1988 5174 : if (parse->commandType == CMD_MERGE)
1989 : {
1990 492 : Node *mergeJoinCondition = parse->mergeJoinCondition;
1991 :
1992 492 : if (this_result_rel != top_result_rel)
1993 : mergeJoinCondition =
1994 492 : adjust_appendrel_attrs_multilevel(root,
1995 : mergeJoinCondition,
1996 : this_result_rel,
1997 : top_result_rel);
1998 492 : mergeJoinConditions = lappend(mergeJoinConditions,
1999 : mergeJoinCondition);
2000 : }
2001 : }
2002 :
2003 2762 : if (resultRelations == NIL)
2004 : {
2005 : /*
2006 : * We managed to exclude every child rel, so generate a
2007 : * dummy one-relation plan using info for the top target
2008 : * rel (even though that may not be a leaf target).
2009 : * Although it's clear that no data will be updated or
2010 : * deleted, we still need to have a ModifyTable node so
2011 : * that any statement triggers will be executed. (This
2012 : * could be cleaner if we fixed nodeModifyTable.c to allow
2013 : * zero target relations, but that probably wouldn't be a
2014 : * net win.)
2015 : */
2016 30 : resultRelations = list_make1_int(parse->resultRelation);
2017 30 : if (parse->commandType == CMD_UPDATE)
2018 30 : updateColnosLists = list_make1(root->update_colnos);
2019 30 : if (parse->withCheckOptions)
2020 0 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2021 30 : if (parse->returningList)
2022 18 : returningLists = list_make1(parse->returningList);
2023 30 : if (parse->mergeActionList)
2024 0 : mergeActionLists = list_make1(parse->mergeActionList);
2025 30 : if (parse->commandType == CMD_MERGE)
2026 0 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2027 : }
2028 : }
2029 : else
2030 : {
2031 : /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2032 89530 : rootRelation = 0; /* there's no separate root rel */
2033 89530 : resultRelations = list_make1_int(parse->resultRelation);
2034 89530 : if (parse->commandType == CMD_UPDATE)
2035 11840 : updateColnosLists = list_make1(root->update_colnos);
2036 89530 : if (parse->withCheckOptions)
2037 926 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2038 89530 : if (parse->returningList)
2039 2408 : returningLists = list_make1(parse->returningList);
2040 89530 : if (parse->mergeActionList)
2041 1620 : mergeActionLists = list_make1(parse->mergeActionList);
2042 89530 : if (parse->commandType == CMD_MERGE)
2043 1620 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2044 : }
2045 :
2046 : /*
2047 : * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2048 : * will have dealt with fetching non-locked marked rows, else we
2049 : * need to have ModifyTable do that.
2050 : */
2051 92292 : if (parse->rowMarks)
2052 0 : rowMarks = NIL;
2053 : else
2054 92292 : rowMarks = root->rowMarks;
2055 :
2056 : path = (Path *)
2057 92292 : create_modifytable_path(root, final_rel,
2058 : path,
2059 : parse->commandType,
2060 92292 : parse->canSetTag,
2061 92292 : parse->resultRelation,
2062 : rootRelation,
2063 92292 : root->partColsUpdated,
2064 : resultRelations,
2065 : updateColnosLists,
2066 : withCheckOptionLists,
2067 : returningLists,
2068 : rowMarks,
2069 : parse->onConflict,
2070 : mergeActionLists,
2071 : mergeJoinConditions,
2072 : assign_special_exec_param(root));
2073 : }
2074 :
2075 : /* And shove it into final_rel */
2076 557494 : add_path(final_rel, path);
2077 : }
2078 :
2079 : /*
2080 : * Generate partial paths for final_rel, too, if outer query levels might
2081 : * be able to make use of them.
2082 : */
2083 538210 : if (final_rel->consider_parallel && root->query_level > 1 &&
2084 21922 : !limit_needed(parse))
2085 : {
2086 : Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2087 21874 : foreach(lc, current_rel->partial_pathlist)
2088 : {
2089 108 : Path *partial_path = (Path *) lfirst(lc);
2090 :
2091 108 : add_partial_path(final_rel, partial_path);
2092 : }
2093 : }
2094 :
2095 538210 : extra.limit_needed = limit_needed(parse);
2096 538210 : extra.limit_tuples = limit_tuples;
2097 538210 : extra.count_est = count_est;
2098 538210 : extra.offset_est = offset_est;
2099 :
2100 : /*
2101 : * If there is an FDW that's responsible for all baserels of the query,
2102 : * let it consider adding ForeignPaths.
2103 : */
2104 538210 : if (final_rel->fdwroutine &&
2105 1254 : final_rel->fdwroutine->GetForeignUpperPaths)
2106 1186 : final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2107 : current_rel, final_rel,
2108 : &extra);
2109 :
2110 : /* Let extensions possibly add some more paths */
2111 538210 : if (create_upper_paths_hook)
2112 0 : (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2113 : current_rel, final_rel, &extra);
2114 :
2115 : /* Note: currently, we leave it to callers to do set_cheapest() */
2116 538210 : }
2117 :
2118 : /*
2119 : * Do preprocessing for groupingSets clause and related data. This handles the
2120 : * preliminary steps of expanding the grouping sets, organizing them into lists
2121 : * of rollups, and preparing annotations which will later be filled in with
2122 : * size estimates.
2123 : */
2124 : static grouping_sets_data *
2125 878 : preprocess_grouping_sets(PlannerInfo *root)
2126 : {
2127 878 : Query *parse = root->parse;
2128 : List *sets;
2129 878 : int maxref = 0;
2130 : ListCell *lc_set;
2131 878 : grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
2132 :
2133 878 : parse->groupingSets = expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
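
	/*
	 * For example, GROUP BY ROLLUP(a, b) expands here to the grouping sets
	 * (a, b), (a), (), and GROUP BY CUBE(a, b) to (a, b), (a), (b), ().
	 * expand_grouping_sets() returns them sorted smallest-first, which
	 * extract_rollup_sets() relies on below.
	 */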
2134 :
2135 878 : gd->any_hashable = false;
2136 878 : gd->unhashable_refs = NULL;
2137 878 : gd->unsortable_refs = NULL;
2138 878 : gd->unsortable_sets = NIL;
2139 :
2140 : /*
2141 : * We don't currently make any attempt to optimize the groupClause when
2142 : * there are grouping sets, so just duplicate it in processed_groupClause.
2143 : */
2144 878 : root->processed_groupClause = parse->groupClause;
2145 :
2146 878 : if (parse->groupClause)
2147 : {
2148 : ListCell *lc;
2149 :
2150 2672 : foreach(lc, parse->groupClause)
2151 : {
2152 1836 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2153 1836 : Index ref = gc->tleSortGroupRef;
2154 :
2155 1836 : if (ref > maxref)
2156 1800 : maxref = ref;
2157 :
2158 1836 : if (!gc->hashable)
2159 30 : gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2160 :
2161 1836 : if (!OidIsValid(gc->sortop))
2162 42 : gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2163 : }
2164 : }
2165 :
2166 : /* Allocate workspace array for remapping */
2167 878 : gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2168 :
2169 : /*
2170 : * If we have any unsortable sets, we must extract them before trying to
2171 : * prepare rollups. Unsortable sets don't go through
2172 : * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2173 : * here.
2174 : */
2175 878 : if (!bms_is_empty(gd->unsortable_refs))
2176 : {
2177 42 : List *sortable_sets = NIL;
2178 : ListCell *lc;
2179 :
2180 126 : foreach(lc, parse->groupingSets)
2181 : {
2182 90 : List *gset = (List *) lfirst(lc);
2183 :
2184 90 : if (bms_overlap_list(gd->unsortable_refs, gset))
2185 : {
2186 48 : GroupingSetData *gs = makeNode(GroupingSetData);
2187 :
2188 48 : gs->set = gset;
2189 48 : gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2190 :
2191 : /*
2192 : * We must enforce here that an unsortable set is hashable;
2193 : * later code assumes this. Parse analysis only checks that
2194 : * every individual column is either hashable or sortable.
2195 : *
2196 : * Note that passing this test doesn't guarantee we can
2197 : * generate a plan; there might be other showstoppers.
2198 : */
2199 48 : if (bms_overlap_list(gd->unhashable_refs, gset))
2200 6 : ereport(ERROR,
2201 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2202 : errmsg("could not implement GROUP BY"),
2203 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2204 : }
2205 : else
2206 42 : sortable_sets = lappend(sortable_sets, gset);
2207 : }
2208 :
2209 36 : if (sortable_sets)
2210 30 : sets = extract_rollup_sets(sortable_sets);
2211 : else
2212 6 : sets = NIL;
2213 : }
2214 : else
2215 836 : sets = extract_rollup_sets(parse->groupingSets);
2216 :
2217 2298 : foreach(lc_set, sets)
2218 : {
2219 1426 : List *current_sets = (List *) lfirst(lc_set);
2220 1426 : RollupData *rollup = makeNode(RollupData);
2221 : GroupingSetData *gs;
2222 :
2223 : /*
2224 : * Reorder the current list of grouping sets into correct prefix
2225 : * order. If only one aggregation pass is needed, try to make the
2226 : * list match the ORDER BY clause; if more than one pass is needed, we
2227 : * don't bother with that.
2228 : *
2229 : * Note that this reorders the sets from smallest-member-first to
2230 : * largest-member-first, and applies the GroupingSetData annotations,
2231 : * though the data will be filled in later.
2232 : */
2233 1426 : current_sets = reorder_grouping_sets(current_sets,
2234 1426 : (list_length(sets) == 1
2235 : ? parse->sortClause
2236 : : NIL));
2237 :
2238 : /*
2239 : * Get the initial (and therefore largest) grouping set.
2240 : */
2241 1426 : gs = linitial_node(GroupingSetData, current_sets);
2242 :
2243 : /*
2244 : * Order the groupClause appropriately. If the first grouping set is
2245 : * empty, then the groupClause must also be empty; otherwise we have
2246 : * to force the groupClause to match that grouping set's order.
2247 : *
2248 : * (The first grouping set can be empty even though parse->groupClause
2249 : * is not empty only if all non-empty grouping sets are unsortable.
2250 : * The groupClauses for hashed grouping sets are built later on.)
2251 : */
2252 1426 : if (gs->set)
2253 1384 : rollup->groupClause = preprocess_groupclause(root, gs->set);
2254 : else
2255 42 : rollup->groupClause = NIL;
2256 :
2257 : /*
2258 : * Is it hashable? We pretend empty sets are hashable even though we
2259 : * actually force them not to be hashed later. But don't bother if
2260 : * there's nothing but empty sets (since in that case we can't hash
2261 : * anything).
2262 : */
2263 1426 : if (gs->set &&
2264 1384 : !bms_overlap_list(gd->unhashable_refs, gs->set))
2265 : {
2266 1360 : rollup->hashable = true;
2267 1360 : gd->any_hashable = true;
2268 : }
2269 :
2270 : /*
2271 : * Now that we've pinned down an order for the groupClause for this
2272 : * list of grouping sets, we need to remap the entries in the grouping
2273 : * sets from sortgrouprefs to plain indices (0-based) into the
2274 : * groupClause for this collection of grouping sets. We keep the
2275 : * original form for later use, though.
2276 : */
2277 1426 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2278 : current_sets,
2279 : gd->tleref_to_colnum_map);
2280 1426 : rollup->gsets_data = current_sets;
2281 :
2282 1426 : gd->rollups = lappend(gd->rollups, rollup);
2283 : }
2284 :
2285 872 : if (gd->unsortable_sets)
2286 : {
2287 : /*
2288 : * We have not yet pinned down a groupclause for this, but we will
2289 : * need index-based lists for estimation purposes. Construct
2290 : * hash_sets_idx based on the entire original groupclause for now.
2291 : */
2292 36 : gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2293 : gd->unsortable_sets,
2294 : gd->tleref_to_colnum_map);
2295 36 : gd->any_hashable = true;
2296 : }
2297 :
2298 872 : return gd;
2299 : }
2300 :
2301 : /*
2302 : * Given a groupclause and a list of GroupingSetData, return equivalent sets
2303 : * (without annotation) mapped to indexes into the given groupclause.
2304 : */
2305 : static List *
2306 4164 : remap_to_groupclause_idx(List *groupClause,
2307 : List *gsets,
2308 : int *tleref_to_colnum_map)
2309 : {
2310 4164 : int ref = 0;
2311 4164 : List *result = NIL;
2312 : ListCell *lc;
2313 :
2314 10192 : foreach(lc, groupClause)
2315 : {
2316 6028 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2317 :
2318 6028 : tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2319 : }
2320 :
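
	/*
	 * At this point tleref_to_colnum_map maps each tleSortGroupRef to its
	 * 0-based position in groupClause; e.g. for refs (3, 5, 7) it holds
	 * 3 -> 0, 5 -> 1, 7 -> 2, so a grouping set given as (3, 7) comes out of
	 * the loop below as (0, 2).
	 */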
2321 9630 : foreach(lc, gsets)
2322 : {
2323 5466 : List *set = NIL;
2324 : ListCell *lc2;
2325 5466 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2326 :
2327 12332 : foreach(lc2, gs->set)
2328 : {
2329 6866 : set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2330 : }
2331 :
2332 5466 : result = lappend(result, set);
2333 : }
2334 :
2335 4164 : return result;
2336 : }
2337 :
2338 :
2339 : /*
2340 : * preprocess_rowmarks - set up PlanRowMarks if needed
2341 : */
2342 : static void
2343 542028 : preprocess_rowmarks(PlannerInfo *root)
2344 : {
2345 542028 : Query *parse = root->parse;
2346 : Bitmapset *rels;
2347 : List *prowmarks;
2348 : ListCell *l;
2349 : int i;
2350 :
2351 542028 : if (parse->rowMarks)
2352 : {
2353 : /*
2354 : * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2355 : * grouping, since grouping renders a reference to individual tuple
2356 : * CTIDs invalid. This is also checked at parse time, but that's
2357 : * insufficient because of rule substitution, query pullup, etc.
2358 : */
2359 7646 : CheckSelectLocking(parse, linitial_node(RowMarkClause,
2360 : parse->rowMarks)->strength);
2361 : }
2362 : else
2363 : {
2364 : /*
2365 : * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2366 : * UPDATE/SHARE.
2367 : */
2368 534382 : if (parse->commandType != CMD_UPDATE &&
2369 520564 : parse->commandType != CMD_DELETE &&
2370 516222 : parse->commandType != CMD_MERGE)
2371 514392 : return;
2372 : }
2373 :
2374 : /*
2375 : * We need to have rowmarks for all base relations except the target. We
2376 : * make a bitmapset of all base rels and then remove the items we don't
2377 : * need or have FOR [KEY] UPDATE/SHARE marks for.
2378 : */
2379 27636 : rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2380 27636 : if (parse->resultRelation)
2381 19990 : rels = bms_del_member(rels, parse->resultRelation);
2382 :
2383 : /*
2384 : * Convert RowMarkClauses to PlanRowMark representation.
2385 : */
2386 27636 : prowmarks = NIL;
2387 35544 : foreach(l, parse->rowMarks)
2388 : {
2389 7908 : RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2390 7908 : RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2391 : PlanRowMark *newrc;
2392 :
2393 : /*
2394 : * Currently, it is syntactically impossible to have FOR UPDATE et al
2395 : * applied to an update/delete target rel. If that ever becomes
2396 : * possible, we should drop the target from the PlanRowMark list.
2397 : */
2398 : Assert(rc->rti != parse->resultRelation);
2399 :
2400 : /*
2401 : * Ignore RowMarkClauses for subqueries; they aren't real tables and
2402 : * can't support true locking. Subqueries that got flattened into the
2403 : * main query should be ignored completely. Any that didn't will get
2404 : * ROW_MARK_COPY items in the next loop.
2405 : */
2406 7908 : if (rte->rtekind != RTE_RELATION)
2407 108 : continue;
2408 :
2409 7800 : rels = bms_del_member(rels, rc->rti);
2410 :
2411 7800 : newrc = makeNode(PlanRowMark);
2412 7800 : newrc->rti = newrc->prti = rc->rti;
2413 7800 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2414 7800 : newrc->markType = select_rowmark_type(rte, rc->strength);
2415 7800 : newrc->allMarkTypes = (1 << newrc->markType);
2416 7800 : newrc->strength = rc->strength;
2417 7800 : newrc->waitPolicy = rc->waitPolicy;
2418 7800 : newrc->isParent = false;
2419 :
2420 7800 : prowmarks = lappend(prowmarks, newrc);
2421 : }
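
	/*
	 * For instance, in "SELECT ... FROM t, (SELECT ...) ss ... FOR UPDATE
	 * OF t", only t has a RowMarkClause and receives a true locking mark
	 * above; if ss wasn't flattened, the loop below picks it up and, being a
	 * subquery RTE, it ends up with ROW_MARK_COPY.
	 */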
2422 :
2423 : /*
2424 : * Now, add rowmarks for any non-target, non-locked base relations.
2425 : */
2426 27636 : i = 0;
2427 66536 : foreach(l, parse->rtable)
2428 : {
2429 38900 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2430 : PlanRowMark *newrc;
2431 :
2432 38900 : i++;
2433 38900 : if (!bms_is_member(i, rels))
2434 35180 : continue;
2435 :
2436 3720 : newrc = makeNode(PlanRowMark);
2437 3720 : newrc->rti = newrc->prti = i;
2438 3720 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2439 3720 : newrc->markType = select_rowmark_type(rte, LCS_NONE);
2440 3720 : newrc->allMarkTypes = (1 << newrc->markType);
2441 3720 : newrc->strength = LCS_NONE;
2442 3720 : newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2443 3720 : newrc->isParent = false;
2444 :
2445 3720 : prowmarks = lappend(prowmarks, newrc);
2446 : }
2447 :
2448 27636 : root->rowMarks = prowmarks;
2449 : }
2450 :
2451 : /*
2452 : * Select RowMarkType to use for a given table
2453 : */
2454 : RowMarkType
2455 13732 : select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2456 : {
2457 13732 : if (rte->rtekind != RTE_RELATION)
2458 : {
2459 : /* If it's not a table at all, use ROW_MARK_COPY */
2460 1470 : return ROW_MARK_COPY;
2461 : }
2462 12262 : else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2463 : {
2464 : /* Let the FDW select the rowmark type, if it wants to */
2465 200 : FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2466 :
2467 200 : if (fdwroutine->GetForeignRowMarkType != NULL)
2468 0 : return fdwroutine->GetForeignRowMarkType(rte, strength);
2469 : /* Otherwise, use ROW_MARK_COPY by default */
2470 200 : return ROW_MARK_COPY;
2471 : }
2472 : else
2473 : {
2474 : /* Regular table, apply the appropriate lock type */
2475 12062 : switch (strength)
2476 : {
2477 2480 : case LCS_NONE:
2478 :
2479 : /*
2480 : * We don't need a tuple lock, only the ability to re-fetch
2481 : * the row.
2482 : */
2483 2480 : return ROW_MARK_REFERENCE;
2484 : break;
2485 7696 : case LCS_FORKEYSHARE:
2486 7696 : return ROW_MARK_KEYSHARE;
2487 : break;
2488 300 : case LCS_FORSHARE:
2489 300 : return ROW_MARK_SHARE;
2490 : break;
2491 72 : case LCS_FORNOKEYUPDATE:
2492 72 : return ROW_MARK_NOKEYEXCLUSIVE;
2493 : break;
2494 1514 : case LCS_FORUPDATE:
2495 1514 : return ROW_MARK_EXCLUSIVE;
2496 : break;
2497 : }
2498 0 : elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2499 : return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2500 : }
2501 : }
2502 :
2503 : /*
2504 : * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2505 : *
2506 : * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2507 : * results back in *count_est and *offset_est. These variables are set to
2508 : * 0 if the corresponding clause is not present, and -1 if it's present
2509 : * but we couldn't estimate the value for it. (The "0" convention is OK
2510 : * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2511 : * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2512 : * usual practice of never estimating less than one row.) These values will
2513 : * be passed to create_limit_path, which see if you change this code.
2514 : *
2515 : * The return value is the suitably adjusted tuple_fraction to use for
2516 : * planning the query. This adjustment is not overridable, since it reflects
2517 : * plan actions that grouping_planner() will certainly take, not assumptions
2518 : * about context.
2519 : */
2520 : static double
2521 4852 : preprocess_limit(PlannerInfo *root, double tuple_fraction,
2522 : int64 *offset_est, int64 *count_est)
2523 : {
2524 4852 : Query *parse = root->parse;
2525 : Node *est;
2526 : double limit_fraction;
2527 :
2528 : /* Should not be called unless LIMIT or OFFSET */
2529 : Assert(parse->limitCount || parse->limitOffset);
2530 :
2531 : /*
2532 : * Try to obtain the clause values. We use estimate_expression_value
2533 : * primarily because it can sometimes do something useful with Params.
2534 : */
2535 4852 : if (parse->limitCount)
2536 : {
2537 4360 : est = estimate_expression_value(root, parse->limitCount);
2538 4360 : if (est && IsA(est, Const))
2539 : {
2540 4354 : if (((Const *) est)->constisnull)
2541 : {
2542 : /* NULL indicates LIMIT ALL, ie, no limit */
2543 0 : *count_est = 0; /* treat as not present */
2544 : }
2545 : else
2546 : {
2547 4354 : *count_est = DatumGetInt64(((Const *) est)->constvalue);
2548 4354 : if (*count_est <= 0)
2549 150 : *count_est = 1; /* force to at least 1 */
2550 : }
2551 : }
2552 : else
2553 6 : *count_est = -1; /* can't estimate */
2554 : }
2555 : else
2556 492 : *count_est = 0; /* not present */
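
	/*
	 * So, for example, "LIMIT 0" yields count_est = 1 (the planner never
	 * estimates fewer than one row), while a LIMIT whose value is an unknown
	 * Param yields count_est = -1 and falls back to the 10% assumption
	 * further down.
	 */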
2557 :
2558 4852 : if (parse->limitOffset)
2559 : {
2560 864 : est = estimate_expression_value(root, parse->limitOffset);
2561 864 : if (est && IsA(est, Const))
2562 : {
2563 840 : if (((Const *) est)->constisnull)
2564 : {
2565 : /* Treat NULL as no offset; the executor will too */
2566 0 : *offset_est = 0; /* treat as not present */
2567 : }
2568 : else
2569 : {
2570 840 : *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2571 840 : if (*offset_est < 0)
2572 0 : *offset_est = 0; /* treat as not present */
2573 : }
2574 : }
2575 : else
2576 24 : *offset_est = -1; /* can't estimate */
2577 : }
2578 : else
2579 3988 : *offset_est = 0; /* not present */
2580 :
2581 4852 : if (*count_est != 0)
2582 : {
2583 : /*
2584 : * A LIMIT clause limits the absolute number of tuples returned.
2585 : * However, if it's not a constant LIMIT then we have to guess; for
2586 : * lack of a better idea, assume 10% of the plan's result is wanted.
2587 : */
2588 4360 : if (*count_est < 0 || *offset_est < 0)
2589 : {
2590 : /* LIMIT or OFFSET is an expression ... punt ... */
2591 24 : limit_fraction = 0.10;
2592 : }
2593 : else
2594 : {
2595 : /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2596 4336 : limit_fraction = (double) *count_est + (double) *offset_est;
2597 : }
2598 :
2599 : /*
2600 : * If we have absolute limits from both caller and LIMIT, use the
2601 : * smaller value; likewise if they are both fractional. If one is
2602 : * fractional and the other absolute, we can't easily determine which
2603 : * is smaller, but we use the heuristic that the absolute will usually
2604 : * be smaller.
2605 : */
2606 4360 : if (tuple_fraction >= 1.0)
2607 : {
2608 6 : if (limit_fraction >= 1.0)
2609 : {
2610 : /* both absolute */
2611 6 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2612 : }
2613 : else
2614 : {
2615 : /* caller absolute, limit fractional; use caller's value */
2616 : }
2617 : }
2618 4354 : else if (tuple_fraction > 0.0)
2619 : {
2620 148 : if (limit_fraction >= 1.0)
2621 : {
2622 : /* caller fractional, limit absolute; use limit */
2623 148 : tuple_fraction = limit_fraction;
2624 : }
2625 : else
2626 : {
2627 : /* both fractional */
2628 0 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2629 : }
2630 : }
2631 : else
2632 : {
2633 : /* no info from caller, just use limit */
2634 4206 : tuple_fraction = limit_fraction;
2635 : }
2636 : }
2637 492 : else if (*offset_est != 0 && tuple_fraction > 0.0)
2638 : {
2639 : /*
2640 : * We have an OFFSET but no LIMIT. This acts entirely differently
2641 : * from the LIMIT case: here, we need to increase rather than decrease
2642 : * the caller's tuple_fraction, because the OFFSET acts to cause more
2643 : * tuples to be fetched instead of fewer. This only matters if we got
2644 : * a tuple_fraction > 0, however.
2645 : *
2646 : * As above, use 10% if OFFSET is present but unestimatable.
2647 : */
2648 12 : if (*offset_est < 0)
2649 0 : limit_fraction = 0.10;
2650 : else
2651 12 : limit_fraction = (double) *offset_est;
2652 :
2653 : /*
2654 : * If we have absolute counts from both caller and OFFSET, add them
2655 : * together; likewise if they are both fractional. If one is
2656 : * fractional and the other absolute, we want to take the larger, and
2657 : * we heuristically assume that's the fractional one.
2658 : */
2659 12 : if (tuple_fraction >= 1.0)
2660 : {
2661 0 : if (limit_fraction >= 1.0)
2662 : {
2663 : /* both absolute, so add them together */
2664 0 : tuple_fraction += limit_fraction;
2665 : }
2666 : else
2667 : {
2668 : /* caller absolute, limit fractional; use limit */
2669 0 : tuple_fraction = limit_fraction;
2670 : }
2671 : }
2672 : else
2673 : {
2674 12 : if (limit_fraction >= 1.0)
2675 : {
2676 : /* caller fractional, limit absolute; use caller's value */
2677 : }
2678 : else
2679 : {
2680 : /* both fractional, so add them together */
2681 0 : tuple_fraction += limit_fraction;
2682 0 : if (tuple_fraction >= 1.0)
2683 0 : tuple_fraction = 0.0; /* assume fetch all */
2684 : }
2685 : }
2686 : }
2687 :
2688 4852 : return tuple_fraction;
2689 : }
2690 :
2691 : /*
2692 : * limit_needed - do we actually need a Limit plan node?
2693 : *
2694 : * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2695 : * a Limit node. This is worth checking for because "OFFSET 0" is a common
2696 : * locution for an optimization fence. (Because other places in the planner
2697 : * merely check whether parse->limitOffset isn't NULL, it will still work as
2698 : * an optimization fence --- we're just suppressing unnecessary run-time
2699 : * overhead.)
2700 : *
2701 : * This might look like it could be merged into preprocess_limit, but there's
2702 : * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2703 : * in preprocess_limit it's good enough to consider estimated values.
2704 : */
2705 : bool
2706 1125990 : limit_needed(Query *parse)
2707 : {
2708 : Node *node;
2709 :
2710 1125990 : node = parse->limitCount;
2711 1125990 : if (node)
2712 : {
2713 10368 : if (IsA(node, Const))
2714 : {
2715 : /* NULL indicates LIMIT ALL, ie, no limit */
2716 10174 : if (!((Const *) node)->constisnull)
2717 10174 : return true; /* LIMIT with a constant value */
2718 : }
2719 : else
2720 194 : return true; /* non-constant LIMIT */
2721 : }
2722 :
2723 1115622 : node = parse->limitOffset;
2724 1115622 : if (node)
2725 : {
2726 1432 : if (IsA(node, Const))
2727 : {
2728 : /* Treat NULL as no offset; the executor would too */
2729 1142 : if (!((Const *) node)->constisnull)
2730 : {
2731 1142 : int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2732 :
2733 1142 : if (offset != 0)
2734 92 : return true; /* OFFSET with a nonzero value */
2735 : }
2736 : }
2737 : else
2738 290 : return true; /* non-constant OFFSET */
2739 : }
2740 :
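	/*
	 * For example, a subquery written with "OFFSET 0" purely as an
	 * optimization fence falls through to here: the fence effect comes from
	 * parse->limitOffset being non-NULL, which other parts of the planner
	 * test directly, so no run-time Limit node is needed for it.
	 */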
2741 1115240 : return false; /* don't need a Limit plan node */
2742 : }
2743 :
2744 : /*
2745 : * preprocess_groupclause - do preparatory work on GROUP BY clause
2746 : *
2747 : * The idea here is to adjust the ordering of the GROUP BY elements
2748 : * (which in itself is semantically insignificant) to match ORDER BY,
2749 : * thereby allowing a single sort operation to both implement the ORDER BY
2750 : * requirement and set up for a Unique step that implements GROUP BY.
2751 : * We also consider partial match between GROUP BY and ORDER BY elements,
2752 : * which can allow ORDER BY to be implemented using an incremental sort.
2753 : *
2754 : * We also consider other orderings of the GROUP BY elements, which could
2755 : * match the sort ordering of other possible plans (eg an indexscan) and
2756 : * thereby reduce cost. This is implemented during the generation of grouping
2757 : * paths. See get_useful_group_keys_orderings() for details.
2758 : *
2759 : * Note: we need no comparable processing of the distinctClause because
2760 : * the parser already enforced that that matches ORDER BY.
2761 : *
2762 : * Note: we return a fresh List, but its elements are the same
2763 : * SortGroupClauses appearing in parse->groupClause. This is important
2764 : * because later processing may modify the processed_groupClause list.
2765 : *
2766 : * For grouping sets, the order of items is instead forced to agree with that
2767 : * of the grouping set (and items not in the grouping set are skipped). The
2768 : * work of sorting the order of grouping set elements to match the ORDER BY if
2769 : * possible is done elsewhere.
2770 : */
2771 : static List *
2772 7716 : preprocess_groupclause(PlannerInfo *root, List *force)
2773 : {
2774 7716 : Query *parse = root->parse;
2775 7716 : List *new_groupclause = NIL;
2776 : ListCell *sl;
2777 : ListCell *gl;
2778 :
2779 : /* For grouping sets, we need to force the ordering */
2780 7716 : if (force)
2781 : {
2782 10036 : foreach(sl, force)
2783 : {
2784 5950 : Index ref = lfirst_int(sl);
2785 5950 : SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2786 :
2787 5950 : new_groupclause = lappend(new_groupclause, cl);
2788 : }
2789 :
2790 4086 : return new_groupclause;
2791 : }
2792 :
2793 : /* If no ORDER BY, nothing useful to do here */
2794 3630 : if (parse->sortClause == NIL)
2795 2056 : return list_copy(parse->groupClause);
2796 :
2797 : /*
2798 : * Scan the ORDER BY clause and construct a list of matching GROUP BY
2799 : * items, but only as far as we can make a matching prefix.
2800 : *
2801 : * This code assumes that the sortClause contains no duplicate items.
2802 : */
2803 3054 : foreach(sl, parse->sortClause)
2804 : {
2805 2126 : SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2806 :
2807 3222 : foreach(gl, parse->groupClause)
2808 : {
2809 2576 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2810 :
2811 2576 : if (equal(gc, sc))
2812 : {
2813 1480 : new_groupclause = lappend(new_groupclause, gc);
2814 1480 : break;
2815 : }
2816 : }
2817 2126 : if (gl == NULL)
2818 646 : break; /* no match, so stop scanning */
2819 : }
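
	/*
	 * For example, with GROUP BY b, a, c and ORDER BY a, b, the scan above
	 * collects the matching prefix (a, b); the loop below then appends the
	 * leftover item c, giving (a, b, c), so a single sort can provide both
	 * the grouping order and the requested ORDER BY prefix.
	 */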
2820 :
2821 :
2822 : /* If no match at all, no point in reordering GROUP BY */
2823 1574 : if (new_groupclause == NIL)
2824 298 : return list_copy(parse->groupClause);
2825 :
2826 : /*
2827 : * Add any remaining GROUP BY items to the new list. We don't require a
2828 : * complete match, because even partial match allows ORDER BY to be
2829 : * implemented using incremental sort. Also, give up if there are any
2830 : * non-sortable GROUP BY items, since then there's no hope anyway.
2831 : */
2832 2922 : foreach(gl, parse->groupClause)
2833 : {
2834 1646 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2835 :
2836 1646 : if (list_member_ptr(new_groupclause, gc))
2837 1480 : continue; /* it matched an ORDER BY item */
2838 166 : if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2839 0 : return list_copy(parse->groupClause);
2840 166 : new_groupclause = lappend(new_groupclause, gc);
2841 : }
2842 :
2843 : /* Success --- install the rearranged GROUP BY list */
2844 : Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2845 1276 : return new_groupclause;
2846 : }
2847 :
2848 : /*
2849 : * Extract lists of grouping sets that can be implemented using a single
2850 : * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2851 : *
2852 : * Input must be sorted with smallest sets first. Result has each sublist
2853 : * sorted with smallest sets first.
2854 : *
2855 : * We want to produce the absolute minimum possible number of lists here to
2856 : * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2857 : * of finding the minimal partition of a partially-ordered set into chains
2858 : * (which is what we need, taking the list of grouping sets as a poset ordered
2859 : * by set inclusion) can be mapped to the problem of finding the maximum
2860 : * cardinality matching on a bipartite graph, which is solvable in polynomial
2861 : * time with a worst case no worse than O(n^2.5), and usually much
2862 : * better. Since our N is at most 4096, we don't need to consider fallbacks to
2863 : * heuristic or approximate methods. (Planning time for a 12-d cube is under
2864 : * half a second on my modest system even with optimization off and assertions
2865 : * on.)
2866 : */
2867 : static List *
2868 866 : extract_rollup_sets(List *groupingSets)
2869 : {
2870 866 : int num_sets_raw = list_length(groupingSets);
2871 866 : int num_empty = 0;
2872 866 : int num_sets = 0; /* distinct sets */
2873 866 : int num_chains = 0;
2874 866 : List *result = NIL;
2875 : List **results;
2876 : List **orig_sets;
2877 : Bitmapset **set_masks;
2878 : int *chains;
2879 : short **adjacency;
2880 : short *adjacency_buf;
2881 : BipartiteMatchState *state;
2882 : int i;
2883 : int j;
2884 : int j_size;
2885 866 : ListCell *lc1 = list_head(groupingSets);
2886 : ListCell *lc;
2887 :
2888 : /*
2889 : * Start by stripping out empty sets. The algorithm doesn't require this,
2890 : * but the planner currently needs all empty sets to be returned in the
2891 : * first list, so we strip them here and add them back after.
2892 : */
2893 1476 : while (lc1 && lfirst(lc1) == NIL)
2894 : {
2895 610 : ++num_empty;
2896 610 : lc1 = lnext(groupingSets, lc1);
2897 : }
2898 :
2899 : /* bail out now if it turns out that all we had were empty sets. */
2900 866 : if (!lc1)
2901 42 : return list_make1(groupingSets);
2902 :
2903 : /*----------
2904 : * We don't strictly need to remove duplicate sets here, but if we don't,
2905 : * they tend to become scattered through the result, which is a bit
2906 : * confusing (and irritating if we ever decide to optimize them out).
2907 : * So we remove them here and add them back after.
2908 : *
2909 : * For each non-duplicate set, we fill in the following:
2910 : *
2911 : * orig_sets[i] = list of the original set lists
2912 : * set_masks[i] = bitmapset for testing inclusion
2913 : * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
2914 : *
2915 : * chains[i] will be the result group this set is assigned to.
2916 : *
2917 : * We index all of these from 1 rather than 0 because it is convenient
2918 : * to leave 0 free for the NIL node in the graph algorithm.
2919 : *----------
2920 : */
2921 824 : orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
2922 824 : set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
2923 824 : adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
2924 824 : adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
2925 :
2926 824 : j_size = 0;
2927 824 : j = 0;
2928 824 : i = 1;
2929 :
2930 2936 : for_each_cell(lc, groupingSets, lc1)
2931 : {
2932 2112 : List *candidate = (List *) lfirst(lc);
2933 2112 : Bitmapset *candidate_set = NULL;
2934 : ListCell *lc2;
2935 2112 : int dup_of = 0;
2936 :
2937 5118 : foreach(lc2, candidate)
2938 : {
2939 3006 : candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
2940 : }
2941 :
2942 : /* we can only be a dup if we're the same length as a previous set */
2943 2112 : if (j_size == list_length(candidate))
2944 : {
2945 : int k;
2946 :
2947 1844 : for (k = j; k < i; ++k)
2948 : {
2949 1188 : if (bms_equal(set_masks[k], candidate_set))
2950 : {
2951 158 : dup_of = k;
2952 158 : break;
2953 : }
2954 : }
2955 : }
2956 1298 : else if (j_size < list_length(candidate))
2957 : {
2958 1298 : j_size = list_length(candidate);
2959 1298 : j = i;
2960 : }
2961 :
2962 2112 : if (dup_of > 0)
2963 : {
2964 158 : orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
2965 158 : bms_free(candidate_set);
2966 : }
2967 : else
2968 : {
2969 : int k;
2970 1954 : int n_adj = 0;
2971 :
2972 1954 : orig_sets[i] = list_make1(candidate);
2973 1954 : set_masks[i] = candidate_set;
2974 :
2975 : /* fill in adjacency list; no need to compare equal-size sets */
2976 :
2977 3226 : for (k = j - 1; k > 0; --k)
2978 : {
2979 1272 : if (bms_is_subset(set_masks[k], candidate_set))
2980 1110 : adjacency_buf[++n_adj] = k;
2981 : }
2982 :
2983 1954 : if (n_adj > 0)
2984 : {
2985 598 : adjacency_buf[0] = n_adj;
2986 598 : adjacency[i] = palloc((n_adj + 1) * sizeof(short));
2987 598 : memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
2988 : }
2989 : else
2990 1356 : adjacency[i] = NULL;
2991 :
2992 1954 : ++i;
2993 : }
2994 : }
2995 :
2996 824 : num_sets = i - 1;
2997 :
2998 : /*
2999 : * Apply the graph matching algorithm to do the work.
3000 : */
3001 824 : state = BipartiteMatch(num_sets, num_sets, adjacency);
3002 :
3003 : /*
3004 : * Now, the state->pair* fields have the info we need to assign sets to
3005 : * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3006 : * pair_vu[v] = u (both will be true, but we check both so that we can do
3007 : * it in one pass)
3008 : */
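/*
 * Continuing the illustrative (a), (b), (a,b) example: a maximum matching
 * might pair set 3 = (a,b) with its subset 1 = (a), giving pair_uv[3] = 1
 * and pair_vu[1] = 3.  The loop below then assigns sets 1 and 3 to chain 1
 * and the unmatched set 2 = (b) to chain 2, yielding the two rollup lists
 * [(a), (a,b)] and [(b)].
 */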
3009 824 : chains = palloc0((num_sets + 1) * sizeof(int));
3010 :
3011 2778 : for (i = 1; i <= num_sets; ++i)
3012 : {
3013 1954 : int u = state->pair_vu[i];
3014 1954 : int v = state->pair_uv[i];
3015 :
3016 1954 : if (u > 0 && u < i)
3017 0 : chains[i] = chains[u];
3018 1954 : else if (v > 0 && v < i)
3019 570 : chains[i] = chains[v];
3020 : else
3021 1384 : chains[i] = ++num_chains;
3022 : }
3023 :
3024 : /* build result lists. */
3025 824 : results = palloc0((num_chains + 1) * sizeof(List *));
3026 :
3027 2778 : for (i = 1; i <= num_sets; ++i)
3028 : {
3029 1954 : int c = chains[i];
3030 :
3031 : Assert(c > 0);
3032 :
3033 1954 : results[c] = list_concat(results[c], orig_sets[i]);
3034 : }
3035 :
3036 : /* push any empty sets back on the first list. */
3037 1344 : while (num_empty-- > 0)
3038 520 : results[1] = lcons(NIL, results[1]);
3039 :
3040 : /* make result list */
3041 2208 : for (i = 1; i <= num_chains; ++i)
3042 1384 : result = lappend(result, results[i]);
3043 :
3044 : /*
3045 : * Free all the things.
3046 : *
3047 : * (This is over-fussy for small sets but for large sets we could have
3048 : * tied up a nontrivial amount of memory.)
3049 : */
3050 824 : BipartiteMatchFree(state);
3051 824 : pfree(results);
3052 824 : pfree(chains);
3053 2778 : for (i = 1; i <= num_sets; ++i)
3054 1954 : if (adjacency[i])
3055 598 : pfree(adjacency[i]);
3056 824 : pfree(adjacency);
3057 824 : pfree(adjacency_buf);
3058 824 : pfree(orig_sets);
3059 2778 : for (i = 1; i <= num_sets; ++i)
3060 1954 : bms_free(set_masks[i]);
3061 824 : pfree(set_masks);
3062 :
3063 824 : return result;
3064 : }
3065 :
3066 : /*
3067 : * Reorder the elements of a list of grouping sets such that they have correct
3068 : * prefix relationships. Also inserts the GroupingSetData annotations.
3069 : *
3070 : * The input must be ordered with smallest sets first; the result is returned
3071 : * with largest sets first. Note that the result shares no list substructure
3072 : * with the input, so it's safe for the caller to modify it later.
3073 : *
3074 : * If we're passed in a sortclause, we follow its order of columns to the
3075 : * extent possible, to minimize the chance that we add unnecessary sorts.
3076 : * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3077 : * gets implemented in one pass.)
3078 : */
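/*
 * To sketch the example above: given the input [(c), (a,b,c)] (smallest
 * first) and sortclause c,b,a, the first iteration emits the prefix (c);
 * for the second set the new elements {a,b} are added in sortclause order,
 * producing (c,b,a).  The result, largest first, is [(c,b,a), (c)], so a
 * single sort on c,b,a serves both the rollup and the ORDER BY.
 */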
3079 : static List *
3080 1426 : reorder_grouping_sets(List *groupingSets, List *sortclause)
3081 : {
3082 : ListCell *lc;
3083 1426 : List *previous = NIL;
3084 1426 : List *result = NIL;
3085 :
3086 4148 : foreach(lc, groupingSets)
3087 : {
3088 2722 : List *candidate = (List *) lfirst(lc);
3089 2722 : List *new_elems = list_difference_int(candidate, previous);
3090 2722 : GroupingSetData *gs = makeNode(GroupingSetData);
3091 :
3092 2886 : while (list_length(sortclause) > list_length(previous) &&
3093 : new_elems != NIL)
3094 : {
3095 272 : SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3096 272 : int ref = sc->tleSortGroupRef;
3097 :
3098 272 : if (list_member_int(new_elems, ref))
3099 : {
3100 164 : previous = lappend_int(previous, ref);
3101 164 : new_elems = list_delete_int(new_elems, ref);
3102 : }
3103 : else
3104 : {
3105 : /* diverged from the sortclause; give up on it */
3106 108 : sortclause = NIL;
3107 108 : break;
3108 : }
3109 : }
3110 :
3111 2722 : previous = list_concat(previous, new_elems);
3112 :
3113 2722 : gs->set = list_copy(previous);
3114 2722 : result = lcons(gs, result);
3115 : }
3116 :
3117 1426 : list_free(previous);
3118 :
3119 1426 : return result;
3120 : }
3121 :
3122 : /*
3123 : * has_volatile_pathkey
3124 : * Returns true if any PathKey in 'keys' has an EquivalenceClass
3125 : * containing a volatile function. Otherwise returns false.
3126 : */
3127 : static bool
3128 2738 : has_volatile_pathkey(List *keys)
3129 : {
3130 : ListCell *lc;
3131 :
3132 5632 : foreach(lc, keys)
3133 : {
3134 2912 : PathKey *pathkey = lfirst_node(PathKey, lc);
3135 :
3136 2912 : if (pathkey->pk_eclass->ec_has_volatile)
3137 18 : return true;
3138 : }
3139 :
3140 2720 : return false;
3141 : }
3142 :
3143 : /*
3144 : * adjust_group_pathkeys_for_groupagg
3145 : * Add pathkeys to root->group_pathkeys to reflect the best set of
3146 : * pre-ordered input for ordered aggregates.
3147 : *
3148 : * We define "best" as the pathkeys that suit the largest number of
3149 : * aggregate functions. We find these by looking at the first ORDER BY /
3150 : * DISTINCT aggregate and taking the pathkeys for that before searching for
3151 : * other aggregates that require the same or a more strict variation of the
3152 : * same pathkeys. We then repeat that process for any remaining aggregates
3153 : * with different pathkeys and if we find another set of pathkeys that suits a
3154 : * larger number of aggregates then we select those pathkeys instead.
3155 : *
3156 : * When the best pathkeys are found we also mark each Aggref that can use
3157 : * those pathkeys as aggpresorted = true.
3158 : *
3159 : * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3160 : * volatile functions, we never make use of these pathkeys. We want to ensure
3161 : * that sorts using volatile functions are done independently in each Aggref
3162 : * rather than once at the query level. If we were to allow this then Aggrefs
3163 : * with compatible sort orders would all transition their rows in the same
3164 : * order if those pathkeys were deemed to be the best pathkeys to sort on.
3165 : * Whereas, if some other set of Aggref's pathkeys happened to be deemed
3166 : * better pathkeys to sort on, then the volatile function Aggrefs would be
3167 : * left to perform their sorts individually. To avoid this inconsistent
3168 : * behavior which could make Aggref results depend on what other Aggrefs the
3169 : * query contains, we always force Aggrefs with volatile functions to perform
3170 : * their own sorts.
3171 : */
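/*
 * For instance (hypothetical table t, not an example from the original
 * comments), in
 *		SELECT sum(a ORDER BY b), sum(a ORDER BY b, c) FROM t GROUP BY d;
 * both aggregates can consume input sorted on (d, b, c): the second
 * aggregate's pathkeys are strictly stronger than the first's, so they win,
 * are appended to the GROUP BY pathkeys, and both Aggrefs get marked
 * aggpresorted.  An aggregate ordered by some unrelated column would be
 * left to perform its own sort.
 */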
3172 : static void
3173 2318 : adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3174 : {
3175 2318 : List *grouppathkeys = root->group_pathkeys;
3176 : List *bestpathkeys;
3177 : Bitmapset *bestaggs;
3178 : Bitmapset *unprocessed_aggs;
3179 : ListCell *lc;
3180 : int i;
3181 :
3182 : /* Shouldn't be here if there are grouping sets */
3183 : Assert(root->parse->groupingSets == NIL);
3184 : /* Shouldn't be here unless there are some ordered aggregates */
3185 : Assert(root->numOrderedAggs > 0);
3186 :
3187 : /* Do nothing if disabled */
3188 2318 : if (!enable_presorted_aggregate)
3189 6 : return;
3190 :
3191 : /*
3192 : * Make a first pass over all AggInfos to collect a Bitmapset containing
3193 : * the indexes of all AggInfos to be processed below.
3194 : */
3195 2312 : unprocessed_aggs = NULL;
3196 5308 : foreach(lc, root->agginfos)
3197 : {
3198 2996 : AggInfo *agginfo = lfirst_node(AggInfo, lc);
3199 2996 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3200 :
3201 2996 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3202 264 : continue;
3203 :
3204 : /* only add aggregates with a DISTINCT or ORDER BY */
3205 2732 : if (aggref->aggdistinct != NIL || aggref->aggorder != NIL)
3206 2432 : unprocessed_aggs = bms_add_member(unprocessed_aggs,
3207 : foreach_current_index(lc));
3208 : }
3209 :
3210 : /*
3211 : * Now process all the unprocessed_aggs to find the best set of pathkeys
3212 : * for the given set of aggregates.
3213 : *
3214 : * On the first outer loop here 'bestaggs' will be empty. We'll populate
3215 : * this during the first loop using the pathkeys for the very first
3216 : * AggInfo then taking any stronger pathkeys from any other AggInfos with
3217 : * a more strict set of compatible pathkeys. Once the outer loop is
3218 : * complete, we mark off all the aggregates with compatible pathkeys then
3219 : * remove those from the unprocessed_aggs and repeat the process to try to
3220 : * find another set of pathkeys that are suitable for a larger number of
3221 : * aggregates. The outer loop will stop when there are not enough
3222 : * unprocessed aggregates for it to be possible to find a set of pathkeys
3223 : * to suit a larger number of aggregates.
3224 : */
3225 2312 : bestpathkeys = NIL;
3226 2312 : bestaggs = NULL;
3227 4576 : while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3228 : {
3229 2264 : Bitmapset *aggindexes = NULL;
3230 2264 : List *currpathkeys = NIL;
3231 :
3232 2264 : i = -1;
3233 7266 : while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3234 : {
3235 2738 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3236 2738 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3237 : List *sortlist;
3238 : List *pathkeys;
3239 :
3240 2738 : if (aggref->aggdistinct != NIL)
3241 718 : sortlist = aggref->aggdistinct;
3242 : else
3243 2020 : sortlist = aggref->aggorder;
3244 :
3245 2738 : pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3246 : aggref->args);
3247 :
3248 : /*
3249 : * Ignore Aggrefs which have volatile functions in their ORDER BY
3250 : * or DISTINCT clause.
3251 : */
3252 2738 : if (has_volatile_pathkey(pathkeys))
3253 : {
3254 18 : unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3255 18 : continue;
3256 : }
3257 :
3258 : /*
3259 : * When not set yet, take the pathkeys from the first unprocessed
3260 : * aggregate.
3261 : */
3262 2720 : if (currpathkeys == NIL)
3263 : {
3264 2258 : currpathkeys = pathkeys;
3265 :
3266 : /* include the GROUP BY pathkeys, if they exist */
3267 2258 : if (grouppathkeys != NIL)
3268 276 : currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3269 : currpathkeys);
3270 :
3271 : /* record that we found pathkeys for this aggregate */
3272 2258 : aggindexes = bms_add_member(aggindexes, i);
3273 : }
3274 : else
3275 : {
3276 : /* now look for a stronger set of matching pathkeys */
3277 :
3278 : /* include the GROUP BY pathkeys, if they exist */
3279 462 : if (grouppathkeys != NIL)
3280 288 : pathkeys = append_pathkeys(list_copy(grouppathkeys),
3281 : pathkeys);
3282 :
3283 : /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3284 462 : switch (compare_pathkeys(currpathkeys, pathkeys))
3285 : {
3286 12 : case PATHKEYS_BETTER2:
3287 : /* 'pathkeys' are stronger, use these ones instead */
3288 12 : currpathkeys = pathkeys;
3289 : /* FALLTHROUGH */
3290 :
3291 72 : case PATHKEYS_BETTER1:
3292 : /* 'pathkeys' are less strict */
3293 : /* FALLTHROUGH */
3294 :
3295 : case PATHKEYS_EQUAL:
3296 : /* mark this aggregate as covered by 'currpathkeys' */
3297 72 : aggindexes = bms_add_member(aggindexes, i);
3298 72 : break;
3299 :
3300 390 : case PATHKEYS_DIFFERENT:
3301 390 : break;
3302 : }
3303 5002 : }
3304 : }
3305 :
3306 : /* remove the aggregates that we've just processed */
3307 2264 : unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3308 :
3309 : /*
3310 : * If this pass included more aggregates than the previous best then
3311 : * use these ones as the best set.
3312 : */
3313 2264 : if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3314 : {
3315 2156 : bestaggs = aggindexes;
3316 2156 : bestpathkeys = currpathkeys;
3317 : }
3318 : }
3319 :
3320 : /*
3321 : * If we found any ordered aggregates, update root->group_pathkeys to add
3322 : * the best set of aggregate pathkeys. Note that bestpathkeys includes
3323 : * the original GROUP BY pathkeys already.
3324 : */
3325 2312 : if (bestpathkeys != NIL)
3326 2108 : root->group_pathkeys = bestpathkeys;
3327 :
3328 : /*
3329 : * Now that we've found the best set of aggregates we can set the
3330 : * presorted flag to indicate to the executor that it needn't bother
3331 : * performing a sort for these Aggrefs. We're able to do this now as
3332 : * there's no chance of a Hash Aggregate plan as create_grouping_paths
3333 : * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3334 : * of ordered aggregates.
3335 : */
3336 2312 : i = -1;
3337 4510 : while ((i = bms_next_member(bestaggs, i)) >= 0)
3338 : {
3339 2198 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3340 :
3341 4414 : foreach(lc, agginfo->aggrefs)
3342 : {
3343 2216 : Aggref *aggref = lfirst_node(Aggref, lc);
3344 :
3345 2216 : aggref->aggpresorted = true;
3346 : }
3347 : }
3348 : }
3349 :
3350 : /*
3351 : * Compute query_pathkeys and other pathkeys during plan generation
3352 : */
3353 : static void
3354 532356 : standard_qp_callback(PlannerInfo *root, void *extra)
3355 : {
3356 532356 : Query *parse = root->parse;
3357 532356 : standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3358 532356 : List *tlist = root->processed_tlist;
3359 532356 : List *activeWindows = qp_extra->activeWindows;
3360 :
3361 : /*
3362 : * Calculate pathkeys that represent grouping/ordering and/or ordered
3363 : * aggregate requirements.
3364 : */
3365 532356 : if (qp_extra->gset_data)
3366 : {
3367 : /*
3368 : * With grouping sets, just use the first RollupData's groupClause. We
3369 : * don't make any effort to optimize grouping clauses when there are
3370 : * grouping sets, nor can we combine aggregate ordering keys with
3371 : * grouping.
3372 : */
3373 872 : List *rollups = qp_extra->gset_data->rollups;
3374 872 : List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3375 :
3376 872 : if (grouping_is_sortable(groupClause))
3377 : {
3378 : bool sortable;
3379 :
3380 : /*
3381 : * The groupClause is logically below the grouping step. So if
3382 : * there is an RTE entry for the grouping step, we need to remove
3383 : * its RT index from the sort expressions before we make PathKeys
3384 : * for them.
3385 : */
3386 872 : root->group_pathkeys =
3387 872 : make_pathkeys_for_sortclauses_extended(root,
3388 : &groupClause,
3389 : tlist,
3390 : false,
3391 872 : parse->hasGroupRTE,
3392 : &sortable,
3393 : false);
3394 : Assert(sortable);
3395 872 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3396 : }
3397 : else
3398 : {
3399 0 : root->group_pathkeys = NIL;
3400 0 : root->num_groupby_pathkeys = 0;
3401 : }
3402 : }
3403 531484 : else if (parse->groupClause || root->numOrderedAggs > 0)
3404 5704 : {
3405 : /*
3406 : * With a plain GROUP BY list, we can remove any grouping items that
3407 : * are proven redundant by EquivalenceClass processing. For example,
3408 : * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3409 : * especially common cases, but they're nearly free to detect. Note
3410 : * that we remove redundant items from processed_groupClause but not
3411 : * the original parse->groupClause.
3412 : */
3413 : bool sortable;
3414 :
3415 : /*
3416 : * Convert group clauses into pathkeys. Set the ec_sortref field of
3417 : * EquivalenceClass'es if it's not set yet.
3418 : */
3419 5704 : root->group_pathkeys =
3420 5704 : make_pathkeys_for_sortclauses_extended(root,
3421 : &root->processed_groupClause,
3422 : tlist,
3423 : true,
3424 : false,
3425 : &sortable,
3426 : true);
3427 5704 : if (!sortable)
3428 : {
3429 : /* Can't sort; no point in considering aggregate ordering either */
3430 0 : root->group_pathkeys = NIL;
3431 0 : root->num_groupby_pathkeys = 0;
3432 : }
3433 : else
3434 : {
3435 5704 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3436 : /* If we have ordered aggs, consider adding onto group_pathkeys */
3437 5704 : if (root->numOrderedAggs > 0)
3438 2318 : adjust_group_pathkeys_for_groupagg(root);
3439 : }
3440 : }
3441 : else
3442 : {
3443 525780 : root->group_pathkeys = NIL;
3444 525780 : root->num_groupby_pathkeys = 0;
3445 : }
3446 :
3447 : /* We consider only the first (bottom) window in pathkeys logic */
3448 532356 : if (activeWindows != NIL)
3449 : {
3450 2378 : WindowClause *wc = linitial_node(WindowClause, activeWindows);
3451 :
3452 2378 : root->window_pathkeys = make_pathkeys_for_window(root,
3453 : wc,
3454 : tlist);
3455 : }
3456 : else
3457 529978 : root->window_pathkeys = NIL;
3458 :
3459 : /*
3460 : * As with GROUP BY, we can discard any DISTINCT items that are proven
3461 : * redundant by EquivalenceClass processing. The non-redundant list is
3462 : * kept in root->processed_distinctClause, leaving the original
3463 : * parse->distinctClause alone.
3464 : */
3465 532356 : if (parse->distinctClause)
3466 : {
3467 : bool sortable;
3468 :
3469 : /* Make a copy since pathkey processing can modify the list */
3470 2560 : root->processed_distinctClause = list_copy(parse->distinctClause);
3471 2560 : root->distinct_pathkeys =
3472 2560 : make_pathkeys_for_sortclauses_extended(root,
3473 : &root->processed_distinctClause,
3474 : tlist,
3475 : true,
3476 : false,
3477 : &sortable,
3478 : false);
3479 2560 : if (!sortable)
3480 6 : root->distinct_pathkeys = NIL;
3481 : }
3482 : else
3483 529796 : root->distinct_pathkeys = NIL;
3484 :
3485 532356 : root->sort_pathkeys =
3486 532356 : make_pathkeys_for_sortclauses(root,
3487 : parse->sortClause,
3488 : tlist);
3489 :
3490 : /* setting setop_pathkeys might be useful to the union planner */
3491 532356 : if (qp_extra->setop != NULL)
3492 : {
3493 : List *groupClauses;
3494 : bool sortable;
3495 :
3496 12122 : groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3497 :
3498 12122 : root->setop_pathkeys =
3499 12122 : make_pathkeys_for_sortclauses_extended(root,
3500 : &groupClauses,
3501 : tlist,
3502 : false,
3503 : false,
3504 : &sortable,
3505 : false);
3506 12122 : if (!sortable)
3507 196 : root->setop_pathkeys = NIL;
3508 : }
3509 : else
3510 520234 : root->setop_pathkeys = NIL;
3511 :
3512 : /*
3513 : * Figure out whether we want a sorted result from query_planner.
3514 : *
3515 : * If we have a sortable GROUP BY clause, then we want a result sorted
3516 : * properly for grouping. Otherwise, if we have window functions to
3517 : * evaluate, we try to sort for the first window. Otherwise, if there's a
3518 : * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3519 : * we try to produce output that's sufficiently well sorted for the
3520 : * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3521 : * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3522 : * for a set operation which can benefit from presorted results and have a
3523 : * sortable targetlist, we want to sort by the target list.
3524 : *
3525 : * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3526 : * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3527 : * that might just leave us failing to exploit an available sort order at
3528 : * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3529 : * much easier, since we know that the parser ensured that one is a
3530 : * superset of the other.
3531 : */
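/*
 * For example (hypothetical query): in "SELECT DISTINCT a, b FROM t ORDER
 * BY a", distinct_pathkeys (a, b) are longer than sort_pathkeys (a), so we
 * request output sorted on (a, b), which can satisfy both the DISTINCT and
 * the ORDER BY in a single sort.
 */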
3532 532356 : if (root->group_pathkeys)
3533 6250 : root->query_pathkeys = root->group_pathkeys;
3534 526106 : else if (root->window_pathkeys)
3535 2036 : root->query_pathkeys = root->window_pathkeys;
3536 1048140 : else if (list_length(root->distinct_pathkeys) >
3537 524070 : list_length(root->sort_pathkeys))
3538 2106 : root->query_pathkeys = root->distinct_pathkeys;
3539 521964 : else if (root->sort_pathkeys)
3540 62076 : root->query_pathkeys = root->sort_pathkeys;
3541 459888 : else if (root->setop_pathkeys != NIL)
3542 10718 : root->query_pathkeys = root->setop_pathkeys;
3543 : else
3544 449170 : root->query_pathkeys = NIL;
3545 532356 : }
3546 :
3547 : /*
3548 : * Estimate number of groups produced by grouping clauses (1 if not grouping)
3549 : *
3550 : * path_rows: number of output rows from scan/join step
3551 : * gd: grouping sets data including list of grouping sets and their clauses
3552 : * target_list: target list containing group clause references
3553 : *
3554 : * If doing grouping sets, we also annotate the gsets data with the estimates
3555 : * for each set and each individual rollup list, with a view to later
3556 : * determining whether some combination of them could be hashed instead.
3557 : */
3558 : static double
3559 41588 : get_number_of_groups(PlannerInfo *root,
3560 : double path_rows,
3561 : grouping_sets_data *gd,
3562 : List *target_list)
3563 : {
3564 41588 : Query *parse = root->parse;
3565 : double dNumGroups;
3566 :
3567 41588 : if (parse->groupClause)
3568 : {
3569 : List *groupExprs;
3570 :
3571 6940 : if (parse->groupingSets)
3572 : {
3573 : /* Add up the estimates for each grouping set */
3574 : ListCell *lc;
3575 :
3576 : Assert(gd); /* keep Coverity happy */
3577 :
3578 830 : dNumGroups = 0;
3579 :
3580 2214 : foreach(lc, gd->rollups)
3581 : {
3582 1384 : RollupData *rollup = lfirst_node(RollupData, lc);
3583 : ListCell *lc2;
3584 : ListCell *lc3;
3585 :
3586 1384 : groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3587 : target_list);
3588 :
3589 1384 : rollup->numGroups = 0.0;
3590 :
3591 4016 : forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3592 : {
3593 2632 : List *gset = (List *) lfirst(lc2);
3594 2632 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3595 2632 : double numGroups = estimate_num_groups(root,
3596 : groupExprs,
3597 : path_rows,
3598 : &gset,
3599 : NULL);
3600 :
3601 2632 : gs->numGroups = numGroups;
3602 2632 : rollup->numGroups += numGroups;
3603 : }
3604 :
3605 1384 : dNumGroups += rollup->numGroups;
3606 : }
3607 :
3608 830 : if (gd->hash_sets_idx)
3609 : {
3610 : ListCell *lc2;
3611 :
3612 36 : gd->dNumHashGroups = 0;
3613 :
3614 36 : groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3615 : target_list);
3616 :
3617 78 : forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3618 : {
3619 42 : List *gset = (List *) lfirst(lc);
3620 42 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3621 42 : double numGroups = estimate_num_groups(root,
3622 : groupExprs,
3623 : path_rows,
3624 : &gset,
3625 : NULL);
3626 :
3627 42 : gs->numGroups = numGroups;
3628 42 : gd->dNumHashGroups += numGroups;
3629 : }
3630 :
3631 36 : dNumGroups += gd->dNumHashGroups;
3632 : }
3633 : }
3634 : else
3635 : {
3636 : /* Plain GROUP BY -- estimate based on optimized groupClause */
3637 6110 : groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3638 : target_list);
3639 :
3640 6110 : dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3641 : NULL, NULL);
3642 : }
3643 : }
3644 34648 : else if (parse->groupingSets)
3645 : {
3646 : /* Empty grouping sets ... one result row for each one */
3647 42 : dNumGroups = list_length(parse->groupingSets);
3648 : }
3649 34606 : else if (parse->hasAggs || root->hasHavingQual)
3650 : {
3651 : /* Plain aggregation, one result row */
3652 34606 : dNumGroups = 1;
3653 : }
3654 : else
3655 : {
3656 : /* Not grouping */
3657 0 : dNumGroups = 1;
3658 : }
3659 :
3660 41588 : return dNumGroups;
3661 : }
3662 :
3663 : /*
3664 : * create_grouping_paths
3665 : *
3666 : * Build a new upperrel containing Paths for grouping and/or aggregation.
3667 : * Along the way, we also build an upperrel for Paths which are partially
3668 : * grouped and/or aggregated. A partially grouped and/or aggregated path
3669 : * needs a FinalizeAggregate node to complete the aggregation. Currently,
3670 : * the only partially grouped paths we build are also partial paths; that
3671 : * is, they need a Gather and then a FinalizeAggregate.
3672 : *
3673 : * input_rel: contains the source-data Paths
3674 : * target: the pathtarget for the result Paths to compute
3675 : * gd: grouping sets data including list of grouping sets and their clauses
3676 : *
3677 : * Note: all Paths in input_rel are expected to return the target computed
3678 : * by make_group_input_target.
3679 : */
3680 : static RelOptInfo *
3681 38330 : create_grouping_paths(PlannerInfo *root,
3682 : RelOptInfo *input_rel,
3683 : PathTarget *target,
3684 : bool target_parallel_safe,
3685 : grouping_sets_data *gd)
3686 : {
3687 38330 : Query *parse = root->parse;
3688 : RelOptInfo *grouped_rel;
3689 : RelOptInfo *partially_grouped_rel;
3690 : AggClauseCosts agg_costs;
3691 :
3692 229980 : MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3693 38330 : get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3694 :
3695 : /*
3696 : * Create grouping relation to hold fully aggregated grouping and/or
3697 : * aggregation paths.
3698 : */
3699 38330 : grouped_rel = make_grouping_rel(root, input_rel, target,
3700 : target_parallel_safe, parse->havingQual);
3701 :
3702 : /*
3703 : * Create either paths for a degenerate grouping or paths for ordinary
3704 : * grouping, as appropriate.
3705 : */
3706 38330 : if (is_degenerate_grouping(root))
3707 18 : create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3708 : else
3709 : {
3710 38312 : int flags = 0;
3711 : GroupPathExtraData extra;
3712 :
3713 : /*
3714 : * Determine whether it's possible to perform sort-based
3715 : * implementations of grouping. (Note that if processed_groupClause
3716 : * is empty, grouping_is_sortable() is trivially true, and all the
3717 : * pathkeys_contained_in() tests will succeed too, so that we'll
3718 : * consider every surviving input path.)
3719 : *
3720 : * If we have grouping sets, we might be able to sort some but not all
3721 : * of them; in this case, we need can_sort to be true as long as we
3722 : * must consider any sorted-input plan.
3723 : */
3724 38312 : if ((gd && gd->rollups != NIL)
3725 37446 : || grouping_is_sortable(root->processed_groupClause))
3726 38306 : flags |= GROUPING_CAN_USE_SORT;
3727 :
3728 : /*
3729 : * Determine whether we should consider hash-based implementations of
3730 : * grouping.
3731 : *
3732 : * Hashed aggregation only applies if we're grouping. If we have
3733 : * grouping sets, some groups might be hashable but others not; in
3734 : * this case we set can_hash true as long as there is nothing globally
3735 : * preventing us from hashing (and we should therefore consider plans
3736 : * with hashes).
3737 : *
3738 : * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3739 : * BY aggregates. (Doing so would imply storing *all* the input
3740 : * values in the hash table, and/or running many sorts in parallel,
3741 : * either of which seems like a certain loser.) We similarly don't
3742 : * support ordered-set aggregates in hashed aggregation, but that case
3743 : * is also included in the numOrderedAggs count.
3744 : *
3745 : * Note: grouping_is_hashable() is much more expensive to check than
3746 : * the other gating conditions, so we want to do it last.
3747 : */
3748 38312 : if ((parse->groupClause != NIL &&
3749 8640 : root->numOrderedAggs == 0 &&
3750 4180 : (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3751 4176 : flags |= GROUPING_CAN_USE_HASH;
3752 :
3753 : /*
3754 : * Determine whether partial aggregation is possible.
3755 : */
3756 38312 : if (can_partial_agg(root))
3757 33746 : flags |= GROUPING_CAN_PARTIAL_AGG;
3758 :
3759 38312 : extra.flags = flags;
3760 38312 : extra.target_parallel_safe = target_parallel_safe;
3761 38312 : extra.havingQual = parse->havingQual;
3762 38312 : extra.targetList = parse->targetList;
3763 38312 : extra.partial_costs_set = false;
3764 :
3765 : /*
3766 : * Determine whether partitionwise aggregation is in theory possible.
3767 : * It can be disabled by the user, and for now, we don't try to
3768 : * support grouping sets. create_ordinary_grouping_paths() will check
3769 : * additional conditions, such as whether input_rel is partitioned.
3770 : */
3771 38312 : if (enable_partitionwise_aggregate && !parse->groupingSets)
3772 556 : extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3773 : else
3774 37756 : extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3775 :
3776 38312 : create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3777 : &agg_costs, gd, &extra,
3778 : &partially_grouped_rel);
3779 : }
3780 :
3781 38324 : set_cheapest(grouped_rel);
3782 38324 : return grouped_rel;
3783 : }
3784 :
3785 : /*
3786 : * make_grouping_rel
3787 : *
3788 : * Create a new grouping rel and set basic properties.
3789 : *
3790 : * input_rel represents the underlying scan/join relation.
3791 : * target is the output expected from the grouping relation.
3792 : */
3793 : static RelOptInfo *
3794 39824 : make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3795 : PathTarget *target, bool target_parallel_safe,
3796 : Node *havingQual)
3797 : {
3798 : RelOptInfo *grouped_rel;
3799 :
3800 39824 : if (IS_OTHER_REL(input_rel))
3801 : {
3802 1494 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3803 : input_rel->relids);
3804 1494 : grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3805 : }
3806 : else
3807 : {
3808 : /*
3809 : * By tradition, the relids set for the main grouping relation is
3810 : * NULL. (This could be changed, but might require adjustments
3811 : * elsewhere.)
3812 : */
3813 38330 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3814 : }
3815 :
3816 : /* Set target. */
3817 39824 : grouped_rel->reltarget = target;
3818 :
3819 : /*
3820 : * If the input relation is not parallel-safe, then the grouped relation
3821 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3822 : * target list and HAVING quals are parallel-safe.
3823 : */
3824 67758 : if (input_rel->consider_parallel && target_parallel_safe &&
3825 27934 : is_parallel_safe(root, (Node *) havingQual))
3826 27916 : grouped_rel->consider_parallel = true;
3827 :
3828 : /*
3829 : * If the input rel belongs to a single FDW, so does the grouped rel.
3830 : */
3831 39824 : grouped_rel->serverid = input_rel->serverid;
3832 39824 : grouped_rel->userid = input_rel->userid;
3833 39824 : grouped_rel->useridiscurrent = input_rel->useridiscurrent;
3834 39824 : grouped_rel->fdwroutine = input_rel->fdwroutine;
3835 :
3836 39824 : return grouped_rel;
3837 : }
3838 :
3839 : /*
3840 : * is_degenerate_grouping
3841 : *
3842 : * A degenerate grouping is one in which the query has a HAVING qual and/or
3843 : * grouping sets, but no aggregates and no GROUP BY (which implies that the
3844 : * grouping sets are all empty).
3845 : */
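/*
 * For example (hypothetical table t): "SELECT 1 FROM t HAVING random() <
 * 0.5" has a HAVING qual but no aggregates and no GROUP BY, so the whole
 * query emits either zero rows or exactly one row regardless of the
 * contents of t.
 */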
3846 : static bool
3847 38330 : is_degenerate_grouping(PlannerInfo *root)
3848 : {
3849 38330 : Query *parse = root->parse;
3850 :
3851 37312 : return (root->hasHavingQual || parse->groupingSets) &&
3852 75642 : !parse->hasAggs && parse->groupClause == NIL;
3853 : }
3854 :
3855 : /*
3856 : * create_degenerate_grouping_paths
3857 : *
3858 : * When the grouping is degenerate (see is_degenerate_grouping), we are
3859 : * supposed to emit either zero or one row for each grouping set depending on
3860 : * whether HAVING succeeds. Furthermore, there cannot be any variables in
3861 : * either HAVING or the targetlist, so we actually do not need the FROM table
3862 : * at all! We can just throw away the plan-so-far and generate a Result node.
3863 : * This is a sufficiently unusual corner case that it's not worth contorting
3864 : * the structure of this module to avoid having to generate the earlier paths
3865 : * in the first place.
3866 : */
3867 : static void
3868 18 : create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3869 : RelOptInfo *grouped_rel)
3870 : {
3871 18 : Query *parse = root->parse;
3872 : int nrows;
3873 : Path *path;
3874 :
3875 18 : nrows = list_length(parse->groupingSets);
3876 18 : if (nrows > 1)
3877 : {
3878 : /*
3879 : * Doesn't seem worthwhile writing code to cons up a generate_series
3880 : * or a values scan to emit multiple rows. Instead just make N clones
3881 : * and append them. (With a volatile HAVING clause, this means you
3882 : * might get between 0 and N output rows. Offhand I think that's
3883 : * desired.)
3884 : */
3885 0 : List *paths = NIL;
3886 :
3887 0 : while (--nrows >= 0)
3888 : {
3889 : path = (Path *)
3890 0 : create_group_result_path(root, grouped_rel,
3891 0 : grouped_rel->reltarget,
3892 0 : (List *) parse->havingQual);
3893 0 : paths = lappend(paths, path);
3894 : }
3895 : path = (Path *)
3896 0 : create_append_path(root,
3897 : grouped_rel,
3898 : paths,
3899 : NIL,
3900 : NIL,
3901 : NULL,
3902 : 0,
3903 : false,
3904 : -1);
3905 : }
3906 : else
3907 : {
3908 : /* No grouping sets, or just one, so one output row */
3909 : path = (Path *)
3910 18 : create_group_result_path(root, grouped_rel,
3911 18 : grouped_rel->reltarget,
3912 18 : (List *) parse->havingQual);
3913 : }
3914 :
3915 18 : add_path(grouped_rel, path);
3916 18 : }
3917 :
3918 : /*
3919 : * create_ordinary_grouping_paths
3920 : *
3921 : * Create grouping paths for the ordinary (that is, non-degenerate) case.
3922 : *
3923 : * We need to consider sorted and hashed aggregation in the same function,
3924 : * because otherwise (1) it would be harder to throw an appropriate error
3925 : * message if neither way works, and (2) we should not allow hashtable size
3926 : * considerations to dissuade us from using hashing if sorting is not possible.
3927 : *
3928 : * *partially_grouped_rel_p will be set to the partially grouped rel which this
3929 : * function creates, or to NULL if it doesn't create one.
3930 : */
3931 : static void
3932 39806 : create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
3933 : RelOptInfo *grouped_rel,
3934 : const AggClauseCosts *agg_costs,
3935 : grouping_sets_data *gd,
3936 : GroupPathExtraData *extra,
3937 : RelOptInfo **partially_grouped_rel_p)
3938 : {
3939 39806 : Path *cheapest_path = input_rel->cheapest_total_path;
3940 39806 : RelOptInfo *partially_grouped_rel = NULL;
3941 : double dNumGroups;
3942 39806 : PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
3943 :
3944 : /*
3945 : * If this is the topmost grouping relation or if the parent relation is
3946 : * doing some form of partitionwise aggregation, then we may be able to do
3947 : * it at this level also. However, if the input relation is not
3948 : * partitioned, partitionwise aggregate is impossible.
3949 : */
3950 39806 : if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
3951 2050 : IS_PARTITIONED_REL(input_rel))
3952 : {
3953 : /*
3954 : * If this is the topmost relation or if the parent relation is doing
3955 : * full partitionwise aggregation, then we can do full partitionwise
3956 : * aggregation provided that the GROUP BY clause contains all of the
3957 : * partitioning columns at this level and the collation used by GROUP
3958 : * BY matches the partitioning collation. Otherwise, we can do at
3959 : * most partial partitionwise aggregation. But if partial aggregation
3960 : * is not supported in general then we can't use it for partitionwise
3961 : * aggregation either.
3962 : *
3963 : * Check parse->groupClause not processed_groupClause, because it's
3964 : * okay if some of the partitioning columns were proved redundant.
3965 : */
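/*
 * For instance (hypothetical schema): with input_rel partitioned on column
 * p, "GROUP BY p, x" can be aggregated completely within each partition
 * (full partitionwise aggregation), since rows of different partitions can
 * never fall into the same group; with "GROUP BY x" alone, each partition
 * can at best produce partial aggregates that must be combined afterwards.
 */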
3966 1160 : if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
3967 556 : group_by_has_partkey(input_rel, extra->targetList,
3968 556 : root->parse->groupClause))
3969 320 : patype = PARTITIONWISE_AGGREGATE_FULL;
3970 284 : else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3971 242 : patype = PARTITIONWISE_AGGREGATE_PARTIAL;
3972 : else
3973 42 : patype = PARTITIONWISE_AGGREGATE_NONE;
3974 : }
3975 :
3976 : /*
3977 : * Before generating paths for grouped_rel, we first generate any possible
3978 : * partially grouped paths; that way, later code can easily consider both
3979 : * parallel and non-parallel approaches to grouping.
3980 : */
3981 39806 : if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
3982 : {
3983 : bool force_rel_creation;
3984 :
3985 : /*
3986 : * If we're doing partitionwise aggregation at this level, force
3987 : * creation of a partially_grouped_rel so we can add partitionwise
3988 : * paths to it.
3989 : */
3990 35168 : force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
3991 :
3992 : partially_grouped_rel =
3993 35168 : create_partial_grouping_paths(root,
3994 : grouped_rel,
3995 : input_rel,
3996 : gd,
3997 : extra,
3998 : force_rel_creation);
3999 : }
4000 :
4001 : /* Set out parameter. */
4002 39806 : *partially_grouped_rel_p = partially_grouped_rel;
4003 :
4004 : /* Apply partitionwise aggregation technique, if possible. */
4005 39806 : if (patype != PARTITIONWISE_AGGREGATE_NONE)
4006 562 : create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4007 : partially_grouped_rel, agg_costs,
4008 : gd, patype, extra);
4009 :
4010 : /* If we are doing partial aggregation only, return. */
4011 39806 : if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4012 : {
4013 : Assert(partially_grouped_rel);
4014 :
4015 618 : if (partially_grouped_rel->pathlist)
4016 618 : set_cheapest(partially_grouped_rel);
4017 :
4018 618 : return;
4019 : }
4020 :
4021 : /* Gather any partially grouped partial paths. */
4022 39188 : if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4023 : {
4024 1482 : gather_grouping_paths(root, partially_grouped_rel);
4025 1482 : set_cheapest(partially_grouped_rel);
4026 : }
4027 :
4028 : /*
4029 : * Estimate number of groups.
4030 : */
4031 39188 : dNumGroups = get_number_of_groups(root,
4032 : cheapest_path->rows,
4033 : gd,
4034 : extra->targetList);
4035 :
4036 : /* Build final grouping paths */
4037 39188 : add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4038 : partially_grouped_rel, agg_costs, gd,
4039 : dNumGroups, extra);
4040 :
4041 : /* Give a helpful error if we failed to find any implementation */
4042 39188 : if (grouped_rel->pathlist == NIL)
4043 6 : ereport(ERROR,
4044 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4045 : errmsg("could not implement GROUP BY"),
4046 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4047 :
4048 : /*
4049 : * If there is an FDW that's responsible for all baserels of the query,
4050 : * let it consider adding ForeignPaths.
4051 : */
4052 39182 : if (grouped_rel->fdwroutine &&
4053 334 : grouped_rel->fdwroutine->GetForeignUpperPaths)
4054 334 : grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4055 : input_rel, grouped_rel,
4056 : extra);
4057 :
4058 : /* Let extensions possibly add some more paths */
4059 39182 : if (create_upper_paths_hook)
4060 0 : (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4061 : input_rel, grouped_rel,
4062 : extra);
4063 : }
4064 :
4065 : /*
4066 : * For a given input path, consider the possible ways of doing grouping sets on
4067 : * it, by combinations of hashing and sorting. This can be called multiple
4068 : * times, so it's important that it not scribble on input. No result is
4069 : * returned, but any generated paths are added to grouped_rel.
4070 : */
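/*
 * For example (hypothetical outcome): with one rollup (a,b) -> (a) and a
 * separate set (c), the calls below may produce a path that hashes all the
 * sets at once (AGG_HASHED), a path that sorts the input and processes the
 * rollups one after another (AGG_SORTED), and an AGG_MIXED path that keeps
 * the rollup matching the input sort order while hashing the rest, subject
 * to the hash_mem limit.
 */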
4071 : static void
4072 1732 : consider_groupingsets_paths(PlannerInfo *root,
4073 : RelOptInfo *grouped_rel,
4074 : Path *path,
4075 : bool is_sorted,
4076 : bool can_hash,
4077 : grouping_sets_data *gd,
4078 : const AggClauseCosts *agg_costs,
4079 : double dNumGroups)
4080 : {
4081 1732 : Query *parse = root->parse;
4082 1732 : Size hash_mem_limit = get_hash_memory_limit();
4083 :
4084 : /*
4085 : * If we're not being offered sorted input, then only consider plans that
4086 : * can be done entirely by hashing.
4087 : *
4088 : * We can hash everything if it looks like it'll fit in hash_mem. But if
4089 : * the input is actually sorted despite not being advertised as such, we
4090 : * prefer to make use of that in order to use less memory.
4091 : *
4092 : * If none of the grouping sets are sortable, then ignore the hash_mem
4093 : * limit and generate a path anyway, since otherwise we'll just fail.
4094 : */
4095 1732 : if (!is_sorted)
4096 : {
4097 794 : List *new_rollups = NIL;
4098 794 : RollupData *unhashed_rollup = NULL;
4099 : List *sets_data;
4100 794 : List *empty_sets_data = NIL;
4101 794 : List *empty_sets = NIL;
4102 : ListCell *lc;
4103 794 : ListCell *l_start = list_head(gd->rollups);
4104 794 : AggStrategy strat = AGG_HASHED;
4105 : double hashsize;
4106 794 : double exclude_groups = 0.0;
4107 :
4108 : Assert(can_hash);
4109 :
4110 : /*
4111 : * If the input is coincidentally sorted usefully (which can happen
4112 : * even if is_sorted is false, since that flag only tells us whether our
4113 : * caller has set up the sorting for us), then save some hashtable space
4114 : * by making use of that. But we need to watch out for degenerate cases:
4115 : *
4116 : * 1) If there are any empty grouping sets, then group_pathkeys might
4117 : * be NIL if all non-empty grouping sets are unsortable. In this case,
4118 : * there will be a rollup containing only empty groups, and the
4119 : * pathkeys_contained_in test is vacuously true; this is ok.
4120 : *
4121 : * XXX: the above relies on the fact that group_pathkeys is generated
4122 : * from the first rollup. If we add the ability to consider multiple
4123 : * sort orders for grouping input, this assumption might fail.
4124 : *
4125 : * 2) If there are no empty sets and only unsortable sets, then the
4126 : * rollups list will be empty (and thus l_start == NULL), and
4127 : * group_pathkeys will be NIL; we must ensure that the vacuously-true
4128 : * pathkeys_contained_in test doesn't cause us to crash.
4129 : */
4130 1582 : if (l_start != NULL &&
4131 788 : pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4132 : {
4133 12 : unhashed_rollup = lfirst_node(RollupData, l_start);
4134 12 : exclude_groups = unhashed_rollup->numGroups;
4135 12 : l_start = lnext(gd->rollups, l_start);
4136 : }
4137 :
4138 794 : hashsize = estimate_hashagg_tablesize(root,
4139 : path,
4140 : agg_costs,
4141 : dNumGroups - exclude_groups);
4142 :
4143 : /*
4144 : * gd->rollups is empty if we have only unsortable columns to work
4145 : * with. Override hash_mem in that case; otherwise, we'll rely on the
4146 : * sorted-input case to generate usable mixed paths.
4147 : */
4148 794 : if (hashsize > hash_mem_limit && gd->rollups)
4149 18 : return; /* nope, won't fit */
4150 :
4151 : /*
4152 : * We need to burst the existing rollups list into individual grouping
4153 : * sets and recompute a groupClause for each set.
4154 : */
4155 776 : sets_data = list_copy(gd->unsortable_sets);
4156 :
4157 1968 : for_each_cell(lc, gd->rollups, l_start)
4158 : {
4159 1216 : RollupData *rollup = lfirst_node(RollupData, lc);
4160 :
4161 : /*
4162 : * If we find an unhashable rollup that's not been skipped by the
4163 : * "actually sorted" check above, we can't cope; we'd need sorted
4164 : * input (with a different sort order) but we can't get that here.
4165 : * So bail out; we'll get a valid path from the is_sorted case
4166 : * instead.
4167 : *
4168 : * The mere presence of empty grouping sets doesn't make a rollup
4169 : * unhashable (see preprocess_grouping_sets); we handle those
4170 : * specially below.
4171 : */
4172 1216 : if (!rollup->hashable)
4173 24 : return;
4174 :
4175 1192 : sets_data = list_concat(sets_data, rollup->gsets_data);
4176 : }
4177 3150 : foreach(lc, sets_data)
4178 : {
4179 2398 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4180 2398 : List *gset = gs->set;
4181 : RollupData *rollup;
4182 :
4183 2398 : if (gset == NIL)
4184 : {
4185 : /* Empty grouping sets can't be hashed. */
4186 484 : empty_sets_data = lappend(empty_sets_data, gs);
4187 484 : empty_sets = lappend(empty_sets, NIL);
4188 : }
4189 : else
4190 : {
4191 1914 : rollup = makeNode(RollupData);
4192 :
4193 1914 : rollup->groupClause = preprocess_groupclause(root, gset);
4194 1914 : rollup->gsets_data = list_make1(gs);
4195 1914 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4196 : rollup->gsets_data,
4197 : gd->tleref_to_colnum_map);
4198 1914 : rollup->numGroups = gs->numGroups;
4199 1914 : rollup->hashable = true;
4200 1914 : rollup->is_hashed = true;
4201 1914 : new_rollups = lappend(new_rollups, rollup);
4202 : }
4203 : }
4204 :
4205 : /*
4206 : * If we didn't find anything nonempty to hash, then bail. We'll
4207 : * generate a path from the is_sorted case.
4208 : */
4209 752 : if (new_rollups == NIL)
4210 0 : return;
4211 :
4212 : /*
4213 : * If there were empty grouping sets they should have been in the
4214 : * first rollup.
4215 : */
4216 : Assert(!unhashed_rollup || !empty_sets);
4217 :
4218 752 : if (unhashed_rollup)
4219 : {
4220 12 : new_rollups = lappend(new_rollups, unhashed_rollup);
4221 12 : strat = AGG_MIXED;
4222 : }
4223 740 : else if (empty_sets)
4224 : {
4225 436 : RollupData *rollup = makeNode(RollupData);
4226 :
4227 436 : rollup->groupClause = NIL;
4228 436 : rollup->gsets_data = empty_sets_data;
4229 436 : rollup->gsets = empty_sets;
4230 436 : rollup->numGroups = list_length(empty_sets);
4231 436 : rollup->hashable = false;
4232 436 : rollup->is_hashed = false;
4233 436 : new_rollups = lappend(new_rollups, rollup);
4234 436 : strat = AGG_MIXED;
4235 : }
4236 :
4237 752 : add_path(grouped_rel, (Path *)
4238 752 : create_groupingsets_path(root,
4239 : grouped_rel,
4240 : path,
4241 752 : (List *) parse->havingQual,
4242 : strat,
4243 : new_rollups,
4244 : agg_costs));
4245 752 : return;
4246 : }
4247 :
4248 : /*
4249 : * If we have sorted input but nothing we can do with it, bail.
4250 : */
4251 938 : if (gd->rollups == NIL)
4252 0 : return;
4253 :
4254 : /*
4255 : * Given sorted input, we try to make two paths: one sorted and one mixed
4256 : * sort/hash. (We need to try both because hashagg might be disabled, or
4257 : * some columns might not be sortable.)
4258 : *
4259 : * can_hash is passed in as false if some obstacle elsewhere (such as
4260 : * ordered aggs) means that we shouldn't consider hashing at all.
4261 : */
4262 938 : if (can_hash && gd->any_hashable)
4263 : {
4264 860 : List *rollups = NIL;
4265 860 : List *hash_sets = list_copy(gd->unsortable_sets);
4266 860 : double availspace = hash_mem_limit;
4267 : ListCell *lc;
4268 :
4269 : /*
4270 : * Account first for space needed for groups we can't sort at all.
4271 : */
4272 860 : availspace -= estimate_hashagg_tablesize(root,
4273 : path,
4274 : agg_costs,
4275 : gd->dNumHashGroups);
4276 :
4277 860 : if (availspace > 0 && list_length(gd->rollups) > 1)
4278 : {
4279 : double scale;
4280 444 : int num_rollups = list_length(gd->rollups);
4281 : int k_capacity;
4282 444 : int *k_weights = palloc(num_rollups * sizeof(int));
4283 444 : Bitmapset *hash_items = NULL;
4284 : int i;
4285 :
4286 : /*
4287 : * We treat this as a knapsack problem: the knapsack capacity
4288 : * represents hash_mem, the item weights are the estimated memory
4289 : * usage of the hashtables needed to implement a single rollup,
4290 : * and we really ought to use the cost saving as the item value;
4291 : * however, currently the costs assigned to sort nodes don't
4292 : * reflect the comparison costs well, and so we treat all items as
4293 : * of equal value (each rollup we hash instead saves us one sort).
4294 : *
4295 : * To use the discrete knapsack, we need to scale the values to a
4296 : * reasonably small bounded range. We choose to allow a 5% error
4297 : * margin; we have no more than 4096 rollups in the worst possible
4298 : * case, which with a 5% error margin will require a bit over 42MB
4299 : * of workspace. (Anyone wanting to plan queries that complex had
4300 : * better have the memory for it. In more reasonable cases, with
4301 : * no more than a couple of dozen rollups, the memory usage will
4302 : * be negligible.)
4303 : *
4304 : * k_capacity is naturally bounded, but we clamp the values for
4305 : * scale and weight (below) to avoid overflows or underflows (or
4306 : * uselessly trying to use a scale factor less than 1 byte).
4307 : */
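/*
 * Back-of-the-envelope check of the 5% / 42MB figures above (illustrative
 * only): the 5% margin corresponds to the divisor 20.0 below, so k_capacity
 * is at most 20 * 4096 = 81920.  Assuming each of the ~81921 DP cells keeps
 * a bitmapset of up to 4096 items (about 512 bytes) plus an 8-byte value,
 * the workspace is roughly 81921 * 520 bytes, a bit over 42MB.
 */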
4308 444 : scale = Max(availspace / (20.0 * num_rollups), 1.0);
4309 444 : k_capacity = (int) floor(availspace / scale);
4310 :
4311 : /*
4312 : * We leave the first rollup out of consideration since it's the
4313 : * one that matches the input sort order. We assign indexes "i"
4314 : * to only those entries considered for hashing; the second loop,
4315 : * below, must use the same condition.
4316 : */
4317 444 : i = 0;
4318 1128 : for_each_from(lc, gd->rollups, 1)
4319 : {
4320 684 : RollupData *rollup = lfirst_node(RollupData, lc);
4321 :
4322 684 : if (rollup->hashable)
4323 : {
4324 684 : double sz = estimate_hashagg_tablesize(root,
4325 : path,
4326 : agg_costs,
4327 : rollup->numGroups);
4328 :
4329 : /*
4330 : * If sz is enormous, but hash_mem (and hence scale) is
4331 : * small, avoid integer overflow here.
4332 : */
4333 684 : k_weights[i] = (int) Min(floor(sz / scale),
4334 : k_capacity + 1.0);
4335 684 : ++i;
4336 : }
4337 : }
4338 :
4339 : /*
4340 : * Apply knapsack algorithm; compute the set of items which
4341 : * maximizes the value stored (in this case the number of sorts
4342 : * saved) while keeping the total size (approximately) within
4343 : * capacity.
4344 : */
4345 444 : if (i > 0)
4346 444 : hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4347 :
4348 444 : if (!bms_is_empty(hash_items))
4349 : {
4350 444 : rollups = list_make1(linitial(gd->rollups));
4351 :
4352 444 : i = 0;
4353 1128 : for_each_from(lc, gd->rollups, 1)
4354 : {
4355 684 : RollupData *rollup = lfirst_node(RollupData, lc);
4356 :
4357 684 : if (rollup->hashable)
4358 : {
4359 684 : if (bms_is_member(i, hash_items))
4360 648 : hash_sets = list_concat(hash_sets,
4361 648 : rollup->gsets_data);
4362 : else
4363 36 : rollups = lappend(rollups, rollup);
4364 684 : ++i;
4365 : }
4366 : else
4367 0 : rollups = lappend(rollups, rollup);
4368 : }
4369 : }
4370 : }
4371 :
4372 860 : if (!rollups && hash_sets)
4373 24 : rollups = list_copy(gd->rollups);
4374 :
4375 1648 : foreach(lc, hash_sets)
4376 : {
4377 788 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4378 788 : RollupData *rollup = makeNode(RollupData);
4379 :
4380 : Assert(gs->set != NIL);
4381 :
4382 788 : rollup->groupClause = preprocess_groupclause(root, gs->set);
4383 788 : rollup->gsets_data = list_make1(gs);
4384 788 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4385 : rollup->gsets_data,
4386 : gd->tleref_to_colnum_map);
4387 788 : rollup->numGroups = gs->numGroups;
4388 788 : rollup->hashable = true;
4389 788 : rollup->is_hashed = true;
4390 788 : rollups = lcons(rollup, rollups);
4391 : }
4392 :
4393 860 : if (rollups)
4394 : {
4395 468 : add_path(grouped_rel, (Path *)
4396 468 : create_groupingsets_path(root,
4397 : grouped_rel,
4398 : path,
4399 468 : (List *) parse->havingQual,
4400 : AGG_MIXED,
4401 : rollups,
4402 : agg_costs));
4403 : }
4404 : }
4405 :
4406 : /*
4407 : * Now try the simple sorted case.
4408 : */
4409 938 : if (!gd->unsortable_sets)
4410 908 : add_path(grouped_rel, (Path *)
4411 908 : create_groupingsets_path(root,
4412 : grouped_rel,
4413 : path,
4414 908 : (List *) parse->havingQual,
4415 : AGG_SORTED,
4416 : gd->rollups,
4417 : agg_costs));
4418 : }
4419 :
4420 : /*
4421 : * create_window_paths
4422 : *
4423 : * Build a new upperrel containing Paths for window-function evaluation.
4424 : *
4425 : * input_rel: contains the source-data Paths
4426 : * input_target: result of make_window_input_target
4427 : * output_target: what the topmost WindowAggPath should return
4428 : * wflists: result of find_window_functions
4429 : * activeWindows: result of select_active_windows
4430 : *
4431 : * Note: all Paths in input_rel are expected to return input_target.
4432 : */
4433 : static RelOptInfo *
4434 2378 : create_window_paths(PlannerInfo *root,
4435 : RelOptInfo *input_rel,
4436 : PathTarget *input_target,
4437 : PathTarget *output_target,
4438 : bool output_target_parallel_safe,
4439 : WindowFuncLists *wflists,
4440 : List *activeWindows)
4441 : {
4442 : RelOptInfo *window_rel;
4443 : ListCell *lc;
4444 :
4445 : /* For now, do all work in the (WINDOW, NULL) upperrel */
4446 2378 : window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4447 :
4448 : /*
4449 : * If the input relation is not parallel-safe, then the window relation
4450 : * can't be parallel-safe, either. Otherwise, we need to examine the
4451 : * target list and active windows for non-parallel-safe constructs.
4452 : */
4453 2378 : if (input_rel->consider_parallel && output_target_parallel_safe &&
4454 0 : is_parallel_safe(root, (Node *) activeWindows))
4455 0 : window_rel->consider_parallel = true;
4456 :
4457 : /*
4458 : * If the input rel belongs to a single FDW, so does the window rel.
4459 : */
4460 2378 : window_rel->serverid = input_rel->serverid;
4461 2378 : window_rel->userid = input_rel->userid;
4462 2378 : window_rel->useridiscurrent = input_rel->useridiscurrent;
4463 2378 : window_rel->fdwroutine = input_rel->fdwroutine;
4464 :
4465 : /*
4466 : * Consider computing window functions starting from the existing
4467 : * cheapest-total path (which will likely require a sort) as well as any
4468 : * existing paths that satisfy or partially satisfy root->window_pathkeys.
4469 : */
4470 5082 : foreach(lc, input_rel->pathlist)
4471 : {
4472 2704 : Path *path = (Path *) lfirst(lc);
4473 : int presorted_keys;
4474 :
4475 3030 : if (path == input_rel->cheapest_total_path ||
4476 326 : pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4477 144 : &presorted_keys) ||
4478 144 : presorted_keys > 0)
4479 2590 : create_one_window_path(root,
4480 : window_rel,
4481 : path,
4482 : input_target,
4483 : output_target,
4484 : wflists,
4485 : activeWindows);
4486 : }
4487 :
4488 : /*
4489 : * If there is an FDW that's responsible for all baserels of the query,
4490 : * let it consider adding ForeignPaths.
4491 : */
4492 2378 : if (window_rel->fdwroutine &&
4493 12 : window_rel->fdwroutine->GetForeignUpperPaths)
4494 12 : window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4495 : input_rel, window_rel,
4496 : NULL);
4497 :
4498 : /* Let extensions possibly add some more paths */
4499 2378 : if (create_upper_paths_hook)
4500 0 : (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4501 : input_rel, window_rel, NULL);
4502 :
4503 : /* Now choose the best path(s) */
4504 2378 : set_cheapest(window_rel);
4505 :
4506 2378 : return window_rel;
4507 : }
4508 :
4509 : /*
4510 : * Stack window-function implementation steps atop the given Path, and
4511 : * add the result to window_rel.
4512 : *
4513 : * window_rel: upperrel to contain result
4514 : * path: input Path to use (must return input_target)
4515 : * input_target: result of make_window_input_target
4516 : * output_target: what the topmost WindowAggPath should return
4517 : * wflists: result of find_window_functions
4518 : * activeWindows: result of select_active_windows
4519 : */
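/*
 * For example (hypothetical query):
 *		SELECT rank() OVER (ORDER BY a),
 *		       sum(x) OVER (PARTITION BY b ORDER BY c)
 *		FROM t;
 * results in two WindowAgg nodes stacked on top of each other, with a Sort
 * inserted below any WindowAgg whose required ordering (here (a) for one
 * clause and (b, c) for the other) is not already provided by its input.
 */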
4520 : static void
4521 2590 : create_one_window_path(PlannerInfo *root,
4522 : RelOptInfo *window_rel,
4523 : Path *path,
4524 : PathTarget *input_target,
4525 : PathTarget *output_target,
4526 : WindowFuncLists *wflists,
4527 : List *activeWindows)
4528 : {
4529 : PathTarget *window_target;
4530 : ListCell *l;
4531 2590 : List *topqual = NIL;
4532 :
4533 : /*
4534 : * Since each window clause could require a different sort order, we stack
4535 : * up a WindowAgg node for each clause, with sort steps between them as
4536 : * needed. (We assume that select_active_windows chose a good order for
4537 : * executing the clauses in.)
4538 : *
4539 : * input_target should contain all Vars and Aggs needed for the result.
4540 : * (In some cases we wouldn't need to propagate all of these all the way
4541 : * to the top, since they might only be needed as inputs to WindowFuncs.
4542 : * It's probably not worth trying to optimize that though.) It must also
4543 : * contain all window partitioning and sorting expressions, to ensure
4544 : * they're computed only once at the bottom of the stack (that's critical
4545 : * for volatile functions). As we climb up the stack, we'll add outputs
4546 : * for the WindowFuncs computed at each level.
4547 : */
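 : /*
 : * Illustrative sketch (hypothetical query, not taken from a real plan):
 : * given two active windows with different sort requirements, such as
 : *
 : *   SELECT sum(x) OVER (PARTITION BY a ORDER BY b),
 : *          sum(x) OVER (PARTITION BY c)
 : *   FROM tab;
 : *
 : * the loop below typically produces a stack like
 : *
 : *   WindowAgg (over c)
 : *     -> Sort (c)
 : *       -> WindowAgg (over a, b)
 : *         -> Sort (a, b)
 : *           -> scan of tab
 : *
 : * with input_target computed once at the bottom and each WindowAgg's
 : * results added to the target as we climb the stack.
 : */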
4548 2590 : window_target = input_target;
4549 :
4550 5348 : foreach(l, activeWindows)
4551 : {
4552 2758 : WindowClause *wc = lfirst_node(WindowClause, l);
4553 : List *window_pathkeys;
4554 2758 : List *runcondition = NIL;
4555 : int presorted_keys;
4556 : bool is_sorted;
4557 : bool topwindow;
4558 : ListCell *lc2;
4559 :
4560 2758 : window_pathkeys = make_pathkeys_for_window(root,
4561 : wc,
4562 : root->processed_tlist);
4563 :
4564 2758 : is_sorted = pathkeys_count_contained_in(window_pathkeys,
4565 : path->pathkeys,
4566 : &presorted_keys);
4567 :
4568 : /* Sort if necessary */
4569 2758 : if (!is_sorted)
4570 : {
4571 : /*
4572 : * There are either no presorted keys or incremental sort is
4573 : * disabled, so just perform a complete sort.
4574 : */
4575 2114 : if (presorted_keys == 0 || !enable_incremental_sort)
4576 2048 : path = (Path *) create_sort_path(root, window_rel,
4577 : path,
4578 : window_pathkeys,
4579 : -1.0);
4580 : else
4581 : {
4582 : /*
4583 : * Since we have presorted keys and incremental sort is
4584 : * enabled, just use incremental sort.
4585 : */
4586 66 : path = (Path *) create_incremental_sort_path(root,
4587 : window_rel,
4588 : path,
4589 : window_pathkeys,
4590 : presorted_keys,
4591 : -1.0);
4592 : }
4593 : }
4594 :
4595 2758 : if (lnext(activeWindows, l))
4596 : {
4597 : /*
4598 : * Add the current WindowFuncs to the output target for this
4599 : * intermediate WindowAggPath. We must copy window_target to
4600 : * avoid changing the previous path's target.
4601 : *
4602 : * Note: a WindowFunc adds nothing to the target's eval costs; but
4603 : * we do need to account for the increase in tlist width.
4604 : */
4605 168 : int64 tuple_width = window_target->width;
4606 :
4607 168 : window_target = copy_pathtarget(window_target);
4608 384 : foreach(lc2, wflists->windowFuncs[wc->winref])
4609 : {
4610 216 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4611 :
4612 216 : add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4613 216 : tuple_width += get_typavgwidth(wfunc->wintype, -1);
4614 : }
4615 168 : window_target->width = clamp_width_est(tuple_width);
4616 : }
4617 : else
4618 : {
4619 : /* Install the goal target in the topmost WindowAgg */
4620 2590 : window_target = output_target;
4621 : }
4622 :
4623 : /* mark the final item in the list as the top-level window */
4624 2758 : topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4625 :
4626 : /*
4627 : * Collect the WindowFuncRunConditions from each WindowFunc and
4628 : * convert them into OpExprs
4629 : */
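 : /*
 : * As a rough example (hypothetical query): for
 : *
 : *   SELECT * FROM (SELECT a, row_number() OVER (ORDER BY b) AS rn
 : *                  FROM tab) s
 : *   WHERE rn <= 10;
 : *
 : * the WindowFuncRunCondition recorded for row_number() is turned into an
 : * OpExpr of the form "row_number() OVER (...) <= 10", which the executor
 : * can use to stop evaluating the window early.  With wfunc_left the
 : * WindowFunc appears on the left of the operator, otherwise on the right.
 : */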
4630 6254 : foreach(lc2, wflists->windowFuncs[wc->winref])
4631 : {
4632 : ListCell *lc3;
4633 3496 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4634 :
4635 3676 : foreach(lc3, wfunc->runCondition)
4636 : {
4637 180 : WindowFuncRunCondition *wfuncrc =
4638 : lfirst_node(WindowFuncRunCondition, lc3);
4639 : Expr *opexpr;
4640 : Expr *leftop;
4641 : Expr *rightop;
4642 :
4643 180 : if (wfuncrc->wfunc_left)
4644 : {
4645 162 : leftop = (Expr *) copyObject(wfunc);
4646 162 : rightop = copyObject(wfuncrc->arg);
4647 : }
4648 : else
4649 : {
4650 18 : leftop = copyObject(wfuncrc->arg);
4651 18 : rightop = (Expr *) copyObject(wfunc);
4652 : }
4653 :
4654 180 : opexpr = make_opclause(wfuncrc->opno,
4655 : BOOLOID,
4656 : false,
4657 : leftop,
4658 : rightop,
4659 : InvalidOid,
4660 : wfuncrc->inputcollid);
4661 :
4662 180 : runcondition = lappend(runcondition, opexpr);
4663 :
4664 180 : if (!topwindow)
4665 24 : topqual = lappend(topqual, opexpr);
4666 : }
4667 : }
4668 :
4669 : path = (Path *)
4670 2758 : create_windowagg_path(root, window_rel, path, window_target,
4671 2758 : wflists->windowFuncs[wc->winref],
4672 : runcondition, wc,
4673 : topwindow ? topqual : NIL, topwindow);
4674 : }
4675 :
4676 2590 : add_path(window_rel, path);
4677 2590 : }
4678 :
4679 : /*
4680 : * create_distinct_paths
4681 : *
4682 : * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4683 : *
4684 : * input_rel: contains the source-data Paths
4685 : * target: the pathtarget for the result Paths to compute
4686 : *
4687 : * Note: input paths should already compute the desired pathtarget, since
4688 : * Sort/Unique won't project anything.
4689 : */
4690 : static RelOptInfo *
4691 2560 : create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4692 : PathTarget *target)
4693 : {
4694 : RelOptInfo *distinct_rel;
4695 :
4696 : /* For now, do all work in the (DISTINCT, NULL) upperrel */
4697 2560 : distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4698 :
4699 : /*
4700 : * We don't compute anything at this level, so distinct_rel will be
4701 : * parallel-safe if the input rel is parallel-safe. In particular, if
4702 : * there is a DISTINCT ON (...) clause, any path for the input_rel will
4703 : * output those expressions, and will not be parallel-safe unless those
4704 : * expressions are parallel-safe.
4705 : */
4706 2560 : distinct_rel->consider_parallel = input_rel->consider_parallel;
4707 :
4708 : /*
4709 : * If the input rel belongs to a single FDW, so does the distinct_rel.
4710 : */
4711 2560 : distinct_rel->serverid = input_rel->serverid;
4712 2560 : distinct_rel->userid = input_rel->userid;
4713 2560 : distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4714 2560 : distinct_rel->fdwroutine = input_rel->fdwroutine;
4715 :
4716 : /* build distinct paths based on input_rel's pathlist */
4717 2560 : create_final_distinct_paths(root, input_rel, distinct_rel);
4718 :
4719 : /* now build distinct paths based on input_rel's partial_pathlist */
4720 2560 : create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4721 :
4722 : /* Give a helpful error if we failed to create any paths */
4723 2560 : if (distinct_rel->pathlist == NIL)
4724 0 : ereport(ERROR,
4725 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4726 : errmsg("could not implement DISTINCT"),
4727 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4728 :
4729 : /*
4730 : * If there is an FDW that's responsible for all baserels of the query,
4731 : * let it consider adding ForeignPaths.
4732 : */
4733 2560 : if (distinct_rel->fdwroutine &&
4734 16 : distinct_rel->fdwroutine->GetForeignUpperPaths)
4735 16 : distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4736 : UPPERREL_DISTINCT,
4737 : input_rel,
4738 : distinct_rel,
4739 : NULL);
4740 :
4741 : /* Let extensions possibly add some more paths */
4742 2560 : if (create_upper_paths_hook)
4743 0 : (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4744 : distinct_rel, NULL);
4745 :
4746 : /* Now choose the best path(s) */
4747 2560 : set_cheapest(distinct_rel);
4748 :
4749 2560 : return distinct_rel;
4750 : }
4751 :
4752 : /*
4753 : * create_partial_distinct_paths
4754 : *
4755 : * Process 'input_rel' partial paths and add unique/aggregate paths to the
4756 : * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4757 : * paths on top and add a final unique/aggregate path to remove any duplicates
4758 : * produced by combining rows from parallel workers.
4759 : */
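 : /*
 : * Illustrative sketch only (a possible plan shape, not a guaranteed
 : * outcome): for a query such as "SELECT DISTINCT a FROM tab" planned with
 : * parallel workers, the paths built here can end up as
 : *
 : *   Unique
 : *     -> Gather Merge
 : *       -> Unique
 : *         -> Sort
 : *           -> Parallel Seq Scan on tab
 : *
 : * i.e. each worker removes duplicates from its own rows, and a final
 : * unique step above the Gather Merge removes duplicates that remain after
 : * combining the workers' outputs.
 : */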
4760 : static void
4761 2560 : create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4762 : RelOptInfo *final_distinct_rel,
4763 : PathTarget *target)
4764 : {
4765 : RelOptInfo *partial_distinct_rel;
4766 : Query *parse;
4767 : List *distinctExprs;
4768 : double numDistinctRows;
4769 : Path *cheapest_partial_path;
4770 : ListCell *lc;
4771 :
4772 : /* nothing to do when there are no partial paths in the input rel */
4773 2560 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4774 2452 : return;
4775 :
4776 108 : parse = root->parse;
4777 :
4778 : /* can't do parallel DISTINCT ON */
4779 108 : if (parse->hasDistinctOn)
4780 0 : return;
4781 :
4782 108 : partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4783 : NULL);
4784 108 : partial_distinct_rel->reltarget = target;
4785 108 : partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4786 :
4787 : /*
4788 : * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4789 : */
4790 108 : partial_distinct_rel->serverid = input_rel->serverid;
4791 108 : partial_distinct_rel->userid = input_rel->userid;
4792 108 : partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4793 108 : partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4794 :
4795 108 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
4796 :
4797 108 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4798 : parse->targetList);
4799 :
4800 : /* estimate how many distinct rows we'll get from each worker */
4801 108 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4802 : cheapest_partial_path->rows,
4803 : NULL, NULL);
4804 :
4805 : /*
4806 : * Try sorting the cheapest path and incrementally sorting any paths with
4807 : * presorted keys, putting a unique path atop each of those. We'll also
4808 : * attempt to reorder the required pathkeys to match the input path's
4809 : * pathkeys as much as possible, in hopes of avoiding the need to
4810 : * re-sort.
4811 : */
4812 108 : if (grouping_is_sortable(root->processed_distinctClause))
4813 : {
4814 234 : foreach(lc, input_rel->partial_pathlist)
4815 : {
4816 126 : Path *input_path = (Path *) lfirst(lc);
4817 : Path *sorted_path;
4818 126 : List *useful_pathkeys_list = NIL;
4819 :
4820 : useful_pathkeys_list =
4821 126 : get_useful_pathkeys_for_distinct(root,
4822 : root->distinct_pathkeys,
4823 : input_path->pathkeys);
4824 : Assert(list_length(useful_pathkeys_list) > 0);
4825 :
4826 390 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4827 : {
4828 138 : sorted_path = make_ordered_path(root,
4829 : partial_distinct_rel,
4830 : input_path,
4831 : cheapest_partial_path,
4832 : useful_pathkeys,
4833 : -1.0);
4834 :
4835 138 : if (sorted_path == NULL)
4836 12 : continue;
4837 :
4838 : /*
4839 : * An empty distinct_pathkeys means all tuples have the same
4840 : * value for the DISTINCT clause. See
4841 : * create_final_distinct_paths()
4842 : */
4843 126 : if (root->distinct_pathkeys == NIL)
4844 : {
4845 : Node *limitCount;
4846 :
4847 6 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
4848 : sizeof(int64),
4849 : Int64GetDatum(1), false,
4850 : FLOAT8PASSBYVAL);
4851 :
4852 : /*
4853 : * Apply a LimitPath onto the partial path to restrict the
4854 : * tuples from each worker to 1.
4855 : * create_final_distinct_paths will need to apply an
4856 : * additional LimitPath to restrict this to a single row
4857 : * after the Gather node. If the query already has a
4858 : * LIMIT clause, then we could end up with three Limit
4859 : * nodes in the final plan. Consolidating the top two of
4860 : * these could be done, but does not seem worth troubling
4861 : * over.
4862 : */
4863 6 : add_partial_path(partial_distinct_rel, (Path *)
4864 6 : create_limit_path(root, partial_distinct_rel,
4865 : sorted_path,
4866 : NULL,
4867 : limitCount,
4868 : LIMIT_OPTION_COUNT,
4869 : 0, 1));
4870 : }
4871 : else
4872 : {
4873 120 : add_partial_path(partial_distinct_rel, (Path *)
4874 120 : create_upper_unique_path(root, partial_distinct_rel,
4875 : sorted_path,
4876 120 : list_length(root->distinct_pathkeys),
4877 : numDistinctRows));
4878 : }
4879 : }
4880 : }
4881 : }
4882 :
4883 : /*
4884 : * Now try hash aggregate paths, if enabled and hashing is possible. Since
4885 : * we're not on the hook to ensure we do our best to create at least one
4886 : * path here, we treat enable_hashagg as a hard off-switch rather than the
4887 : * slightly softer variant in create_final_distinct_paths.
4888 : */
4889 108 : if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
4890 : {
4891 78 : add_partial_path(partial_distinct_rel, (Path *)
4892 78 : create_agg_path(root,
4893 : partial_distinct_rel,
4894 : cheapest_partial_path,
4895 : cheapest_partial_path->pathtarget,
4896 : AGG_HASHED,
4897 : AGGSPLIT_SIMPLE,
4898 : root->processed_distinctClause,
4899 : NIL,
4900 : NULL,
4901 : numDistinctRows));
4902 : }
4903 :
4904 : /*
4905 : * If there is an FDW that's responsible for all baserels of the query,
4906 : * let it consider adding ForeignPaths.
4907 : */
4908 108 : if (partial_distinct_rel->fdwroutine &&
4909 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
4910 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4911 : UPPERREL_PARTIAL_DISTINCT,
4912 : input_rel,
4913 : partial_distinct_rel,
4914 : NULL);
4915 :
4916 : /* Let extensions possibly add some more partial paths */
4917 108 : if (create_upper_paths_hook)
4918 0 : (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
4919 : input_rel, partial_distinct_rel, NULL);
4920 :
4921 108 : if (partial_distinct_rel->partial_pathlist != NIL)
4922 : {
4923 108 : generate_useful_gather_paths(root, partial_distinct_rel, true);
4924 108 : set_cheapest(partial_distinct_rel);
4925 :
4926 : /*
4927 : * Finally, create paths to distinctify the final result. This step
4928 : * is needed to remove any duplicates due to combining rows from
4929 : * parallel workers.
4930 : */
4931 108 : create_final_distinct_paths(root, partial_distinct_rel,
4932 : final_distinct_rel);
4933 : }
4934 : }
4935 :
4936 : /*
4937 : * create_final_distinct_paths
4938 : * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
4939 : *
4940 : * input_rel: contains the source-data paths
4941 : * distinct_rel: destination relation for storing created paths
4942 : */
4943 : static RelOptInfo *
4944 2668 : create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4945 : RelOptInfo *distinct_rel)
4946 : {
4947 2668 : Query *parse = root->parse;
4948 2668 : Path *cheapest_input_path = input_rel->cheapest_total_path;
4949 : double numDistinctRows;
4950 : bool allow_hash;
4951 :
4952 : /* Estimate number of distinct rows there will be */
4953 2668 : if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
4954 2594 : root->hasHavingQual)
4955 : {
4956 : /*
4957 : * If there was grouping or aggregation, use the number of input rows
4958 : * as the estimated number of DISTINCT rows (ie, assume the input is
4959 : * already mostly unique).
4960 : */
4961 74 : numDistinctRows = cheapest_input_path->rows;
4962 : }
4963 : else
4964 : {
4965 : /*
4966 : * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
4967 : */
4968 : List *distinctExprs;
4969 :
4970 2594 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4971 : parse->targetList);
4972 2594 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4973 : cheapest_input_path->rows,
4974 : NULL, NULL);
4975 : }
4976 :
4977 : /*
4978 : * Consider sort-based implementations of DISTINCT, if possible.
4979 : */
4980 2668 : if (grouping_is_sortable(root->processed_distinctClause))
4981 : {
4982 : /*
4983 : * Firstly, if we have any adequately-presorted paths, just stick a
4984 : * Unique node on those. We also consider doing an explicit sort of
4985 : * the cheapest input path and Unique'ing that. If any paths have
4986 : * presorted keys then we'll create an incremental sort atop of those
4987 : * before adding a unique node on the top. We'll also attempt to
4988 : * reorder the required pathkeys to match the input path's pathkeys as
4989 : * much as possible, in hopes of avoiding a possible need to re-sort.
4990 : *
4991 : * When we have DISTINCT ON, we must sort by the more rigorous of
4992 : * DISTINCT and ORDER BY, else it won't have the desired behavior.
4993 : * Also, if we do have to do an explicit sort, we might as well use
4994 : * the more rigorous ordering to avoid a second sort later. (Note
4995 : * that the parser will have ensured that one clause is a prefix of
4996 : * the other.)
4997 : */
4998 : List *needed_pathkeys;
4999 : ListCell *lc;
5000 2662 : double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5001 :
5002 2902 : if (parse->hasDistinctOn &&
5003 240 : list_length(root->distinct_pathkeys) <
5004 240 : list_length(root->sort_pathkeys))
5005 54 : needed_pathkeys = root->sort_pathkeys;
5006 : else
5007 2608 : needed_pathkeys = root->distinct_pathkeys;
5008 :
5009 6818 : foreach(lc, input_rel->pathlist)
5010 : {
5011 4156 : Path *input_path = (Path *) lfirst(lc);
5012 : Path *sorted_path;
5013 4156 : List *useful_pathkeys_list = NIL;
5014 :
5015 : useful_pathkeys_list =
5016 4156 : get_useful_pathkeys_for_distinct(root,
5017 : needed_pathkeys,
5018 : input_path->pathkeys);
5019 : Assert(list_length(useful_pathkeys_list) > 0);
5020 :
5021 12952 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5022 : {
5023 4640 : sorted_path = make_ordered_path(root,
5024 : distinct_rel,
5025 : input_path,
5026 : cheapest_input_path,
5027 : useful_pathkeys,
5028 : limittuples);
5029 :
5030 4640 : if (sorted_path == NULL)
5031 542 : continue;
5032 :
5033 : /*
5034 : * distinct_pathkeys may have become empty if all of the
5035 : * pathkeys were determined to be redundant. If all of the
5036 : * pathkeys are redundant then each DISTINCT target must only
5037 : * allow a single value, therefore all resulting tuples must
5038 : * be identical (or at least indistinguishable by an equality
5039 : * check). We can uniquify these tuples simply by taking
5040 : * the first tuple. All we do here is add a path to do "LIMIT
5041 : * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5042 : * still have a non-NIL sort_pathkeys list, so we must still
5043 : * only do this with paths which are correctly sorted by
5044 : * sort_pathkeys.
5045 : */
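 : /*
 : * A hypothetical example of this case: in
 : * "SELECT DISTINCT a FROM tab WHERE a = 1" the pathkey on "a" is
 : * redundant because of the equality qual, so distinct_pathkeys ends
 : * up NIL and every qualifying row is identical for DISTINCT
 : * purposes; emitting a single row via LIMIT 1 is sufficient.
 : */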
5046 4098 : if (root->distinct_pathkeys == NIL)
5047 : {
5048 : Node *limitCount;
5049 :
5050 106 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5051 : sizeof(int64),
5052 : Int64GetDatum(1), false,
5053 : FLOAT8PASSBYVAL);
5054 :
5055 : /*
5056 : * If the query already has a LIMIT clause, then we could
5057 : * end up with a duplicate LimitPath in the final plan.
5058 : * That does not seem worth troubling over too much.
5059 : */
5060 106 : add_path(distinct_rel, (Path *)
5061 106 : create_limit_path(root, distinct_rel, sorted_path,
5062 : NULL, limitCount,
5063 : LIMIT_OPTION_COUNT, 0, 1));
5064 : }
5065 : else
5066 : {
5067 3992 : add_path(distinct_rel, (Path *)
5068 3992 : create_upper_unique_path(root, distinct_rel,
5069 : sorted_path,
5070 3992 : list_length(root->distinct_pathkeys),
5071 : numDistinctRows));
5072 : }
5073 : }
5074 : }
5075 : }
5076 :
5077 : /*
5078 : * Consider hash-based implementations of DISTINCT, if possible.
5079 : *
5080 : * If we were not able to make any other types of path, we *must* hash or
5081 : * die trying. If we do have other choices, there are two things that
5082 : * should prevent selection of hashing: if the query uses DISTINCT ON
5083 : * (because it won't really have the expected behavior if we hash), or if
5084 : * enable_hashagg is off.
5085 : *
5086 : * Note: grouping_is_hashable() is much more expensive to check than the
5087 : * other gating conditions, so we want to do it last.
5088 : */
5089 2668 : if (distinct_rel->pathlist == NIL)
5090 6 : allow_hash = true; /* we have no alternatives */
5091 2662 : else if (parse->hasDistinctOn || !enable_hashagg)
5092 390 : allow_hash = false; /* policy-based decision not to hash */
5093 : else
5094 2272 : allow_hash = true; /* default */
5095 :
5096 2668 : if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5097 : {
5098 : /* Generate hashed aggregate path --- no sort needed */
5099 2278 : add_path(distinct_rel, (Path *)
5100 2278 : create_agg_path(root,
5101 : distinct_rel,
5102 : cheapest_input_path,
5103 : cheapest_input_path->pathtarget,
5104 : AGG_HASHED,
5105 : AGGSPLIT_SIMPLE,
5106 : root->processed_distinctClause,
5107 : NIL,
5108 : NULL,
5109 : numDistinctRows));
5110 : }
5111 :
5112 2668 : return distinct_rel;
5113 : }
5114 :
5115 : /*
5116 : * get_useful_pathkeys_for_distinct
5117 : * Get useful orderings of pathkeys for distinctClause by reordering
5118 : * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5119 : *
5120 : * This returns a list of pathkeys that can be useful for DISTINCT or DISTINCT
5121 : * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5122 : */
5123 : static List *
5124 4282 : get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys,
5125 : List *path_pathkeys)
5126 : {
5127 4282 : List *useful_pathkeys_list = NIL;
5128 4282 : List *useful_pathkeys = NIL;
5129 :
5130 : /* always include the given 'needed_pathkeys' */
5131 4282 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5132 : needed_pathkeys);
5133 :
5134 4282 : if (!enable_distinct_reordering)
5135 0 : return useful_pathkeys_list;
5136 :
5137 : /*
5138 : * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5139 : * that match 'needed_pathkeys', but only up to the longest matching
5140 : * prefix.
5141 : *
5142 : * When we have DISTINCT ON, we must ensure that the resulting pathkey
5143 : * list matches the initial distinctClause pathkeys; otherwise, it won't have
5144 : * the desired behavior.
5145 : */
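 : /*
 : * A small worked example (hypothetical, assuming plain DISTINCT): with
 : * needed_pathkeys = (a, b) and path_pathkeys = (b, c), only "b" matches,
 : * so the loop collects (b); appending the remaining needed keys below
 : * yields (b, a), which lets the caller reuse the path's existing order
 : * on "b" and incrementally sort on "a" instead of sorting from scratch.
 : */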
5146 10462 : foreach_node(PathKey, pathkey, path_pathkeys)
5147 : {
5148 : /*
5149 : * The PathKey nodes are canonical, so they can be checked for
5150 : * equality by simple pointer comparison.
5151 : */
5152 1926 : if (!list_member_ptr(needed_pathkeys, pathkey))
5153 10 : break;
5154 1916 : if (root->parse->hasDistinctOn &&
5155 204 : !list_member_ptr(root->distinct_pathkeys, pathkey))
5156 18 : break;
5157 :
5158 1898 : useful_pathkeys = lappend(useful_pathkeys, pathkey);
5159 : }
5160 :
5161 : /* If no match at all, no point in reordering needed_pathkeys */
5162 4282 : if (useful_pathkeys == NIL)
5163 2648 : return useful_pathkeys_list;
5164 :
5165 : /*
5166 : * If the match is not complete, the resulting pathkey list is not useful
5167 : * without incremental sort.
5168 : */
5169 1634 : if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5170 890 : !enable_incremental_sort)
5171 60 : return useful_pathkeys_list;
5172 :
5173 : /* Append the remaining PathKey nodes in needed_pathkeys */
5174 1574 : useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5175 : needed_pathkeys);
5176 :
5177 : /*
5178 : * If the resulting pathkey list is the same as the 'needed_pathkeys',
5179 : * just drop it.
5180 : */
5181 1574 : if (compare_pathkeys(needed_pathkeys,
5182 : useful_pathkeys) == PATHKEYS_EQUAL)
5183 1078 : return useful_pathkeys_list;
5184 :
5185 496 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5186 : useful_pathkeys);
5187 :
5188 496 : return useful_pathkeys_list;
5189 : }
5190 :
5191 : /*
5192 : * create_ordered_paths
5193 : *
5194 : * Build a new upperrel containing Paths for ORDER BY evaluation.
5195 : *
5196 : * All paths in the result must satisfy the ORDER BY ordering.
5197 : * The only new paths we need consider are an explicit full sort
5198 : * and incremental sort on the cheapest-total existing path.
5199 : *
5200 : * input_rel: contains the source-data Paths
5201 : * target: the output tlist the result Paths must emit
5202 : * limit_tuples: estimated bound on the number of output tuples,
5203 : * or -1 if no LIMIT or couldn't estimate
5204 : *
5205 : * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5206 : * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5207 : */
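 : /*
 : * For instance (illustrative only): if an input path is already sorted by
 : * (a) and the query has ORDER BY a, b, the loop below will put an
 : * Incremental Sort on top (when enable_incremental_sort allows it) rather
 : * than a full Sort, since the leading pathkey is already satisfied.
 : */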
5208 : static RelOptInfo *
5209 68304 : create_ordered_paths(PlannerInfo *root,
5210 : RelOptInfo *input_rel,
5211 : PathTarget *target,
5212 : bool target_parallel_safe,
5213 : double limit_tuples)
5214 : {
5215 68304 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5216 : RelOptInfo *ordered_rel;
5217 : ListCell *lc;
5218 :
5219 : /* For now, do all work in the (ORDERED, NULL) upperrel */
5220 68304 : ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5221 :
5222 : /*
5223 : * If the input relation is not parallel-safe, then the ordered relation
5224 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5225 : * target list is parallel-safe.
5226 : */
5227 68304 : if (input_rel->consider_parallel && target_parallel_safe)
5228 46124 : ordered_rel->consider_parallel = true;
5229 :
5230 : /*
5231 : * If the input rel belongs to a single FDW, so does the ordered_rel.
5232 : */
5233 68304 : ordered_rel->serverid = input_rel->serverid;
5234 68304 : ordered_rel->userid = input_rel->userid;
5235 68304 : ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5236 68304 : ordered_rel->fdwroutine = input_rel->fdwroutine;
5237 :
5238 170352 : foreach(lc, input_rel->pathlist)
5239 : {
5240 102048 : Path *input_path = (Path *) lfirst(lc);
5241 : Path *sorted_path;
5242 : bool is_sorted;
5243 : int presorted_keys;
5244 :
5245 102048 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5246 : input_path->pathkeys, &presorted_keys);
5247 :
5248 102048 : if (is_sorted)
5249 36464 : sorted_path = input_path;
5250 : else
5251 : {
5252 : /*
5253 : * Try at least sorting the cheapest path and also try
5254 : * incrementally sorting any path which is partially sorted
5255 : * already (no need to deal with paths which have presorted keys
5256 : * when incremental sort is disabled unless it's the cheapest
5257 : * input path).
5258 : */
5259 65584 : if (input_path != cheapest_input_path &&
5260 5766 : (presorted_keys == 0 || !enable_incremental_sort))
5261 1796 : continue;
5262 :
5263 : /*
5264 : * We've no need to consider both a sort and incremental sort.
5265 : * We'll just do a sort if there are no presorted keys and an
5266 : * incremental sort when there are presorted keys.
5267 : */
5268 63788 : if (presorted_keys == 0 || !enable_incremental_sort)
5269 59392 : sorted_path = (Path *) create_sort_path(root,
5270 : ordered_rel,
5271 : input_path,
5272 : root->sort_pathkeys,
5273 : limit_tuples);
5274 : else
5275 4396 : sorted_path = (Path *) create_incremental_sort_path(root,
5276 : ordered_rel,
5277 : input_path,
5278 : root->sort_pathkeys,
5279 : presorted_keys,
5280 : limit_tuples);
5281 : }
5282 :
5283 : /*
5284 : * If the pathtarget of the result path has different expressions from
5285 : * the target to be applied, a projection step is needed.
5286 : */
5287 100252 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5288 294 : sorted_path = apply_projection_to_path(root, ordered_rel,
5289 : sorted_path, target);
5290 :
5291 100252 : add_path(ordered_rel, sorted_path);
5292 : }
5293 :
5294 : /*
5295 : * generate_gather_paths() will have already generated a simple Gather
5296 : * path for the best parallel path, if any, and the loop above will have
5297 : * considered sorting it. Similarly, generate_gather_paths() will also
5298 : * have generated order-preserving Gather Merge plans which can be used
5299 : * without sorting if they happen to match the sort_pathkeys, and the loop
5300 : * above will have handled those as well. However, there's one more
5301 : * possibility: it may make sense to sort the cheapest partial path or
5302 : * incrementally sort any partial path that is partially sorted according
5303 : * to the required output order and then use Gather Merge.
5304 : */
5305 68304 : if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5306 45986 : input_rel->partial_pathlist != NIL)
5307 : {
5308 : Path *cheapest_partial_path;
5309 :
5310 2216 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
5311 :
5312 4638 : foreach(lc, input_rel->partial_pathlist)
5313 : {
5314 2422 : Path *input_path = (Path *) lfirst(lc);
5315 : Path *sorted_path;
5316 : bool is_sorted;
5317 : int presorted_keys;
5318 : double total_groups;
5319 :
5320 2422 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5321 : input_path->pathkeys,
5322 : &presorted_keys);
5323 :
5324 2422 : if (is_sorted)
5325 182 : continue;
5326 :
5327 : /*
5328 : * Try at least sorting the cheapest path and also try
5329 : * incrementally sorting any path which is partially sorted
5330 : * already (no need to deal with paths which have presorted keys
5331 : * when incremental sort is disabled unless it's the cheapest
5332 : * partial path).
5333 : */
5334 2240 : if (input_path != cheapest_partial_path &&
5335 42 : (presorted_keys == 0 || !enable_incremental_sort))
5336 0 : continue;
5337 :
5338 : /*
5339 : * We've no need to consider both a sort and incremental sort.
5340 : * We'll just do a sort if there are no presorted keys and an
5341 : * incremental sort when there are presorted keys.
5342 : */
5343 2240 : if (presorted_keys == 0 || !enable_incremental_sort)
5344 2180 : sorted_path = (Path *) create_sort_path(root,
5345 : ordered_rel,
5346 : input_path,
5347 : root->sort_pathkeys,
5348 : limit_tuples);
5349 : else
5350 60 : sorted_path = (Path *) create_incremental_sort_path(root,
5351 : ordered_rel,
5352 : input_path,
5353 : root->sort_pathkeys,
5354 : presorted_keys,
5355 : limit_tuples);
5356 2240 : total_groups = compute_gather_rows(sorted_path);
5357 : sorted_path = (Path *)
5358 2240 : create_gather_merge_path(root, ordered_rel,
5359 : sorted_path,
5360 : sorted_path->pathtarget,
5361 : root->sort_pathkeys, NULL,
5362 : &total_groups);
5363 :
5364 : /*
5365 : * If the pathtarget of the result path has different expressions
5366 : * from the target to be applied, a projection step is needed.
5367 : */
5368 2240 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5369 6 : sorted_path = apply_projection_to_path(root, ordered_rel,
5370 : sorted_path, target);
5371 :
5372 2240 : add_path(ordered_rel, sorted_path);
5373 : }
5374 : }
5375 :
5376 : /*
5377 : * If there is an FDW that's responsible for all baserels of the query,
5378 : * let it consider adding ForeignPaths.
5379 : */
5380 68304 : if (ordered_rel->fdwroutine &&
5381 384 : ordered_rel->fdwroutine->GetForeignUpperPaths)
5382 370 : ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5383 : input_rel, ordered_rel,
5384 : NULL);
5385 :
5386 : /* Let extensions possibly add some more paths */
5387 68304 : if (create_upper_paths_hook)
5388 0 : (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5389 : input_rel, ordered_rel, NULL);
5390 :
5391 : /*
5392 : * No need to bother with set_cheapest here; grouping_planner does not
5393 : * need us to do it.
5394 : */
5395 : Assert(ordered_rel->pathlist != NIL);
5396 :
5397 68304 : return ordered_rel;
5398 : }
5399 :
5400 :
5401 : /*
5402 : * make_group_input_target
5403 : * Generate appropriate PathTarget for initial input to grouping nodes.
5404 : *
5405 : * If there is grouping or aggregation, the scan/join subplan cannot emit
5406 : * the query's final targetlist; for example, it certainly can't emit any
5407 : * aggregate function calls. This routine generates the correct target
5408 : * for the scan/join subplan.
5409 : *
5410 : * The query target list passed from the parser already contains entries
5411 : * for all ORDER BY and GROUP BY expressions, but it will not have entries
5412 : * for variables used only in HAVING clauses; so we need to add those
5413 : * variables to the subplan target list. Also, we flatten all expressions
5414 : * except GROUP BY items into their component variables; other expressions
5415 : * will be computed by the upper plan nodes rather than by the subplan.
5416 : * For example, given a query like
5417 : * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5418 : * we want to pass this targetlist to the subplan:
5419 : * a+b,c,d
5420 : * where the a+b target will be used by the Sort/Group steps, and the
5421 : * other targets will be used for computing the final results.
5422 : *
5423 : * 'final_target' is the query's final target list (in PathTarget form)
5424 : *
5425 : * The result is the PathTarget to be computed by the Paths returned from
5426 : * query_planner().
5427 : */
5428 : static PathTarget *
5429 38330 : make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5430 : {
5431 38330 : Query *parse = root->parse;
5432 : PathTarget *input_target;
5433 : List *non_group_cols;
5434 : List *non_group_vars;
5435 : int i;
5436 : ListCell *lc;
5437 :
5438 : /*
5439 : * We must build a target containing all grouping columns, plus any other
5440 : * Vars mentioned in the query's targetlist and HAVING qual.
5441 : */
5442 38330 : input_target = create_empty_pathtarget();
5443 38330 : non_group_cols = NIL;
5444 :
5445 38330 : i = 0;
5446 93526 : foreach(lc, final_target->exprs)
5447 : {
5448 55196 : Expr *expr = (Expr *) lfirst(lc);
5449 55196 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
5450 :
5451 63942 : if (sgref && root->processed_groupClause &&
5452 8746 : get_sortgroupref_clause_noerr(sgref,
5453 : root->processed_groupClause) != NULL)
5454 : {
5455 : /*
5456 : * It's a grouping column, so add it to the input target as-is.
5457 : *
5458 : * Note that the target is logically below the grouping step. So
5459 : * with grouping sets we need to remove the RT index of the
5460 : * grouping step if there is any from the target expression.
5461 : */
5462 7004 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5463 : {
5464 : Assert(root->group_rtindex > 0);
5465 : expr = (Expr *)
5466 1824 : remove_nulling_relids((Node *) expr,
5467 1824 : bms_make_singleton(root->group_rtindex),
5468 : NULL);
5469 : }
5470 7004 : add_column_to_pathtarget(input_target, expr, sgref);
5471 : }
5472 : else
5473 : {
5474 : /*
5475 : * Non-grouping column, so just remember the expression for later
5476 : * call to pull_var_clause.
5477 : */
5478 48192 : non_group_cols = lappend(non_group_cols, expr);
5479 : }
5480 :
5481 55196 : i++;
5482 : }
5483 :
5484 : /*
5485 : * If there's a HAVING clause, we'll need the Vars it uses, too.
5486 : */
5487 38330 : if (parse->havingQual)
5488 882 : non_group_cols = lappend(non_group_cols, parse->havingQual);
5489 :
5490 : /*
5491 : * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5492 : * add them to the input target if not already present. (A Var used
5493 : * directly as a GROUP BY item will be present already.) Note this
5494 : * includes Vars used in resjunk items, so we are covering the needs of
5495 : * ORDER BY and window specifications. Vars used within Aggrefs and
5496 : * WindowFuncs will be pulled out here, too.
5497 : *
5498 : * Note that the target is logically below the grouping step. So with
5499 : * grouping sets we need to remove the RT index of the grouping step if
5500 : * there is any from the non-group Vars.
5501 : */
5502 38330 : non_group_vars = pull_var_clause((Node *) non_group_cols,
5503 : PVC_RECURSE_AGGREGATES |
5504 : PVC_RECURSE_WINDOWFUNCS |
5505 : PVC_INCLUDE_PLACEHOLDERS);
5506 38330 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5507 : {
5508 : Assert(root->group_rtindex > 0);
5509 : non_group_vars = (List *)
5510 830 : remove_nulling_relids((Node *) non_group_vars,
5511 830 : bms_make_singleton(root->group_rtindex),
5512 : NULL);
5513 : }
5514 38330 : add_new_columns_to_pathtarget(input_target, non_group_vars);
5515 :
5516 : /* clean up cruft */
5517 38330 : list_free(non_group_vars);
5518 38330 : list_free(non_group_cols);
5519 :
5520 : /* XXX this causes some redundant cost calculation ... */
5521 38330 : return set_pathtarget_cost_width(root, input_target);
5522 : }
5523 :
5524 : /*
5525 : * make_partial_grouping_target
5526 : * Generate appropriate PathTarget for output of partial aggregate
5527 : * (or partial grouping, if there are no aggregates) nodes.
5528 : *
5529 : * A partial aggregation node needs to emit all the same aggregates that
5530 : * a regular aggregation node would, plus any aggregates used in HAVING;
5531 : * except that the Aggref nodes should be marked as partial aggregates.
5532 : *
5533 : * In addition, we'd better emit any Vars and PlaceHolderVars that are
5534 : * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5535 : * these would be Vars that are grouped by or used in grouping expressions.)
5536 : *
5537 : * grouping_target is the tlist to be emitted by the topmost aggregation step.
5538 : * havingQual represents the HAVING clause.
5539 : */
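 : /*
 : * A rough example (hypothetical query): for
 : *   SELECT a, sum(b) FROM tab GROUP BY a HAVING max(c) > 0;
 : * the partial target contains the grouping column "a" plus partial-mode
 : * Aggrefs for sum(b) and max(c), so that each worker can emit transition
 : * states that the upper (finalizing) aggregation step combines.
 : */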
5540 : static PathTarget *
5541 2198 : make_partial_grouping_target(PlannerInfo *root,
5542 : PathTarget *grouping_target,
5543 : Node *havingQual)
5544 : {
5545 : PathTarget *partial_target;
5546 : List *non_group_cols;
5547 : List *non_group_exprs;
5548 : int i;
5549 : ListCell *lc;
5550 :
5551 2198 : partial_target = create_empty_pathtarget();
5552 2198 : non_group_cols = NIL;
5553 :
5554 2198 : i = 0;
5555 7814 : foreach(lc, grouping_target->exprs)
5556 : {
5557 5616 : Expr *expr = (Expr *) lfirst(lc);
5558 5616 : Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5559 :
5560 9436 : if (sgref && root->processed_groupClause &&
5561 3820 : get_sortgroupref_clause_noerr(sgref,
5562 : root->processed_groupClause) != NULL)
5563 : {
5564 : /*
5565 : * It's a grouping column, so add it to the partial_target as-is.
5566 : * (This allows the upper agg step to repeat the grouping calcs.)
5567 : */
5568 1906 : add_column_to_pathtarget(partial_target, expr, sgref);
5569 : }
5570 : else
5571 : {
5572 : /*
5573 : * Non-grouping column, so just remember the expression for later
5574 : * call to pull_var_clause.
5575 : */
5576 3710 : non_group_cols = lappend(non_group_cols, expr);
5577 : }
5578 :
5579 5616 : i++;
5580 : }
5581 :
5582 : /*
5583 : * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5584 : */
5585 2198 : if (havingQual)
5586 824 : non_group_cols = lappend(non_group_cols, havingQual);
5587 :
5588 : /*
5589 : * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5590 : * non-group cols (plus HAVING), and add them to the partial_target if not
5591 : * already present. (An expression used directly as a GROUP BY item will
5592 : * be present already.) Note this includes Vars used in resjunk items, so
5593 : * we are covering the needs of ORDER BY and window specifications.
5594 : */
5595 2198 : non_group_exprs = pull_var_clause((Node *) non_group_cols,
5596 : PVC_INCLUDE_AGGREGATES |
5597 : PVC_RECURSE_WINDOWFUNCS |
5598 : PVC_INCLUDE_PLACEHOLDERS);
5599 :
5600 2198 : add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5601 :
5602 : /*
5603 : * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5604 : * are at the top level of the target list, so we can just scan the list
5605 : * rather than recursing through the expression trees.
5606 : */
5607 8386 : foreach(lc, partial_target->exprs)
5608 : {
5609 6188 : Aggref *aggref = (Aggref *) lfirst(lc);
5610 :
5611 6188 : if (IsA(aggref, Aggref))
5612 : {
5613 : Aggref *newaggref;
5614 :
5615 : /*
5616 : * We shouldn't need to copy the substructure of the Aggref node,
5617 : * but flat-copy the node itself to avoid damaging other trees.
5618 : */
5619 4252 : newaggref = makeNode(Aggref);
5620 4252 : memcpy(newaggref, aggref, sizeof(Aggref));
5621 :
5622 : /* For now, assume serialization is required */
5623 4252 : mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5624 :
5625 4252 : lfirst(lc) = newaggref;
5626 : }
5627 : }
5628 :
5629 : /* clean up cruft */
5630 2198 : list_free(non_group_exprs);
5631 2198 : list_free(non_group_cols);
5632 :
5633 : /* XXX this causes some redundant cost calculation ... */
5634 2198 : return set_pathtarget_cost_width(root, partial_target);
5635 : }
5636 :
5637 : /*
5638 : * mark_partial_aggref
5639 : * Adjust an Aggref to make it represent a partial-aggregation step.
5640 : *
5641 : * The Aggref node is modified in-place; caller must do any copying required.
5642 : */
5643 : void
5644 7060 : mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5645 : {
5646 : /* aggtranstype should be computed by this point */
5647 : Assert(OidIsValid(agg->aggtranstype));
5648 : /* ... but aggsplit should still be as the parser left it */
5649 : Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5650 :
5651 : /* Mark the Aggref with the intended partial-aggregation mode */
5652 7060 : agg->aggsplit = aggsplit;
5653 :
5654 : /*
5655 : * Adjust result type if needed. Normally, a partial aggregate returns
5656 : * the aggregate's transition type; but if that's INTERNAL and we're
5657 : * serializing, it returns BYTEA instead.
5658 : */
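 : /*
 : * For example (subject to the aggregate's catalog definition): a partial
 : * sum(int4), whose transition type is int8, returns int8 here, while an
 : * aggregate with an INTERNAL transition state returns bytea when
 : * serialization is requested.
 : */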
5659 7060 : if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5660 : {
5661 5656 : if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5662 242 : agg->aggtype = BYTEAOID;
5663 : else
5664 5414 : agg->aggtype = agg->aggtranstype;
5665 : }
5666 7060 : }
5667 :
5668 : /*
5669 : * postprocess_setop_tlist
5670 : * Fix up targetlist returned by plan_set_operations().
5671 : *
5672 : * We need to transpose sort key info from the orig_tlist into new_tlist.
5673 : * NOTE: this would not be good enough if we supported resjunk sort keys
5674 : * for results of set operations --- then, we'd need to project a whole
5675 : * new tlist to evaluate the resjunk columns. For now, just ereport if we
5676 : * find any resjunk columns in orig_tlist.
5677 : */
5678 : static List *
5679 5890 : postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5680 : {
5681 : ListCell *l;
5682 5890 : ListCell *orig_tlist_item = list_head(orig_tlist);
5683 :
5684 22866 : foreach(l, new_tlist)
5685 : {
5686 16976 : TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5687 : TargetEntry *orig_tle;
5688 :
5689 : /* ignore resjunk columns in setop result */
5690 16976 : if (new_tle->resjunk)
5691 0 : continue;
5692 :
5693 : Assert(orig_tlist_item != NULL);
5694 16976 : orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5695 16976 : orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5696 16976 : if (orig_tle->resjunk) /* should not happen */
5697 0 : elog(ERROR, "resjunk output columns are not implemented");
5698 : Assert(new_tle->resno == orig_tle->resno);
5699 16976 : new_tle->ressortgroupref = orig_tle->ressortgroupref;
5700 : }
5701 5890 : if (orig_tlist_item != NULL)
5702 0 : elog(ERROR, "resjunk output columns are not implemented");
5703 5890 : return new_tlist;
5704 : }
5705 :
5706 : /*
5707 : * optimize_window_clauses
5708 : * Call each WindowFunc's prosupport function to see if we're able to
5709 : * make any adjustments to any of the WindowClauses so that the executor
5710 : * can execute the window functions more efficiently.
5711 : *
5712 : * Currently we only allow adjustments to the WindowClause's frameOptions. We
5713 : * may allow more things to be done here in the future.
5714 : */
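 : /*
 : * For instance (as an illustration; the exact adjustment is up to each
 : * support function): row_number()'s support function can report that the
 : * frame is irrelevant to its result, allowing the frameOptions to be
 : * replaced with a cheaper ROWS UNBOUNDED PRECEDING .. CURRENT ROW frame.
 : */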
5715 : static void
5716 2378 : optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5717 : {
5718 2378 : List *windowClause = root->parse->windowClause;
5719 : ListCell *lc;
5720 :
5721 4984 : foreach(lc, windowClause)
5722 : {
5723 2606 : WindowClause *wc = lfirst_node(WindowClause, lc);
5724 : ListCell *lc2;
5725 2606 : int optimizedFrameOptions = 0;
5726 :
5727 : Assert(wc->winref <= wflists->maxWinRef);
5728 :
5729 : /* skip any WindowClauses that have no WindowFuncs */
5730 2606 : if (wflists->windowFuncs[wc->winref] == NIL)
5731 24 : continue;
5732 :
5733 3122 : foreach(lc2, wflists->windowFuncs[wc->winref])
5734 : {
5735 : SupportRequestOptimizeWindowClause req;
5736 : SupportRequestOptimizeWindowClause *res;
5737 2624 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5738 : Oid prosupport;
5739 :
5740 2624 : prosupport = get_func_support(wfunc->winfnoid);
5741 :
5742 : /* Check if there's a support function for 'wfunc' */
5743 2624 : if (!OidIsValid(prosupport))
5744 2084 : break; /* can't optimize this WindowClause */
5745 :
5746 760 : req.type = T_SupportRequestOptimizeWindowClause;
5747 760 : req.window_clause = wc;
5748 760 : req.window_func = wfunc;
5749 760 : req.frameOptions = wc->frameOptions;
5750 :
5751 : /* call the support function */
5752 : res = (SupportRequestOptimizeWindowClause *)
5753 760 : DatumGetPointer(OidFunctionCall1(prosupport,
5754 : PointerGetDatum(&req)));
5755 :
5756 : /*
5757 : * Skip to next WindowClause if the support function does not
5758 : * support this request type.
5759 : */
5760 760 : if (res == NULL)
5761 220 : break;
5762 :
5763 : /*
5764 : * Save these frameOptions for the first WindowFunc for this
5765 : * WindowClause.
5766 : */
5767 540 : if (foreach_current_index(lc2) == 0)
5768 516 : optimizedFrameOptions = res->frameOptions;
5769 :
5770 : /*
5771 : * On subsequent WindowFuncs, if the frameOptions are not the same
5772 : * then we're unable to optimize the frameOptions for this
5773 : * WindowClause.
5774 : */
5775 24 : else if (optimizedFrameOptions != res->frameOptions)
5776 0 : break; /* skip to the next WindowClause, if any */
5777 : }
5778 :
5779 : /* adjust the frameOptions if all WindowFuncs agree that it's ok */
5780 2582 : if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5781 : {
5782 : ListCell *lc3;
5783 :
5784 : /* apply the new frame options */
5785 498 : wc->frameOptions = optimizedFrameOptions;
5786 :
5787 : /*
5788 : * We now check to see if changing the frameOptions has caused
5789 : * this WindowClause to be a duplicate of some other WindowClause.
5790 : * This can only happen if we have multiple WindowClauses, so
5791 : * don't bother if there's only 1.
5792 : */
5793 498 : if (list_length(windowClause) == 1)
5794 408 : continue;
5795 :
5796 : /*
5797 : * Do the duplicate check and reuse the existing WindowClause if
5798 : * we find a duplicate.
5799 : */
5800 228 : foreach(lc3, windowClause)
5801 : {
5802 174 : WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5803 :
5804 : /* skip over the WindowClause we're currently editing */
5805 174 : if (existing_wc == wc)
5806 54 : continue;
5807 :
5808 : /*
5809 : * Perform the same duplicate check that is done in
5810 : * transformWindowFuncCall.
5811 : */
5812 240 : if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5813 120 : equal(wc->orderClause, existing_wc->orderClause) &&
5814 120 : wc->frameOptions == existing_wc->frameOptions &&
5815 72 : equal(wc->startOffset, existing_wc->startOffset) &&
5816 36 : equal(wc->endOffset, existing_wc->endOffset))
5817 : {
5818 : ListCell *lc4;
5819 :
5820 : /*
5821 : * Now move each WindowFunc in 'wc' into 'existing_wc'.
5822 : * This requires adjusting each WindowFunc's winref and
5823 : * moving the WindowFuncs in 'wc' to the list of
5824 : * WindowFuncs in 'existing_wc'.
5825 : */
5826 78 : foreach(lc4, wflists->windowFuncs[wc->winref])
5827 : {
5828 42 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
5829 :
5830 42 : wfunc->winref = existing_wc->winref;
5831 : }
5832 :
5833 : /* move list items */
5834 72 : wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
5835 36 : wflists->windowFuncs[wc->winref]);
5836 36 : wflists->windowFuncs[wc->winref] = NIL;
5837 :
5838 : /*
5839 : * transformWindowFuncCall() should have made sure there
5840 : * are no other duplicates, so we needn't bother looking
5841 : * any further.
5842 : */
5843 36 : break;
5844 : }
5845 : }
5846 : }
5847 : }
5848 2378 : }
5849 :
5850 : /*
5851 : * select_active_windows
5852 : * Create a list of the "active" window clauses (ie, those referenced
5853 : * by non-deleted WindowFuncs) in the order they are to be executed.
5854 : */
5855 : static List *
5856 2378 : select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
5857 : {
5858 2378 : List *windowClause = root->parse->windowClause;
5859 2378 : List *result = NIL;
5860 : ListCell *lc;
5861 2378 : int nActive = 0;
5862 2378 : WindowClauseSortData *actives = palloc(sizeof(WindowClauseSortData)
5863 2378 : * list_length(windowClause));
5864 :
5865 : /* First, construct an array of the active windows */
5866 4984 : foreach(lc, windowClause)
5867 : {
5868 2606 : WindowClause *wc = lfirst_node(WindowClause, lc);
5869 :
5870 : /* It's only active if wflists shows some related WindowFuncs */
5871 : Assert(wc->winref <= wflists->maxWinRef);
5872 2606 : if (wflists->windowFuncs[wc->winref] == NIL)
5873 60 : continue;
5874 :
5875 2546 : actives[nActive].wc = wc; /* original clause */
5876 :
5877 : /*
5878 : * For sorting, we want the list of partition keys followed by the
5879 : * list of sort keys. But pathkeys construction will remove duplicates
5880 : * between the two, so we can as well (even though we can't detect all
5881 : * of the duplicates, since some may come from ECs - that might mean
5882 : * we miss optimization chances here). We must, however, ensure that
5883 : * the order of entries is preserved with respect to the ones we do
5884 : * keep.
5885 : *
5886 : * partitionClause and orderClause had their own duplicates removed in
5887 : * parse analysis, so we're only concerned here with removing
5888 : * orderClause entries that also appear in partitionClause.
5889 : */
5890 5092 : actives[nActive].uniqueOrder =
5891 2546 : list_concat_unique(list_copy(wc->partitionClause),
5892 2546 : wc->orderClause);
5893 2546 : nActive++;
5894 : }
5895 :
5896 : /*
5897 : * Sort active windows by their partitioning/ordering clauses, ignoring
5898 : * any framing clauses, so that the windows that need the same sorting are
5899 : * adjacent in the list. When we come to generate paths, this will avoid
5900 : * inserting additional Sort nodes.
5901 : *
5902 : * This is how we implement a specific requirement from the SQL standard,
5903 : * which says that when two or more windows are order-equivalent (i.e.
5904 : * have matching partition and order clauses, even if their names or
5905 : * framing clauses differ), then all peer rows must be presented in the
5906 : * same order in all of them. If we allowed multiple sort nodes for such
5907 : * cases, we'd risk having the peer rows end up in different orders in
5908 : * equivalent windows due to sort instability. (See General Rule 4 of
5909 : * <window clause> in SQL2008 - SQL2016.)
5910 : *
5911 : * Additionally, if the entire list of clauses of one window is a prefix
5912 : * of another, put first the window with stronger sorting requirements.
5913 : * This way we will first sort for the stronger window, and won't have to sort
5914 : * again for the weaker one.
5915 : */
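 : /*
 : * Hypothetical example: with w1 AS (PARTITION BY a ORDER BY b) and
 : * w2 AS (PARTITION BY a), w2's sort requirement (a) is a prefix of
 : * w1's (a, b), so w1 is placed first; the rows are then already in a
 : * usable order when the WindowAgg for w2 is planned, and no second
 : * sort is needed.
 : */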
5916 2378 : qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
5917 :
5918 : /* build ordered list of the original WindowClause nodes */
5919 4924 : for (int i = 0; i < nActive; i++)
5920 2546 : result = lappend(result, actives[i].wc);
5921 :
5922 2378 : pfree(actives);
5923 :
5924 2378 : return result;
5925 : }
5926 :
5927 : /*
5928 : * name_active_windows
5929 : * Ensure all active windows have unique names.
5930 : *
5931 : * The parser will have checked that user-assigned window names are unique
5932 : * within the Query. Here we assign made-up names to any unnamed
5933 : * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
5934 : * at parse time, because it'd mess up decompilation of views.)
5935 : *
5936 : * activeWindows: result of select_active_windows
5937 : */
5938 : static void
5939 2378 : name_active_windows(List *activeWindows)
5940 : {
5941 2378 : int next_n = 1;
5942 : char newname[16];
5943 : ListCell *lc;
5944 :
5945 4924 : foreach(lc, activeWindows)
5946 : {
5947 2546 : WindowClause *wc = lfirst_node(WindowClause, lc);
5948 :
5949 : /* Nothing to do if it has a name already. */
5950 2546 : if (wc->name)
5951 498 : continue;
5952 :
5953 : /* Select a name not currently present in the list. */
5954 : for (;;)
5955 6 : {
5956 : ListCell *lc2;
5957 :
5958 2054 : snprintf(newname, sizeof(newname), "w%d", next_n++);
5959 4456 : foreach(lc2, activeWindows)
5960 : {
5961 2408 : WindowClause *wc2 = lfirst_node(WindowClause, lc2);
5962 :
5963 2408 : if (wc2->name && strcmp(wc2->name, newname) == 0)
5964 6 : break; /* matched */
5965 : }
5966 2054 : if (lc2 == NULL)
5967 2048 : break; /* reached the end with no match */
5968 : }
5969 2048 : wc->name = pstrdup(newname);
5970 : }
5971 2378 : }
5972 :
5973 : /*
5974 : * common_prefix_cmp
5975 : * QSort comparison function for WindowClauseSortData
5976 : *
5977 : * Sort the windows by the required sorting clauses. First, compare the sort
5978 : * clauses themselves. Second, if one window's clauses are a prefix of another
5979 : * one's clauses, put the window with more sort clauses first.
5980 : *
5981 : * We purposefully sort by the highest tleSortGroupRef first. Since
5982 : * tleSortGroupRefs are assigned for the query's DISTINCT and ORDER BY first
5983 : * and because here we sort the lowest tleSortGroupRefs last, if a
5984 : * WindowClause is sharing a tleSortGroupRef with the query's DISTINCT or
5985 : * ORDER BY clause, this makes it more likely that the final WindowAgg will
5986 : * provide presorted input for the query's DISTINCT or ORDER BY clause, thus
5987 : * reducing the total number of sorts required for the query.
5988 : */
5989 : static int
5990 186 : common_prefix_cmp(const void *a, const void *b)
5991 : {
5992 186 : const WindowClauseSortData *wcsa = a;
5993 186 : const WindowClauseSortData *wcsb = b;
5994 : ListCell *item_a;
5995 : ListCell *item_b;
5996 :
5997 330 : forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
5998 : {
5999 246 : SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6000 246 : SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6001 :
6002 246 : if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6003 102 : return -1;
6004 234 : else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6005 66 : return 1;
6006 168 : else if (sca->sortop > scb->sortop)
6007 0 : return -1;
6008 168 : else if (sca->sortop < scb->sortop)
6009 24 : return 1;
6010 144 : else if (sca->nulls_first && !scb->nulls_first)
6011 0 : return -1;
6012 144 : else if (!sca->nulls_first && scb->nulls_first)
6013 0 : return 1;
6014 : /* no need to compare eqop, since it is fully determined by sortop */
6015 : }
6016 :
6017 84 : if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6018 6 : return -1;
6019 78 : else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6020 30 : return 1;
6021 :
6022 48 : return 0;
6023 : }
6024 :
6025 : /*
6026 : * make_window_input_target
6027 : * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6028 : *
6029 : * When the query has window functions, this function computes the desired
6030 : * target to be computed by the node just below the first WindowAgg.
6031 : * This tlist must contain all values needed to evaluate the window functions,
6032 : * compute the final target list, and perform any required final sort step.
6033 : * If multiple WindowAggs are needed, each intermediate one adds its window
6034 : * function results onto this base tlist; only the topmost WindowAgg computes
6035 : * the actual desired target list.
6036 : *
6037 : * This function is much like make_group_input_target, though not quite enough
6038 : * like it to share code. As in that function, we flatten most expressions
6039 : * into their component variables. But we do not want to flatten window
6040 : * PARTITION BY/ORDER BY clauses, since that might result in multiple
6041 : * evaluations of them, which would be bad (possibly even resulting in
6042 : * inconsistent answers, if they contain volatile functions).
6043 : * Also, we must not flatten GROUP BY clauses that were left unflattened by
6044 : * make_group_input_target, because we may no longer have access to the
6045 : * individual Vars in them.
6046 : *
6047 : * Another key difference from make_group_input_target is that we don't
6048 : * flatten Aggref expressions, since those are to be computed below the
6049 : * window functions and just referenced like Vars above that.
6050 : *
6051 : * 'final_target' is the query's final target list (in PathTarget form)
6052 : * 'activeWindows' is the list of active windows previously identified by
6053 : * select_active_windows.
6054 : *
6055 : * The result is the PathTarget to be computed by the plan node immediately
6056 : * below the first WindowAgg node.
6057 : */
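 : /*
 : * A rough example (hypothetical query): for
 : *   SELECT x + 1, lead(y) OVER (ORDER BY z * 2) FROM tab;
 : * the window ORDER BY expression z * 2 is kept whole (so it is evaluated
 : * only once, below the WindowAgg), while x + 1 is flattened to just "x"
 : * and y is included because lead() needs it; x + 1 itself is computed
 : * above the WindowAgg.
 : */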
6058 : static PathTarget *
6059 2378 : make_window_input_target(PlannerInfo *root,
6060 : PathTarget *final_target,
6061 : List *activeWindows)
6062 : {
6063 : PathTarget *input_target;
6064 : Bitmapset *sgrefs;
6065 : List *flattenable_cols;
6066 : List *flattenable_vars;
6067 : int i;
6068 : ListCell *lc;
6069 :
6070 : Assert(root->parse->hasWindowFuncs);
6071 :
6072 : /*
6073 : * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6074 : * into a bitmapset for convenient reference below.
6075 : */
6076 2378 : sgrefs = NULL;
6077 4924 : foreach(lc, activeWindows)
6078 : {
6079 2546 : WindowClause *wc = lfirst_node(WindowClause, lc);
6080 : ListCell *lc2;
6081 :
6082 3310 : foreach(lc2, wc->partitionClause)
6083 : {
6084 764 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6085 :
6086 764 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6087 : }
6088 4726 : foreach(lc2, wc->orderClause)
6089 : {
6090 2180 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6091 :
6092 2180 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6093 : }
6094 : }
6095 :
6096 : /* Add in sortgroupref numbers of GROUP BY clauses, too */
6097 2564 : foreach(lc, root->processed_groupClause)
6098 : {
6099 186 : SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6100 :
6101 186 : sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6102 : }
6103 :
6104 : /*
6105 : * Construct a target containing all the non-flattenable targetlist items,
6106 : * and save aside the others for a moment.
6107 : */
6108 2378 : input_target = create_empty_pathtarget();
6109 2378 : flattenable_cols = NIL;
6110 :
6111 2378 : i = 0;
6112 10318 : foreach(lc, final_target->exprs)
6113 : {
6114 7940 : Expr *expr = (Expr *) lfirst(lc);
6115 7940 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
6116 :
6117 : /*
6118 : * Don't want to deconstruct window clauses or GROUP BY items. (Note
6119 : * that such items can't contain window functions, so it's okay to
6120 : * compute them below the WindowAgg nodes.)
6121 : */
6122 7940 : if (sgref != 0 && bms_is_member(sgref, sgrefs))
6123 : {
6124 : /*
6125 : * Don't want to deconstruct this value, so add it to the input
6126 : * target as-is.
6127 : */
6128 2798 : add_column_to_pathtarget(input_target, expr, sgref);
6129 : }
6130 : else
6131 : {
6132 : /*
6133 : * Column is to be flattened, so just remember the expression for
6134 : * later call to pull_var_clause.
6135 : */
6136 5142 : flattenable_cols = lappend(flattenable_cols, expr);
6137 : }
6138 :
6139 7940 : i++;
6140 : }
6141 :
6142 : /*
6143 : * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6144 : * add them to the input target if not already present. (Some might be
6145 : * there already because they're used directly as window/group clauses.)
6146 : *
6147 : * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6148 : * Aggrefs are placed in the Agg node's tlist and not left to be computed
6149 : * at higher levels. On the other hand, we should recurse into
6150 : * WindowFuncs to make sure their input expressions are available.
6151 : */
6152 2378 : flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6153 : PVC_INCLUDE_AGGREGATES |
6154 : PVC_RECURSE_WINDOWFUNCS |
6155 : PVC_INCLUDE_PLACEHOLDERS);
6156 2378 : add_new_columns_to_pathtarget(input_target, flattenable_vars);
6157 :
6158 : /* clean up cruft */
6159 2378 : list_free(flattenable_vars);
6160 2378 : list_free(flattenable_cols);
6161 :
6162 : /* XXX this causes some redundant cost calculation ... */
6163 2378 : return set_pathtarget_cost_width(root, input_target);
6164 : }
6165 :
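/*
 * A hypothetical example of the policy above: for
 *
 *		SELECT x + 1, lead(y) OVER (ORDER BY z) FROM tab;
 *
 * the final target is {x + 1, lead(y) OVER (...), z}, where z carries the
 * sortgroupref of the window ORDER BY and is therefore kept as-is.  The
 * other two columns are flattened: pull_var_clause(), recursing into the
 * WindowFunc, yields {x, y}, so the window input target becomes {z, x, y},
 * and the topmost WindowAgg computes x + 1 and lead(y) itself.
 */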
6166 : /*
6167 : * make_pathkeys_for_window
6168 : * Create a pathkeys list describing the required input ordering
6169 : * for the given WindowClause.
6170 : *
6171 : * Modifies wc's partitionClause to remove any clauses which are deemed
6172 : * redundant by the pathkey logic.
6173 : *
6174 : * The required ordering is first the PARTITION keys, then the ORDER keys.
6175 : * In the future we might try to implement windowing using hashing, in which
6176 : * case the ordering could be relaxed, but for now we always sort.
6177 : */
6178 : static List *
6179 5136 : make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6180 : List *tlist)
6181 : {
6182 5136 : List *window_pathkeys = NIL;
6183 :
6184 : /* Throw error if can't sort */
6185 5136 : if (!grouping_is_sortable(wc->partitionClause))
6186 0 : ereport(ERROR,
6187 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6188 : errmsg("could not implement window PARTITION BY"),
6189 : errdetail("Window partitioning columns must be of sortable datatypes.")));
6190 5136 : if (!grouping_is_sortable(wc->orderClause))
6191 0 : ereport(ERROR,
6192 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6193 : errmsg("could not implement window ORDER BY"),
6194 : errdetail("Window ordering columns must be of sortable datatypes.")));
6195 :
6196 : /*
6197 : * First fetch the pathkeys for the PARTITION BY clause. We can safely
6198 : * remove any clauses from the wc->partitionClause for redundant pathkeys.
6199 : */
6200 5136 : if (wc->partitionClause != NIL)
6201 : {
6202 : bool sortable;
6203 :
6204 1302 : window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6205 : &wc->partitionClause,
6206 : tlist,
6207 : true,
6208 : false,
6209 : &sortable,
6210 : false);
6211 :
6212 : Assert(sortable);
6213 : }
6214 :
6215 : /*
6216 : * In principle, we could also consider removing redundant ORDER BY items
6217 : * too, as doing so does not alter the result of peer row checks done by
6218 : * the executor. However, we must *not* remove the ordering column for
6219 : * RANGE OFFSET cases, as the executor needs that for in_range tests even
6220 : * if it's known to be equal to some partitioning column.
6221 : */
6222 5136 : if (wc->orderClause != NIL)
6223 : {
6224 : List *orderby_pathkeys;
6225 :
6226 4288 : orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6227 : wc->orderClause,
6228 : tlist);
6229 :
6230 : /* Okay, make the combined pathkeys */
6231 4288 : if (window_pathkeys != NIL)
6232 946 : window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6233 : else
6234 3342 : window_pathkeys = orderby_pathkeys;
6235 : }
6236 :
6237 5136 : return window_pathkeys;
6238 : }
6239 :
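/*
 * For example (hypothetical clauses): for OVER (PARTITION BY a ORDER BY b)
 * the pathkeys built here request an input ordering on (a, b); with no
 * PARTITION BY it is just (b).  If a partitioning column is found to be
 * redundant (say, already forced to a single value by an equivalence
 * class), it is dropped from wc->partitionClause as a side effect of
 * make_pathkeys_for_sortclauses_extended().
 */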
6240 : /*
6241 : * make_sort_input_target
6242 : * Generate appropriate PathTarget for initial input to Sort step.
6243 : *
6244 : * If the query has ORDER BY, this function chooses the target to be computed
6245 : * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6246 : * project) steps. This might or might not be identical to the query's final
6247 : * output target.
6248 : *
6249 : * The main argument for keeping the sort-input tlist the same as the final
6250 : * is that we avoid a separate projection node (which will be needed if
6251 : * they're different, because Sort can't project). However, there are also
6252 : * advantages to postponing tlist evaluation till after the Sort: it ensures
6253 : * a consistent order of evaluation for any volatile functions in the tlist,
6254 : * and if there's also a LIMIT, we can stop the query without ever computing
6255 : * tlist functions for later rows, which is beneficial for both volatile and
6256 : * expensive functions.
6257 : *
6258 : * Our current policy is to postpone volatile expressions till after the sort
6259 : * unconditionally (assuming that that's possible, ie they are in plain tlist
6260 : * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6261 : * postpone set-returning expressions, because running them beforehand would
6262 : * bloat the sort dataset, and because it might cause unexpected output order
6263 : * if the sort isn't stable. However there's a constraint on that: all SRFs
6264 : * in the tlist should be evaluated at the same plan step, so that they can
6265 : * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6266 : * mustn't postpone any SRFs. (Note that in principle that policy should
6267 : * probably get applied to the group/window input targetlists too, but we
6268 : * have not done that historically.) Lastly, expensive expressions are
6269 : * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6270 : * partial evaluation of the query is possible (if neither is true, we expect
6271 : * to have to evaluate the expressions for every row anyway), or if there are
6272 : * any volatile or set-returning expressions (since once we've put in a
6273 : * projection at all, it won't cost any more to postpone more stuff).
6274 : *
6275 : * Another issue that could potentially be considered here is that
6276 : * evaluating tlist expressions could result in data that's either wider
6277 : * or narrower than the input Vars, thus changing the volume of data that
6278 : * has to go through the Sort. However, we usually have only a very bad
6279 : * idea of the output width of any expression more complex than a Var,
6280 : * so for now it seems too risky to try to optimize on that basis.
6281 : *
6282 : * Note that if we do produce a modified sort-input target, and then the
6283 : * query ends up not using an explicit Sort, no particular harm is done:
6284 : * we'll initially use the modified target for the preceding path nodes,
6285 : * but then change them to the final target with apply_projection_to_path.
6286 : * Moreover, in such a case the guarantees about evaluation order of
6287 : * volatile functions still hold, since the rows are sorted already.
6288 : *
6289 : * This function has some things in common with make_group_input_target and
6290 : * make_window_input_target, though the detailed rules for what to do are
6291 : * different. We never flatten/postpone any grouping or ordering columns;
6292 : * those are needed before the sort. If we do flatten a particular
6293 : * expression, we leave Aggref and WindowFunc nodes alone, since those were
6294 : * computed earlier.
6295 : *
6296 : * 'final_target' is the query's final target list (in PathTarget form)
6297 : * 'have_postponed_srfs' is an output argument, see below
6298 : *
6299 : * The result is the PathTarget to be computed by the plan node immediately
6300 : * below the Sort step (and the Distinct step, if any). This will be
6301 : * exactly final_target if we decide a projection step wouldn't be helpful.
6302 : *
6303 : * In addition, *have_postponed_srfs is set to true if we choose to postpone
6304 : * any set-returning functions to after the Sort.
6305 : */
6306 : static PathTarget *
6307 64510 : make_sort_input_target(PlannerInfo *root,
6308 : PathTarget *final_target,
6309 : bool *have_postponed_srfs)
6310 : {
6311 64510 : Query *parse = root->parse;
6312 : PathTarget *input_target;
6313 : int ncols;
6314 : bool *col_is_srf;
6315 : bool *postpone_col;
6316 : bool have_srf;
6317 : bool have_volatile;
6318 : bool have_expensive;
6319 : bool have_srf_sortcols;
6320 : bool postpone_srfs;
6321 : List *postponable_cols;
6322 : List *postponable_vars;
6323 : int i;
6324 : ListCell *lc;
6325 :
6326 : /* Shouldn't get here unless query has ORDER BY */
6327 : Assert(parse->sortClause);
6328 :
6329 64510 : *have_postponed_srfs = false; /* default result */
6330 :
6331 : /* Inspect tlist and collect per-column information */
6332 64510 : ncols = list_length(final_target->exprs);
6333 64510 : col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6334 64510 : postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6335 64510 : have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6336 :
6337 64510 : i = 0;
6338 402530 : foreach(lc, final_target->exprs)
6339 : {
6340 338020 : Expr *expr = (Expr *) lfirst(lc);
6341 :
6342 : /*
6343 : * If the column has a sortgroupref, assume it has to be evaluated
6344 : * before sorting. Generally such columns would be ORDER BY, GROUP
6345 : * BY, etc targets. One exception is columns that were removed from
6346 : * GROUP BY by remove_useless_groupby_columns() ... but those would
6347 : * only be Vars anyway. There don't seem to be any cases where it
6348 : * would be worth the trouble to double-check.
6349 : */
6350 338020 : if (get_pathtarget_sortgroupref(final_target, i) == 0)
6351 : {
6352 : /*
6353 : * Check for SRF or volatile functions. Check the SRF case first
6354 : * because we must know whether we have any postponed SRFs.
6355 : */
6356 245298 : if (parse->hasTargetSRFs &&
6357 216 : expression_returns_set((Node *) expr))
6358 : {
6359 : /* We'll decide below whether these are postponable */
6360 96 : col_is_srf[i] = true;
6361 96 : have_srf = true;
6362 : }
6363 244986 : else if (contain_volatile_functions((Node *) expr))
6364 : {
6365 : /* Unconditionally postpone */
6366 148 : postpone_col[i] = true;
6367 148 : have_volatile = true;
6368 : }
6369 : else
6370 : {
6371 : /*
6372 : * Else check the cost. XXX it's annoying to have to do this
6373 : * when set_pathtarget_cost_width() just did it. Refactor to
6374 : * allow sharing the work?
6375 : */
6376 : QualCost cost;
6377 :
6378 244838 : cost_qual_eval_node(&cost, (Node *) expr, root);
6379 :
6380 : /*
6381 : * We arbitrarily define "expensive" as "more than 10X
6382 : * cpu_operator_cost". Note this will take in any PL function
6383 : * with default cost.
6384 : */
6385 244838 : if (cost.per_tuple > 10 * cpu_operator_cost)
6386 : {
6387 16326 : postpone_col[i] = true;
6388 16326 : have_expensive = true;
6389 : }
6390 : }
6391 : }
6392 : else
6393 : {
6394 : /* For sortgroupref cols, just check if any contain SRFs */
6395 92938 : if (!have_srf_sortcols &&
6396 93248 : parse->hasTargetSRFs &&
6397 310 : expression_returns_set((Node *) expr))
6398 124 : have_srf_sortcols = true;
6399 : }
6400 :
6401 338020 : i++;
6402 : }
6403 :
6404 : /*
6405 : * We can postpone SRFs if we have some but none are in sortgroupref cols.
6406 : */
6407 64510 : postpone_srfs = (have_srf && !have_srf_sortcols);
6408 :
6409 : /*
6410 : * If we don't need a post-sort projection, just return final_target.
6411 : */
6412 64510 : if (!(postpone_srfs || have_volatile ||
6413 64306 : (have_expensive &&
6414 9640 : (parse->limitCount || root->tuple_fraction > 0))))
6415 64270 : return final_target;
6416 :
6417 : /*
6418 : * Report whether the post-sort projection will contain set-returning
6419 : * functions. This is important because it affects whether the Sort can
6420 : * rely on the query's LIMIT (if any) to bound the number of rows it needs
6421 : * to return.
6422 : */
6423 240 : *have_postponed_srfs = postpone_srfs;
6424 :
6425 : /*
6426 : * Construct the sort-input target, taking all non-postponable columns and
6427 : * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6428 : * the postponable ones.
6429 : */
6430 240 : input_target = create_empty_pathtarget();
6431 240 : postponable_cols = NIL;
6432 :
6433 240 : i = 0;
6434 1990 : foreach(lc, final_target->exprs)
6435 : {
6436 1750 : Expr *expr = (Expr *) lfirst(lc);
6437 :
6438 1750 : if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6439 298 : postponable_cols = lappend(postponable_cols, expr);
6440 : else
6441 1452 : add_column_to_pathtarget(input_target, expr,
6442 1452 : get_pathtarget_sortgroupref(final_target, i));
6443 :
6444 1750 : i++;
6445 : }
6446 :
6447 : /*
6448 : * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6449 : * postponable columns, and add them to the sort-input target if not
6450 : * already present. (Some might be there already.) We mustn't
6451 : * deconstruct Aggrefs or WindowFuncs here, since the projection node
6452 : * would be unable to recompute them.
6453 : */
6454 240 : postponable_vars = pull_var_clause((Node *) postponable_cols,
6455 : PVC_INCLUDE_AGGREGATES |
6456 : PVC_INCLUDE_WINDOWFUNCS |
6457 : PVC_INCLUDE_PLACEHOLDERS);
6458 240 : add_new_columns_to_pathtarget(input_target, postponable_vars);
6459 :
6460 : /* clean up cruft */
6461 240 : list_free(postponable_vars);
6462 240 : list_free(postponable_cols);
6463 :
6464 : /* XXX this represents even more redundant cost calculation ... */
6465 240 : return set_pathtarget_cost_width(root, input_target);
6466 : }
6467 :
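/*
 * A worked example of the postponement policy (names hypothetical): for
 *
 *		SELECT x, slow_func(y) FROM tab ORDER BY x LIMIT 10;
 *
 * x carries a sortgroupref, so it stays in the sort-input target.  If
 * slow_func() is volatile, or costs more than 10 * cpu_operator_cost per
 * tuple (and a LIMIT is present, as here), its column is postponed: the
 * sort-input target becomes {x, y} and slow_func(y) is evaluated in a
 * projection above the Sort, so it runs only for the rows the LIMIT
 * actually returns.
 */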
6468 : /*
6469 : * get_cheapest_fractional_path
6470 : * Find the cheapest path for retrieving a specified fraction of all
6471 : * the tuples expected to be returned by the given relation.
6472 : *
6473 : * Do not consider parameterized paths.  If the caller needs a path for an
6474 : * upper rel, it can't be parameterized anyway.  If the caller needs an
6475 : * append subpath, a parameterized path would only be usable if all of the
6476 : * sibling subpaths could be given a similar parameterization.
6477 : *
6478 : * We interpret tuple_fraction the same way as grouping_planner.
6479 : *
6480 : * We assume set_cheapest() has been run on the given rel.
6481 : */
6482 : Path *
6483 510384 : get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6484 : {
6485 510384 : Path *best_path = rel->cheapest_total_path;
6486 : ListCell *l;
6487 :
6488 : /* If all tuples will be retrieved, just return the cheapest-total path */
6489 510384 : if (tuple_fraction <= 0.0)
6490 501266 : return best_path;
6491 :
6492 : /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6493 9118 : if (tuple_fraction >= 1.0 && best_path->rows > 0)
6494 3636 : tuple_fraction /= best_path->rows;
6495 :
6496 24022 : foreach(l, rel->pathlist)
6497 : {
6498 14904 : Path *path = (Path *) lfirst(l);
6499 :
6500 14904 : if (path->param_info)
6501 188 : continue;
6502 :
6503 20314 : if (path == rel->cheapest_total_path ||
6504 5598 : compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6505 14226 : continue;
6506 :
6507 490 : best_path = path;
6508 : }
6509 :
6510 9118 : return best_path;
6511 : }
6512 :
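/*
 * To illustrate the tuple_fraction convention: a value <= 0 means "fetch
 * everything" and simply yields the cheapest-total path.  A value >= 1 is
 * an absolute row count; for instance, if the caller wants 100 tuples and
 * the cheapest-total path is expected to produce 10000 rows, the fraction
 * becomes 100 / 10000 = 0.01, and each remaining unparameterized path is
 * then judged on the cost of producing the first 1% of its output.
 */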
6513 : /*
6514 : * adjust_paths_for_srfs
6515 : * Fix up the Paths of the given upperrel to handle tSRFs properly.
6516 : *
6517 : * The executor can only handle set-returning functions that appear at the
6518 : * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6519 : * that are not at top level, we need to split up the evaluation into multiple
6520 : * plan levels in which each level satisfies this constraint. This function
6521 : * modifies each Path of an upperrel that (might) compute any SRFs in its
6522 : * output tlist to insert appropriate projection steps.
6523 : *
6524 : * The given targets and targets_contain_srfs lists are from
6525 : * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6526 : * target in targets.
6527 : */
6528 : static void
6529 9692 : adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
6530 : List *targets, List *targets_contain_srfs)
6531 : {
6532 : ListCell *lc;
6533 :
6534 : Assert(list_length(targets) == list_length(targets_contain_srfs));
6535 : Assert(!linitial_int(targets_contain_srfs));
6536 :
6537 : /* If no SRFs appear at this plan level, nothing to do */
6538 9692 : if (list_length(targets) == 1)
6539 626 : return;
6540 :
6541 : /*
6542 : * Stack SRF-evaluation nodes atop each path for the rel.
6543 : *
6544 : * In principle we should re-run set_cheapest() here to identify the
6545 : * cheapest path, but it seems unlikely that adding the same tlist eval
6546 : * costs to all the paths would change that, so we don't bother. Instead,
6547 : * just assume that the cheapest-startup and cheapest-total paths remain
6548 : * so. (There should be no parameterized paths anymore, so we needn't
6549 : * worry about updating cheapest_parameterized_paths.)
6550 : */
6551 18158 : foreach(lc, rel->pathlist)
6552 : {
6553 9092 : Path *subpath = (Path *) lfirst(lc);
6554 9092 : Path *newpath = subpath;
6555 : ListCell *lc1,
6556 : *lc2;
6557 :
6558 : Assert(subpath->param_info == NULL);
6559 28450 : forboth(lc1, targets, lc2, targets_contain_srfs)
6560 : {
6561 19358 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6562 19358 : bool contains_srfs = (bool) lfirst_int(lc2);
6563 :
6564 : /* If this level doesn't contain SRFs, do regular projection */
6565 19358 : if (contains_srfs)
6566 9152 : newpath = (Path *) create_set_projection_path(root,
6567 : rel,
6568 : newpath,
6569 : thistarget);
6570 : else
6571 10206 : newpath = (Path *) apply_projection_to_path(root,
6572 : rel,
6573 : newpath,
6574 : thistarget);
6575 : }
6576 9092 : lfirst(lc) = newpath;
6577 9092 : if (subpath == rel->cheapest_startup_path)
6578 370 : rel->cheapest_startup_path = newpath;
6579 9092 : if (subpath == rel->cheapest_total_path)
6580 370 : rel->cheapest_total_path = newpath;
6581 : }
6582 :
6583 : /* Likewise for partial paths, if any */
6584 9072 : foreach(lc, rel->partial_pathlist)
6585 : {
6586 6 : Path *subpath = (Path *) lfirst(lc);
6587 6 : Path *newpath = subpath;
6588 : ListCell *lc1,
6589 : *lc2;
6590 :
6591 : Assert(subpath->param_info == NULL);
6592 24 : forboth(lc1, targets, lc2, targets_contain_srfs)
6593 : {
6594 18 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6595 18 : bool contains_srfs = (bool) lfirst_int(lc2);
6596 :
6597 : /* If this level doesn't contain SRFs, do regular projection */
6598 18 : if (contains_srfs)
6599 6 : newpath = (Path *) create_set_projection_path(root,
6600 : rel,
6601 : newpath,
6602 : thistarget);
6603 : else
6604 : {
6605 : /* avoid apply_projection_to_path, in case of multiple refs */
6606 12 : newpath = (Path *) create_projection_path(root,
6607 : rel,
6608 : newpath,
6609 : thistarget);
6610 : }
6611 : }
6612 6 : lfirst(lc) = newpath;
6613 : }
6614 : }
6615 :
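/*
 * A hypothetical example: for a tlist entry such as
 * generate_series(1, x) + 1, and assuming the existing paths emit just x,
 * split_pathtarget_at_srfs() would produce level targets roughly
 * {x}, {generate_series(1, x)}, {generate_series(1, x) + 1} with
 * contain-SRF flags {false, true, false}.  The loops above then stack a
 * ProjectSet path for the SRF level and an ordinary projection for the
 * final "+ 1" level on top of each existing path.
 */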
6616 : /*
6617 : * expression_planner
6618 : * Perform planner's transformations on a standalone expression.
6619 : *
6620 : * Various utility commands need to evaluate expressions that are not part
6621 : * of a plannable query. They can do so using the executor's regular
6622 : * expression-execution machinery, but first the expression has to be fed
6623 : * through here to transform it from parser output to something executable.
6624 : *
6625 : * Currently, we disallow sublinks in standalone expressions, so there's no
6626 : * real "planning" involved here. (That might not always be true though.)
6627 : * What we must do is run eval_const_expressions to ensure that any function
6628 : * calls are converted to positional notation and function default arguments
6629 : * get inserted. The fact that constant subexpressions get simplified is a
6630 : * side-effect that is useful when the expression will get evaluated more than
6631 : * once. Also, we must fix operator function IDs.
6632 : *
6633 : * This does not return any information about dependencies of the expression.
6634 : * Hence callers should use the results only for the duration of the current
6635 : * query. Callers that would like to cache the results for longer should use
6636 : * expression_planner_with_deps, probably via the plancache.
6637 : *
6638 : * Note: this must not make any damaging changes to the passed-in expression
6639 : * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6640 : * we first do an expression_tree_mutator-based walk, what is returned will
6641 : * be a new node tree.) The result is constructed in the current memory
6642 : * context; beware that this can leak a lot of additional stuff there, too.
6643 : */
6644 : Expr *
6645 272852 : expression_planner(Expr *expr)
6646 : {
6647 : Node *result;
6648 :
6649 : /*
6650 : * Convert named-argument function calls, insert default arguments and
6651 : * simplify constant subexprs
6652 : */
6653 272852 : result = eval_const_expressions(NULL, (Node *) expr);
6654 :
6655 : /* Fill in opfuncid values if missing */
6656 272834 : fix_opfuncids(result);
6657 :
6658 272834 : return (Expr *) result;
6659 : }
6660 :
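/*
 * A minimal usage sketch (variable names hypothetical; assumes the caller
 * has set up an ExprContext in a sufficiently long-lived memory context):
 *
 *		Expr	   *planned = expression_planner(raw_expr);
 *		ExprState  *exprstate = ExecInitExpr(planned, NULL);
 *		bool		isnull;
 *		Datum		value = ExecEvalExpr(exprstate, econtext, &isnull);
 *
 * Since no dependency information is captured, the result should only be
 * relied on for the duration of the current query, as noted above.
 */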
6661 : /*
6662 : * expression_planner_with_deps
6663 : * Perform planner's transformations on a standalone expression,
6664 : * returning expression dependency information along with the result.
6665 : *
6666 : * This is identical to expression_planner() except that it also returns
6667 : * information about possible dependencies of the expression, ie identities of
6668 : * objects whose definitions affect the result. As in a PlannedStmt, these
6669 : * are expressed as a list of relation Oids and a list of PlanInvalItems.
6670 : */
6671 : Expr *
6672 356 : expression_planner_with_deps(Expr *expr,
6673 : List **relationOids,
6674 : List **invalItems)
6675 : {
6676 : Node *result;
6677 : PlannerGlobal glob;
6678 : PlannerInfo root;
6679 :
6680 : /* Make up dummy planner state so we can use setrefs machinery */
6681 8188 : MemSet(&glob, 0, sizeof(glob));
6682 356 : glob.type = T_PlannerGlobal;
6683 356 : glob.relationOids = NIL;
6684 356 : glob.invalItems = NIL;
6685 :
6686 31684 : MemSet(&root, 0, sizeof(root));
6687 356 : root.type = T_PlannerInfo;
6688 356 : root.glob = &glob;
6689 :
6690 : /*
6691 : * Convert named-argument function calls, insert default arguments and
6692 : * simplify constant subexprs. Collect identities of inlined functions
6693 : * and elided domains, too.
6694 : */
6695 356 : result = eval_const_expressions(&root, (Node *) expr);
6696 :
6697 : /* Fill in opfuncid values if missing */
6698 356 : fix_opfuncids(result);
6699 :
6700 : /*
6701 : * Now walk the finished expression to find anything else we ought to
6702 : * record as an expression dependency.
6703 : */
6704 356 : (void) extract_query_dependencies_walker(result, &root);
6705 :
6706 356 : *relationOids = glob.relationOids;
6707 356 : *invalItems = glob.invalItems;
6708 :
6709 356 : return (Expr *) result;
6710 : }
6711 :
6712 :
6713 : /*
6714 : * plan_cluster_use_sort
6715 : * Use the planner to decide how CLUSTER should implement sorting
6716 : *
6717 : * tableOid is the OID of a table to be clustered on its index indexOid
6718 : * (which is already known to be a btree index). Decide whether it's
6719 : * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6720 : * Return true to use sorting, false to use an indexscan.
6721 : *
6722 : * Note: caller had better already hold some type of lock on the table.
6723 : */
6724 : bool
6725 188 : plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6726 : {
6727 : PlannerInfo *root;
6728 : Query *query;
6729 : PlannerGlobal *glob;
6730 : RangeTblEntry *rte;
6731 : RelOptInfo *rel;
6732 : IndexOptInfo *indexInfo;
6733 : QualCost indexExprCost;
6734 : Cost comparisonCost;
6735 : Path *seqScanPath;
6736 : Path seqScanAndSortPath;
6737 : IndexPath *indexScanPath;
6738 : ListCell *lc;
6739 :
6740 : /* We can short-circuit the cost comparison if indexscans are disabled */
6741 188 : if (!enable_indexscan)
6742 30 : return true; /* use sort */
6743 :
6744 : /* Set up mostly-dummy planner state */
6745 158 : query = makeNode(Query);
6746 158 : query->commandType = CMD_SELECT;
6747 :
6748 158 : glob = makeNode(PlannerGlobal);
6749 :
6750 158 : root = makeNode(PlannerInfo);
6751 158 : root->parse = query;
6752 158 : root->glob = glob;
6753 158 : root->query_level = 1;
6754 158 : root->planner_cxt = CurrentMemoryContext;
6755 158 : root->wt_param_id = -1;
6756 158 : root->join_domains = list_make1(makeNode(JoinDomain));
6757 :
6758 : /* Build a minimal RTE for the rel */
6759 158 : rte = makeNode(RangeTblEntry);
6760 158 : rte->rtekind = RTE_RELATION;
6761 158 : rte->relid = tableOid;
6762 158 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6763 158 : rte->rellockmode = AccessShareLock;
6764 158 : rte->lateral = false;
6765 158 : rte->inh = false;
6766 158 : rte->inFromCl = true;
6767 158 : query->rtable = list_make1(rte);
6768 158 : addRTEPermissionInfo(&query->rteperminfos, rte);
6769 :
6770 : /* Set up RTE/RelOptInfo arrays */
6771 158 : setup_simple_rel_arrays(root);
6772 :
6773 : /* Build RelOptInfo */
6774 158 : rel = build_simple_rel(root, 1, NULL);
6775 :
6776 : /* Locate IndexOptInfo for the target index */
6777 158 : indexInfo = NULL;
6778 196 : foreach(lc, rel->indexlist)
6779 : {
6780 196 : indexInfo = lfirst_node(IndexOptInfo, lc);
6781 196 : if (indexInfo->indexoid == indexOid)
6782 158 : break;
6783 : }
6784 :
6785 : /*
6786 : * It's possible that get_relation_info did not generate an IndexOptInfo
6787 : * for the desired index; this could happen if it hasn't yet reached its
6788 : * indcheckxmin usability horizon, or if it's a system index and we're
6789 : * ignoring system indexes. In such cases we should tell CLUSTER to not
6790 : * trust the index contents but use seqscan-and-sort.
6791 : */
6792 158 : if (lc == NULL) /* not in the list? */
6793 0 : return true; /* use sort */
6794 :
6795 : /*
6796 : * Rather than doing all the pushups that would be needed to use
6797 : * set_baserel_size_estimates, just do a quick hack for rows and width.
6798 : */
6799 158 : rel->rows = rel->tuples;
6800 158 : rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6801 :
6802 158 : root->total_table_pages = rel->pages;
6803 :
6804 : /*
6805 : * Determine eval cost of the index expressions, if any. We need to
6806 : * charge twice that amount for each tuple comparison that happens during
6807 : * the sort, since tuplesort.c will have to re-evaluate the index
6808 : * expressions each time. (XXX that's pretty inefficient...)
6809 : */
6810 158 : cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6811 158 : comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6812 :
6813 : /* Estimate the cost of seq scan + sort */
6814 158 : seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6815 158 : cost_sort(&seqScanAndSortPath, root, NIL,
6816 : seqScanPath->disabled_nodes,
6817 158 : seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6818 : comparisonCost, maintenance_work_mem, -1.0);
6819 :
6820 : /* Estimate the cost of index scan */
6821 158 : indexScanPath = create_index_path(root, indexInfo,
6822 : NIL, NIL, NIL, NIL,
6823 : ForwardScanDirection, false,
6824 : NULL, 1.0, false);
6825 :
6826 158 : return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6827 : }
6828 :
6829 : /*
6830 : * plan_create_index_workers
6831 : * Use the planner to decide how many parallel worker processes
6832 : * CREATE INDEX should request for use
6833 : *
6834 : * tableOid is the table on which the index is to be built. indexOid is the
6835 : * OID of an index to be created or reindexed (which must be an index with
6836 : * support for parallel builds - currently btree or BRIN).
6837 : *
6838 : * Return value is the number of parallel worker processes to request. It
6839 : * may be unsafe to proceed if this is 0. Note that this does not include the
6840 : * leader participating as a worker (value is always a number of parallel
6841 : * worker processes).
6842 : *
6843 : * Note: caller had better already hold some type of lock on the table and
6844 : * index.
6845 : */
6846 : int
6847 36134 : plan_create_index_workers(Oid tableOid, Oid indexOid)
6848 : {
6849 : PlannerInfo *root;
6850 : Query *query;
6851 : PlannerGlobal *glob;
6852 : RangeTblEntry *rte;
6853 : Relation heap;
6854 : Relation index;
6855 : RelOptInfo *rel;
6856 : int parallel_workers;
6857 : BlockNumber heap_blocks;
6858 : double reltuples;
6859 : double allvisfrac;
6860 :
6861 : /*
6862 : * We don't allow performing parallel operation in standalone backend or
6863 : * when parallelism is disabled.
6864 : */
6865 36134 : if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
6866 490 : return 0;
6867 :
6868 : /* Set up largely-dummy planner state */
6869 35644 : query = makeNode(Query);
6870 35644 : query->commandType = CMD_SELECT;
6871 :
6872 35644 : glob = makeNode(PlannerGlobal);
6873 :
6874 35644 : root = makeNode(PlannerInfo);
6875 35644 : root->parse = query;
6876 35644 : root->glob = glob;
6877 35644 : root->query_level = 1;
6878 35644 : root->planner_cxt = CurrentMemoryContext;
6879 35644 : root->wt_param_id = -1;
6880 35644 : root->join_domains = list_make1(makeNode(JoinDomain));
6881 :
6882 : /*
6883 : * Build a minimal RTE.
6884 : *
6885 : * Mark the RTE with inh = true. This is a kludge to prevent
6886 : * get_relation_info() from fetching index info, which is necessary
6887 : * because it does not expect that any IndexOptInfo is currently
6888 : * undergoing REINDEX.
6889 : */
6890 35644 : rte = makeNode(RangeTblEntry);
6891 35644 : rte->rtekind = RTE_RELATION;
6892 35644 : rte->relid = tableOid;
6893 35644 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6894 35644 : rte->rellockmode = AccessShareLock;
6895 35644 : rte->lateral = false;
6896 35644 : rte->inh = true;
6897 35644 : rte->inFromCl = true;
6898 35644 : query->rtable = list_make1(rte);
6899 35644 : addRTEPermissionInfo(&query->rteperminfos, rte);
6900 :
6901 : /* Set up RTE/RelOptInfo arrays */
6902 35644 : setup_simple_rel_arrays(root);
6903 :
6904 : /* Build RelOptInfo */
6905 35644 : rel = build_simple_rel(root, 1, NULL);
6906 :
6907 : /* Rels are assumed already locked by the caller */
6908 35644 : heap = table_open(tableOid, NoLock);
6909 35644 : index = index_open(indexOid, NoLock);
6910 :
6911 : /*
6912 : * Determine if it's safe to proceed.
6913 : *
6914 : * Currently, parallel workers can't access the leader's temporary tables.
6915 : * Furthermore, any index predicate or index expressions must be parallel
6916 : * safe.
6917 : */
6918 35644 : if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
6919 33622 : !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
6920 33502 : !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
6921 : {
6922 2142 : parallel_workers = 0;
6923 2142 : goto done;
6924 : }
6925 :
6926 : /*
6927 : * If parallel_workers storage parameter is set for the table, accept that
6928 : * as the number of parallel worker processes to launch (though still cap
6929 : * at max_parallel_maintenance_workers). Note that we deliberately do not
6930 : * consider any other factor (e.g., memory use by workers) when
6931 : * parallel_workers is set.
6932 : */
6933 33502 : if (rel->rel_parallel_workers != -1)
6934 : {
6935 18 : parallel_workers = Min(rel->rel_parallel_workers,
6936 : max_parallel_maintenance_workers);
6937 18 : goto done;
6938 : }
6939 :
6940 : /*
6941 : * Estimate heap relation size ourselves, since rel->pages cannot be
6942 : * trusted (heap RTE was marked as inheritance parent)
6943 : */
6944 33484 : estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
6945 :
6946 : /*
6947 : * Determine number of workers to scan the heap relation using generic
6948 : * model
6949 : */
6950 33484 : parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
6951 : max_parallel_maintenance_workers);
6952 :
6953 : /*
6954 : * Cap workers based on available maintenance_work_mem as needed.
6955 : *
6956 : * Note that each tuplesort participant receives an even share of the
6957 : * total maintenance_work_mem budget. Aim to leave participants
6958 : * (including the leader as a participant) with no less than 32MB of
6959 : * memory.  With that floor, setting maintenance_work_mem to 64MB puts us
6960 : * immediately past the threshold of being able to launch a single
6961 : * parallel worker to sort.
6962 : */
6963 33640 : while (parallel_workers > 0 &&
6964 314 : maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
6965 156 : parallel_workers--;
6966 :
6967 33484 : done:
6968 35644 : index_close(index, NoLock);
6969 35644 : table_close(heap, NoLock);
6970 :
6971 35644 : return parallel_workers;
6972 : }
6973 :
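/*
 * To illustrate the memory cap above (numbers hypothetical): with
 * maintenance_work_mem = 64MB and an initial estimate of 3 workers, the
 * loop sees 64MB / 4 = 16MB < 32MB, then 64MB / 3 ~ 21MB < 32MB, then
 * 64MB / 2 = 32MB, which is no longer below the floor, so it settles on
 * one worker plus the leader.  Anything below 64MB ends up requesting no
 * workers at all.
 */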
6974 : /*
6975 : * add_paths_to_grouping_rel
6976 : *
6977 : * Add non-partial paths to grouping relation.
6978 : */
6979 : static void
6980 39188 : add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
6981 : RelOptInfo *grouped_rel,
6982 : RelOptInfo *partially_grouped_rel,
6983 : const AggClauseCosts *agg_costs,
6984 : grouping_sets_data *gd, double dNumGroups,
6985 : GroupPathExtraData *extra)
6986 : {
6987 39188 : Query *parse = root->parse;
6988 39188 : Path *cheapest_path = input_rel->cheapest_total_path;
6989 : ListCell *lc;
6990 39188 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
6991 39188 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
6992 39188 : List *havingQual = (List *) extra->havingQual;
6993 39188 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
6994 :
6995 39188 : if (can_sort)
6996 : {
6997 : /*
6998 : * Use any available suitably-sorted path as input, and also consider
6999 : * sorting the cheapest-total path and incremental sort on any paths
7000 : * with presorted keys.
7001 : */
7002 81040 : foreach(lc, input_rel->pathlist)
7003 : {
7004 : ListCell *lc2;
7005 41858 : Path *path = (Path *) lfirst(lc);
7006 41858 : Path *path_save = path;
7007 41858 : List *pathkey_orderings = NIL;
7008 :
7009 : /* generate alternative group orderings that might be useful */
7010 41858 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7011 :
7012 : Assert(list_length(pathkey_orderings) > 0);
7013 :
7014 83860 : foreach(lc2, pathkey_orderings)
7015 : {
7016 42002 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7017 :
7018 : /* restore the path (we replace it in the loop) */
7019 42002 : path = path_save;
7020 :
7021 42002 : path = make_ordered_path(root,
7022 : grouped_rel,
7023 : path,
7024 : cheapest_path,
7025 : info->pathkeys,
7026 : -1.0);
7027 42002 : if (path == NULL)
7028 368 : continue;
7029 :
7030 : /* Now decide what to stick atop it */
7031 41634 : if (parse->groupingSets)
7032 : {
7033 938 : consider_groupingsets_paths(root, grouped_rel,
7034 : path, true, can_hash,
7035 : gd, agg_costs, dNumGroups);
7036 : }
7037 40696 : else if (parse->hasAggs)
7038 : {
7039 : /*
7040 : * We have aggregation, possibly with plain GROUP BY. Make
7041 : * an AggPath.
7042 : */
7043 39924 : add_path(grouped_rel, (Path *)
7044 39924 : create_agg_path(root,
7045 : grouped_rel,
7046 : path,
7047 39924 : grouped_rel->reltarget,
7048 39924 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7049 : AGGSPLIT_SIMPLE,
7050 : info->clauses,
7051 : havingQual,
7052 : agg_costs,
7053 : dNumGroups));
7054 : }
7055 772 : else if (parse->groupClause)
7056 : {
7057 : /*
7058 : * We have GROUP BY without aggregation or grouping sets.
7059 : * Make a GroupPath.
7060 : */
7061 772 : add_path(grouped_rel, (Path *)
7062 772 : create_group_path(root,
7063 : grouped_rel,
7064 : path,
7065 : info->clauses,
7066 : havingQual,
7067 : dNumGroups));
7068 : }
7069 : else
7070 : {
7071 : /* Other cases should have been handled above */
7072 : Assert(false);
7073 : }
7074 : }
7075 : }
7076 :
7077 : /*
7078 : * Instead of operating directly on the input relation, we can
7079 : * consider finalizing a partially aggregated path.
7080 : */
7081 39182 : if (partially_grouped_rel != NULL)
7082 : {
7083 3990 : foreach(lc, partially_grouped_rel->pathlist)
7084 : {
7085 : ListCell *lc2;
7086 2410 : Path *path = (Path *) lfirst(lc);
7087 2410 : Path *path_save = path;
7088 2410 : List *pathkey_orderings = NIL;
7089 :
7090 : /* generate alternative group orderings that might be useful */
7091 2410 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7092 :
7093 : Assert(list_length(pathkey_orderings) > 0);
7094 :
7095 : /* process all potentially interesting grouping reorderings */
7096 4820 : foreach(lc2, pathkey_orderings)
7097 : {
7098 2410 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7099 :
7100 : /* restore the path (we replace it in the loop) */
7101 2410 : path = path_save;
7102 :
7103 2410 : path = make_ordered_path(root,
7104 : grouped_rel,
7105 : path,
7106 2410 : partially_grouped_rel->cheapest_total_path,
7107 : info->pathkeys,
7108 : -1.0);
7109 :
7110 2410 : if (path == NULL)
7111 108 : continue;
7112 :
7113 2302 : if (parse->hasAggs)
7114 2054 : add_path(grouped_rel, (Path *)
7115 2054 : create_agg_path(root,
7116 : grouped_rel,
7117 : path,
7118 2054 : grouped_rel->reltarget,
7119 2054 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7120 : AGGSPLIT_FINAL_DESERIAL,
7121 : info->clauses,
7122 : havingQual,
7123 : agg_final_costs,
7124 : dNumGroups));
7125 : else
7126 248 : add_path(grouped_rel, (Path *)
7127 248 : create_group_path(root,
7128 : grouped_rel,
7129 : path,
7130 : info->clauses,
7131 : havingQual,
7132 : dNumGroups));
7133 :
7134 : }
7135 : }
7136 : }
7137 : }
7138 :
7139 39188 : if (can_hash)
7140 : {
7141 4980 : if (parse->groupingSets)
7142 : {
7143 : /*
7144 : * Try for a hash-only groupingsets path over unsorted input.
7145 : */
7146 794 : consider_groupingsets_paths(root, grouped_rel,
7147 : cheapest_path, false, true,
7148 : gd, agg_costs, dNumGroups);
7149 : }
7150 : else
7151 : {
7152 : /*
7153 : * Generate a HashAgg Path. We just need an Agg over the
7154 : * cheapest-total input path, since input order won't matter.
7155 : */
7156 4186 : add_path(grouped_rel, (Path *)
7157 4186 : create_agg_path(root, grouped_rel,
7158 : cheapest_path,
7159 4186 : grouped_rel->reltarget,
7160 : AGG_HASHED,
7161 : AGGSPLIT_SIMPLE,
7162 : root->processed_groupClause,
7163 : havingQual,
7164 : agg_costs,
7165 : dNumGroups));
7166 : }
7167 :
7168 : /*
7169 : * Generate a Finalize HashAgg Path atop of the cheapest partially
7170 : * grouped path, assuming there is one
7171 : */
7172 4980 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7173 : {
7174 784 : Path *path = partially_grouped_rel->cheapest_total_path;
7175 :
7176 784 : add_path(grouped_rel, (Path *)
7177 784 : create_agg_path(root,
7178 : grouped_rel,
7179 : path,
7180 784 : grouped_rel->reltarget,
7181 : AGG_HASHED,
7182 : AGGSPLIT_FINAL_DESERIAL,
7183 : root->processed_groupClause,
7184 : havingQual,
7185 : agg_final_costs,
7186 : dNumGroups));
7187 : }
7188 : }
7189 :
7190 : /*
7191 : * When partitionwise aggregate is used, we might have fully aggregated
7192 : * paths in the partial pathlist, because add_paths_to_append_rel() will
7193 : * consider a path for grouped_rel consisting of a Parallel Append of
7194 : * non-partial paths from each child.
7195 : */
7196 39188 : if (grouped_rel->partial_pathlist != NIL)
7197 162 : gather_grouping_paths(root, grouped_rel);
7198 39188 : }
7199 :
7200 : /*
7201 : * create_partial_grouping_paths
7202 : *
7203 : * Create a new upper relation representing the result of partial aggregation
7204 : * and populate it with appropriate paths. Note that we don't finalize the
7205 : * lists of paths here, so the caller can add additional partial or non-partial
7206 : * paths and must afterward call gather_grouping_paths and set_cheapest on
7207 : * the returned upper relation.
7208 : *
7209 : * All paths for this new upper relation -- both partial and non-partial --
7210 : * have been partially aggregated but require a subsequent FinalizeAggregate
7211 : * step.
7212 : *
7213 : * NB: This function is allowed to return NULL if it determines that there is
7214 : * no real need to create a new RelOptInfo.
7215 : */
7216 : static RelOptInfo *
7217 35168 : create_partial_grouping_paths(PlannerInfo *root,
7218 : RelOptInfo *grouped_rel,
7219 : RelOptInfo *input_rel,
7220 : grouping_sets_data *gd,
7221 : GroupPathExtraData *extra,
7222 : bool force_rel_creation)
7223 : {
7224 35168 : Query *parse = root->parse;
7225 : RelOptInfo *partially_grouped_rel;
7226 35168 : AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7227 35168 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7228 35168 : Path *cheapest_partial_path = NULL;
7229 35168 : Path *cheapest_total_path = NULL;
7230 35168 : double dNumPartialGroups = 0;
7231 35168 : double dNumPartialPartialGroups = 0;
7232 : ListCell *lc;
7233 35168 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7234 35168 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7235 :
7236 : /*
7237 : * Consider whether we should generate partially aggregated non-partial
7238 : * paths. We can only do this if we have a non-partial path, and only if
7239 : * the parent of the input rel is performing partial partitionwise
7240 : * aggregation. (Note that extra->patype is the type of partitionwise
7241 : * aggregation being used at the parent level, not this level.)
7242 : */
7243 35168 : if (input_rel->pathlist != NIL &&
7244 35168 : extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7245 618 : cheapest_total_path = input_rel->cheapest_total_path;
7246 :
7247 : /*
7248 : * If parallelism is possible for grouped_rel, then we should consider
7249 : * generating partially-grouped partial paths. However, if the input rel
7250 : * has no partial paths, then we can't.
7251 : */
7252 35168 : if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7253 1782 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
7254 :
7255 : /*
7256 : * If we can't partially aggregate partial paths, and we can't partially
7257 : * aggregate non-partial paths, then don't bother creating the new
7258 : * RelOptInfo at all, unless the caller specified force_rel_creation.
7259 : */
7260 35168 : if (cheapest_total_path == NULL &&
7261 33068 : cheapest_partial_path == NULL &&
7262 33068 : !force_rel_creation)
7263 32970 : return NULL;
7264 :
7265 : /*
7266 : * Build a new upper relation to represent the result of partially
7267 : * aggregating the rows from the input relation.
7268 : */
7269 2198 : partially_grouped_rel = fetch_upper_rel(root,
7270 : UPPERREL_PARTIAL_GROUP_AGG,
7271 : grouped_rel->relids);
7272 2198 : partially_grouped_rel->consider_parallel =
7273 2198 : grouped_rel->consider_parallel;
7274 2198 : partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7275 2198 : partially_grouped_rel->serverid = grouped_rel->serverid;
7276 2198 : partially_grouped_rel->userid = grouped_rel->userid;
7277 2198 : partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7278 2198 : partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7279 :
7280 : /*
7281 : * Build target list for partial aggregate paths. These paths cannot just
7282 : * emit the same tlist as regular aggregate paths, because (1) we must
7283 : * include Vars and Aggrefs needed in HAVING, which might not appear in
7284 : * the result tlist, and (2) the Aggrefs must be set in partial mode.
7285 : */
7286 2198 : partially_grouped_rel->reltarget =
7287 2198 : make_partial_grouping_target(root, grouped_rel->reltarget,
7288 : extra->havingQual);
7289 :
7290 2198 : if (!extra->partial_costs_set)
7291 : {
7292 : /*
7293 : * Collect statistics about aggregates for estimating costs of
7294 : * performing aggregation in parallel.
7295 : */
7296 7752 : MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7297 7752 : MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7298 1292 : if (parse->hasAggs)
7299 : {
7300 : /* partial phase */
7301 1158 : get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7302 : agg_partial_costs);
7303 :
7304 : /* final phase */
7305 1158 : get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7306 : agg_final_costs);
7307 : }
7308 :
7309 1292 : extra->partial_costs_set = true;
7310 : }
7311 :
7312 : /* Estimate number of partial groups. */
7313 2198 : if (cheapest_total_path != NULL)
7314 : dNumPartialGroups =
7315 618 : get_number_of_groups(root,
7316 : cheapest_total_path->rows,
7317 : gd,
7318 : extra->targetList);
7319 2198 : if (cheapest_partial_path != NULL)
7320 : dNumPartialPartialGroups =
7321 1782 : get_number_of_groups(root,
7322 : cheapest_partial_path->rows,
7323 : gd,
7324 : extra->targetList);
7325 :
7326 2198 : if (can_sort && cheapest_total_path != NULL)
7327 : {
7328 : /* This should have been checked previously */
7329 : Assert(parse->hasAggs || parse->groupClause);
7330 :
7331 : /*
7332 : * Use any available suitably-sorted path as input, and also consider
7333 : * sorting the cheapest partial path.
7334 : */
7335 1236 : foreach(lc, input_rel->pathlist)
7336 : {
7337 : ListCell *lc2;
7338 618 : Path *path = (Path *) lfirst(lc);
7339 618 : Path *path_save = path;
7340 618 : List *pathkey_orderings = NIL;
7341 :
7342 : /* generate alternative group orderings that might be useful */
7343 618 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7344 :
7345 : Assert(list_length(pathkey_orderings) > 0);
7346 :
7347 : /* process all potentially interesting grouping reorderings */
7348 1236 : foreach(lc2, pathkey_orderings)
7349 : {
7350 618 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7351 :
7352 : /* restore the path (we replace it in the loop) */
7353 618 : path = path_save;
7354 :
7355 618 : path = make_ordered_path(root,
7356 : partially_grouped_rel,
7357 : path,
7358 : cheapest_total_path,
7359 : info->pathkeys,
7360 : -1.0);
7361 :
7362 618 : if (path == NULL)
7363 0 : continue;
7364 :
7365 618 : if (parse->hasAggs)
7366 546 : add_path(partially_grouped_rel, (Path *)
7367 546 : create_agg_path(root,
7368 : partially_grouped_rel,
7369 : path,
7370 546 : partially_grouped_rel->reltarget,
7371 546 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7372 : AGGSPLIT_INITIAL_SERIAL,
7373 : info->clauses,
7374 : NIL,
7375 : agg_partial_costs,
7376 : dNumPartialGroups));
7377 : else
7378 72 : add_path(partially_grouped_rel, (Path *)
7379 72 : create_group_path(root,
7380 : partially_grouped_rel,
7381 : path,
7382 : info->clauses,
7383 : NIL,
7384 : dNumPartialGroups));
7385 : }
7386 : }
7387 : }
7388 :
7389 2198 : if (can_sort && cheapest_partial_path != NULL)
7390 : {
7391 : /* Similar to above logic, but for partial paths. */
7392 3576 : foreach(lc, input_rel->partial_pathlist)
7393 : {
7394 : ListCell *lc2;
7395 1794 : Path *path = (Path *) lfirst(lc);
7396 1794 : Path *path_save = path;
7397 1794 : List *pathkey_orderings = NIL;
7398 :
7399 : /* generate alternative group orderings that might be useful */
7400 1794 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7401 :
7402 : Assert(list_length(pathkey_orderings) > 0);
7403 :
7404 : /* process all potentially interesting grouping reorderings */
7405 3588 : foreach(lc2, pathkey_orderings)
7406 : {
7407 1794 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7408 :
7409 :
7410 : /* restore the path (we replace it in the loop) */
7411 1794 : path = path_save;
7412 :
7413 1794 : path = make_ordered_path(root,
7414 : partially_grouped_rel,
7415 : path,
7416 : cheapest_partial_path,
7417 : info->pathkeys,
7418 : -1.0);
7419 :
7420 1794 : if (path == NULL)
7421 6 : continue;
7422 :
7423 1788 : if (parse->hasAggs)
7424 1666 : add_partial_path(partially_grouped_rel, (Path *)
7425 1666 : create_agg_path(root,
7426 : partially_grouped_rel,
7427 : path,
7428 1666 : partially_grouped_rel->reltarget,
7429 1666 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7430 : AGGSPLIT_INITIAL_SERIAL,
7431 : info->clauses,
7432 : NIL,
7433 : agg_partial_costs,
7434 : dNumPartialPartialGroups));
7435 : else
7436 122 : add_partial_path(partially_grouped_rel, (Path *)
7437 122 : create_group_path(root,
7438 : partially_grouped_rel,
7439 : path,
7440 : info->clauses,
7441 : NIL,
7442 : dNumPartialPartialGroups));
7443 : }
7444 : }
7445 : }
7446 :
7447 : /*
7448 : * Add a partially-grouped HashAgg Path where possible
7449 : */
7450 2198 : if (can_hash && cheapest_total_path != NULL)
7451 : {
7452 : /* Checked above */
7453 : Assert(parse->hasAggs || parse->groupClause);
7454 :
7455 618 : add_path(partially_grouped_rel, (Path *)
7456 618 : create_agg_path(root,
7457 : partially_grouped_rel,
7458 : cheapest_total_path,
7459 618 : partially_grouped_rel->reltarget,
7460 : AGG_HASHED,
7461 : AGGSPLIT_INITIAL_SERIAL,
7462 : root->processed_groupClause,
7463 : NIL,
7464 : agg_partial_costs,
7465 : dNumPartialGroups));
7466 : }
7467 :
7468 : /*
7469 : * Now add a partially-grouped HashAgg partial Path where possible
7470 : */
7471 2198 : if (can_hash && cheapest_partial_path != NULL)
7472 : {
7473 986 : add_partial_path(partially_grouped_rel, (Path *)
7474 986 : create_agg_path(root,
7475 : partially_grouped_rel,
7476 : cheapest_partial_path,
7477 986 : partially_grouped_rel->reltarget,
7478 : AGG_HASHED,
7479 : AGGSPLIT_INITIAL_SERIAL,
7480 : root->processed_groupClause,
7481 : NIL,
7482 : agg_partial_costs,
7483 : dNumPartialPartialGroups));
7484 : }
7485 :
7486 : /*
7487 : * If there is an FDW that's responsible for all baserels of the query,
7488 : * let it consider adding partially grouped ForeignPaths.
7489 : */
7490 2198 : if (partially_grouped_rel->fdwroutine &&
7491 6 : partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7492 : {
7493 6 : FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7494 :
7495 6 : fdwroutine->GetForeignUpperPaths(root,
7496 : UPPERREL_PARTIAL_GROUP_AGG,
7497 : input_rel, partially_grouped_rel,
7498 : extra);
7499 : }
7500 :
7501 2198 : return partially_grouped_rel;
7502 : }
7503 :
7504 : /*
7505 : * make_ordered_path
7506 : * Return a path ordered by 'pathkeys' based on the given 'path'. May
7507 : * return NULL if it doesn't make sense to generate an ordered path in
7508 : * this case.
7509 : */
7510 : static Path *
7511 51602 : make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
7512 : Path *cheapest_path, List *pathkeys, double limit_tuples)
7513 : {
7514 : bool is_sorted;
7515 : int presorted_keys;
7516 :
7517 51602 : is_sorted = pathkeys_count_contained_in(pathkeys,
7518 : path->pathkeys,
7519 : &presorted_keys);
7520 :
7521 51602 : if (!is_sorted)
7522 : {
7523 : /*
7524 : * Try at least sorting the cheapest path and also try incrementally
7525 : * sorting any path which is partially sorted already (no need to deal
7526 : * with paths which have presorted keys when incremental sort is
7527 : * disabled unless it's the cheapest input path).
7528 : */
7529 12748 : if (path != cheapest_path &&
7530 2028 : (presorted_keys == 0 || !enable_incremental_sort))
7531 1036 : return NULL;
7532 :
7533 : /*
7534 : * We've no need to consider both a sort and incremental sort. We'll
7535 : * just do a sort if there are no presorted keys and an incremental
7536 : * sort when there are presorted keys.
7537 : */
7538 11712 : if (presorted_keys == 0 || !enable_incremental_sort)
7539 10552 : path = (Path *) create_sort_path(root,
7540 : rel,
7541 : path,
7542 : pathkeys,
7543 : limit_tuples);
7544 : else
7545 1160 : path = (Path *) create_incremental_sort_path(root,
7546 : rel,
7547 : path,
7548 : pathkeys,
7549 : presorted_keys,
7550 : limit_tuples);
7551 : }
7552 :
7553 50566 : return path;
7554 : }
7555 :
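/*
 * For example (hypothetical pathkeys): if the requested pathkeys are
 * (a, b, c) and the given path is already sorted by (a), then
 * pathkeys_count_contained_in() reports presorted_keys = 1.  With
 * enable_incremental_sort on, we place an Incremental Sort atop the path
 * that only sorts (b, c) within each group of equal a values; with it off,
 * only the cheapest input path is considered, and it receives a full Sort.
 */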
7556 : /*
7557 : * Generate Gather and Gather Merge paths for a grouping relation or partial
7558 : * grouping relation.
7559 : *
7560 : * generate_useful_gather_paths does most of the work, but we also consider a
7561 : * special case: we could try sorting the data by the group_pathkeys and then
7562 : * applying Gather Merge.
7563 : *
7564 : * NB: This function shouldn't be used for anything other than a grouped or
7565 : * partially grouped relation, not only because it explicitly references
7566 : * group_pathkeys, but also because we pass "true" as the third argument to
7567 : * generate_useful_gather_paths().
7568 : */
7569 : static void
7570 1644 : gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7571 : {
7572 : ListCell *lc;
7573 : Path *cheapest_partial_path;
7574 : List *groupby_pathkeys;
7575 :
7576 : /*
7577 : * This occurs after any partial aggregation has taken place, so trim off
7578 : * any pathkeys added for ORDER BY / DISTINCT aggregates.
7579 : */
7580 1644 : if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7581 18 : groupby_pathkeys = list_copy_head(root->group_pathkeys,
7582 : root->num_groupby_pathkeys);
7583 : else
7584 1626 : groupby_pathkeys = root->group_pathkeys;
7585 :
7586 : /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7587 1644 : generate_useful_gather_paths(root, rel, true);
7588 :
7589 1644 : cheapest_partial_path = linitial(rel->partial_pathlist);
7590 :
7591 : /* XXX Shouldn't this also consider the group-key-reordering? */
7592 3894 : foreach(lc, rel->partial_pathlist)
7593 : {
7594 2250 : Path *path = (Path *) lfirst(lc);
7595 : bool is_sorted;
7596 : int presorted_keys;
7597 : double total_groups;
7598 :
7599 2250 : is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7600 : path->pathkeys,
7601 : &presorted_keys);
7602 :
7603 2250 : if (is_sorted)
7604 1470 : continue;
7605 :
7606 : /*
7607 : * Try at least sorting the cheapest path and also try incrementally
7608 : * sorting any path which is partially sorted already (no need to deal
7609 : * with paths which have presorted keys when incremental sort is
7610 : * disabled unless it's the cheapest input path).
7611 : */
7612 780 : if (path != cheapest_partial_path &&
7613 0 : (presorted_keys == 0 || !enable_incremental_sort))
7614 0 : continue;
7615 :
7616 : /*
7617 : * We've no need to consider both a sort and incremental sort. We'll
7618 : * just do a sort if there are no presorted keys and an incremental
7619 : * sort when there are presorted keys.
7620 : */
7621 780 : if (presorted_keys == 0 || !enable_incremental_sort)
7622 780 : path = (Path *) create_sort_path(root, rel, path,
7623 : groupby_pathkeys,
7624 : -1.0);
7625 : else
7626 0 : path = (Path *) create_incremental_sort_path(root,
7627 : rel,
7628 : path,
7629 : groupby_pathkeys,
7630 : presorted_keys,
7631 : -1.0);
7632 780 : total_groups = compute_gather_rows(path);
7633 : path = (Path *)
7634 780 : create_gather_merge_path(root,
7635 : rel,
7636 : path,
7637 780 : rel->reltarget,
7638 : groupby_pathkeys,
7639 : NULL,
7640 : &total_groups);
7641 :
7642 780 : add_path(rel, path);
7643 : }
7644 1644 : }
7645 :
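/*
 * Illustrative standalone sketch (not part of planner.c) of the prefix
 * test used in the loop above: a partial path is already "sorted" for the
 * group pathkeys when those keys form a prefix of the path's own ordering;
 * otherwise the number of matching leading keys decides between a full
 * sort and an incremental sort.  Keys are reduced to plain ints here
 * purely for illustration.
 */
#include <stdbool.h>

bool
count_presorted_prefix(const int *required, int nrequired,
					   const int *provided, int nprovided,
					   int *n_common)
{
	int			n = 0;

	/* Count how many leading required keys the input already delivers. */
	while (n < nrequired && n < nprovided && required[n] == provided[n])
		n++;

	*n_common = n;
	return (n == nrequired);	/* true => no extra sorting needed */
}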
7646 : /*
7647 : * can_partial_agg
7648 : *
7649 : * Determines whether or not partial grouping and/or aggregation is possible.
7650 : * Returns true when possible, false otherwise.
7651 : */
7652 : static bool
7653 38312 : can_partial_agg(PlannerInfo *root)
7654 : {
7655 38312 : Query *parse = root->parse;
7656 :
7657 38312 : if (!parse->hasAggs && parse->groupClause == NIL)
7658 : {
7659 : /*
7660 : * We don't know how to do parallel aggregation unless we have either
7661 : * some aggregates or a grouping clause.
7662 : */
7663 0 : return false;
7664 : }
7665 38312 : else if (parse->groupingSets)
7666 : {
7667 : /* We don't know how to do grouping sets in parallel. */
7668 872 : return false;
7669 : }
7670 37440 : else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7671 : {
7672 : /* Insufficient support for partial mode. */
7673 3694 : return false;
7674 : }
7675 :
7676 : /* Everything looks good. */
7677 33746 : return true;
7678 : }
7679 :
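/*
 * Illustrative note (not upstream commentary) on the checks above: a plain
 * aggregate such as
 *		SELECT a, count(*) FROM t GROUP BY a;
 * passes all three tests and can use partial (parallel) aggregation, while
 *		SELECT a, b, count(*) FROM t GROUP BY GROUPING SETS ((a), (b));
 * is rejected because of the grouping sets, and a query whose aggregate
 * lacks a combine or serialization function (for example, an ordered-set
 * aggregate such as percentile_cont()) is rejected via hasNonPartialAggs /
 * hasNonSerialAggs.  The table t is hypothetical.
 */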
7680 : /*
7681 : * apply_scanjoin_target_to_paths
7682 : *
7683 : * Adjust the final scan/join relation, and recursively all of its children,
7684 : * to generate the final scan/join target. It would be more correct to model
7685 : * this as a separate planning step with a new RelOptInfo at the toplevel and
7686 : * for each child relation, but doing it this way is noticeably cheaper.
7687 : * Maybe that problem can be solved at some point, but for now we do this.
7688 : *
7689 : * If tlist_same_exprs is true, then the scan/join target to be applied has
7690 : * the same expressions as the existing reltarget, so we need only insert the
7691 : * appropriate sortgroupref information. By avoiding the creation of
7692 : * projection paths we save effort both immediately and at plan creation time.
7693 : */
7694 : static void
7695 555014 : apply_scanjoin_target_to_paths(PlannerInfo *root,
7696 : RelOptInfo *rel,
7697 : List *scanjoin_targets,
7698 : List *scanjoin_targets_contain_srfs,
7699 : bool scanjoin_target_parallel_safe,
7700 : bool tlist_same_exprs)
7701 : {
7702 555014 : bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7703 : PathTarget *scanjoin_target;
7704 : ListCell *lc;
7705 :
7706 : /* This recurses, so be paranoid. */
7707 555014 : check_stack_depth();
7708 :
7709 : /*
7710 : * If the rel is partitioned, we want to drop its existing paths and
7711 : * generate new ones. This function would still be correct if we kept the
7712 : * existing paths: we'd modify them to generate the correct target above
7713 : * the partitioning Append, and then they'd compete on cost with paths
7714 : * generating the target below the Append. However, in our current cost
7715 : * model the latter way is always the same or cheaper cost, so modifying
7716 : * the existing paths would just be useless work. Moreover, when the cost
7717 : * is the same, varying roundoff errors might sometimes allow an existing
7718 : * path to be picked, resulting in undesirable cross-platform plan
7719 : * variations. So we drop old paths and thereby force the work to be done
7720 : * below the Append, except in the case of a non-parallel-safe target.
7721 : *
7722 : * Some care is needed, because we have to allow
7723 : * generate_useful_gather_paths to see the old partial paths in the next
7724 : * stanza. Hence, zap the main pathlist here, then allow
7725 : * generate_useful_gather_paths to add path(s) to the main list, and
7726 : * finally zap the partial pathlist.
7727 : */
7728 555014 : if (rel_is_partitioned)
7729 12440 : rel->pathlist = NIL;
7730 :
7731 : /*
7732 : * If the scan/join target is not parallel-safe, partial paths cannot
7733 : * generate it.
7734 : */
7735 555014 : if (!scanjoin_target_parallel_safe)
7736 : {
7737 : /*
7738 : * Since we can't generate the final scan/join target in parallel
7739 : * workers, this is our last opportunity to use any partial paths that
7740 : * exist; so build Gather path(s) that use them and emit whatever the
7741 : * current reltarget is. We don't do this in the case where the
7742 : * target is parallel-safe, since we will be able to generate superior
7743 : * paths by doing it after the final scan/join target has been
7744 : * applied.
7745 : */
7746 85112 : generate_useful_gather_paths(root, rel, false);
7747 :
7748 : /* Can't use parallel query above this level. */
7749 85112 : rel->partial_pathlist = NIL;
7750 85112 : rel->consider_parallel = false;
7751 : }
7752 :
7753 : /* Finish dropping old paths for a partitioned rel, per comment above */
7754 555014 : if (rel_is_partitioned)
7755 12440 : rel->partial_pathlist = NIL;
7756 :
7757 : /* Extract SRF-free scan/join target. */
7758 555014 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
7759 :
7760 : /*
7761 : * Apply the SRF-free scan/join target to each existing path.
7762 : *
7763 : * If the tlist exprs are the same, we can just inject the sortgroupref
7764 : * information into the existing pathtargets. Otherwise, replace each
7765 : * path with a projection path that generates the SRF-free scan/join
7766 : * target. This can't change the ordering of paths within rel->pathlist,
7767 : * so we just modify the list in place.
7768 : */
7769 1144604 : foreach(lc, rel->pathlist)
7770 : {
7771 589590 : Path *subpath = (Path *) lfirst(lc);
7772 :
7773 : /* Shouldn't have any parameterized paths anymore */
7774 : Assert(subpath->param_info == NULL);
7775 :
7776 589590 : if (tlist_same_exprs)
7777 203586 : subpath->pathtarget->sortgrouprefs =
7778 203586 : scanjoin_target->sortgrouprefs;
7779 : else
7780 : {
7781 : Path *newpath;
7782 :
7783 386004 : newpath = (Path *) create_projection_path(root, rel, subpath,
7784 : scanjoin_target);
7785 386004 : lfirst(lc) = newpath;
7786 : }
7787 : }
7788 :
7789 : /* Likewise adjust the targets for any partial paths. */
7790 574584 : foreach(lc, rel->partial_pathlist)
7791 : {
7792 19570 : Path *subpath = (Path *) lfirst(lc);
7793 :
7794 : /* Shouldn't have any parameterized paths anymore */
7795 : Assert(subpath->param_info == NULL);
7796 :
7797 19570 : if (tlist_same_exprs)
7798 15936 : subpath->pathtarget->sortgrouprefs =
7799 15936 : scanjoin_target->sortgrouprefs;
7800 : else
7801 : {
7802 : Path *newpath;
7803 :
7804 3634 : newpath = (Path *) create_projection_path(root, rel, subpath,
7805 : scanjoin_target);
7806 3634 : lfirst(lc) = newpath;
7807 : }
7808 : }
7809 :
7810 : /*
7811 : * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
7812 : * atop each existing path. (Note that this function doesn't look at the
7813 : * cheapest-path fields, which is a good thing because they're bogus right
7814 : * now.)
7815 : */
7816 555014 : if (root->parse->hasTargetSRFs)
7817 9066 : adjust_paths_for_srfs(root, rel,
7818 : scanjoin_targets,
7819 : scanjoin_targets_contain_srfs);
7820 :
7821 : /*
7822 : * Update the rel's target to be the final (with SRFs) scan/join target.
7823 : * This now matches the actual output of all the paths, and we might get
7824 : * confused in createplan.c if they don't agree. We must do this now so
7825 : * that any append paths made in the next part will use the correct
7826 : * pathtarget (cf. create_append_path).
7827 : *
7828 : * Note that this is also necessary if GetForeignUpperPaths() gets called
7829 : * on the final scan/join relation or on any of its children, since the
7830 : * FDW might look at the rel's target to create ForeignPaths.
7831 : */
7832 555014 : rel->reltarget = llast_node(PathTarget, scanjoin_targets);
7833 :
7834 : /*
7835 : * If the relation is partitioned, recursively apply the scan/join target
7836 : * to all partitions, and generate brand-new Append paths in which the
7837 : * scan/join target is computed below the Append rather than above it.
7838 : * Since Append is not projection-capable, that might save a separate
7839 : * Result node, and it also is important for partitionwise aggregate.
7840 : */
7841 555014 : if (rel_is_partitioned)
7842 : {
7843 12440 : List *live_children = NIL;
7844 : int i;
7845 :
7846 : /* Adjust each partition. */
7847 12440 : i = -1;
7848 35170 : while ((i = bms_next_member(rel->live_parts, i)) >= 0)
7849 : {
7850 22730 : RelOptInfo *child_rel = rel->part_rels[i];
7851 : AppendRelInfo **appinfos;
7852 : int nappinfos;
7853 22730 : List *child_scanjoin_targets = NIL;
7854 :
7855 : Assert(child_rel != NULL);
7856 :
7857 : /* Dummy children can be ignored. */
7858 22730 : if (IS_DUMMY_REL(child_rel))
7859 42 : continue;
7860 :
7861 : /* Translate scan/join targets for this child. */
7862 22688 : appinfos = find_appinfos_by_relids(root, child_rel->relids,
7863 : &nappinfos);
7864 45376 : foreach(lc, scanjoin_targets)
7865 : {
7866 22688 : PathTarget *target = lfirst_node(PathTarget, lc);
7867 :
7868 22688 : target = copy_pathtarget(target);
7869 22688 : target->exprs = (List *)
7870 22688 : adjust_appendrel_attrs(root,
7871 22688 : (Node *) target->exprs,
7872 : nappinfos, appinfos);
7873 22688 : child_scanjoin_targets = lappend(child_scanjoin_targets,
7874 : target);
7875 : }
7876 22688 : pfree(appinfos);
7877 :
7878 : /* Recursion does the real work. */
7879 22688 : apply_scanjoin_target_to_paths(root, child_rel,
7880 : child_scanjoin_targets,
7881 : scanjoin_targets_contain_srfs,
7882 : scanjoin_target_parallel_safe,
7883 : tlist_same_exprs);
7884 :
7885 : /* Save non-dummy children for Append paths. */
7886 22688 : if (!IS_DUMMY_REL(child_rel))
7887 22688 : live_children = lappend(live_children, child_rel);
7888 : }
7889 :
7890 : /* Build new paths for this relation by appending child paths. */
7891 12440 : add_paths_to_append_rel(root, rel, live_children);
7892 : }
7893 :
7894 : /*
7895 : * Consider generating Gather or Gather Merge paths. We must only do this
7896 : * if the relation is parallel safe, and we don't do it for child rels to
7897 : * avoid creating multiple Gather nodes within the same plan. We must do
7898 : * this after all paths have been generated and before set_cheapest, since
7899 : * one of the generated paths may turn out to be the cheapest one.
7900 : */
7901 555014 : if (rel->consider_parallel && !IS_OTHER_REL(rel))
7902 162030 : generate_useful_gather_paths(root, rel, false);
7903 :
7904 : /*
7905 : * Reassess which paths are the cheapest, now that we've potentially added
7906 : * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
7907 : * this relation.
7908 : */
7909 555014 : set_cheapest(rel);
7910 555014 : }
7911 :
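/*
 * Illustrative standalone sketch (not part of planner.c) of the per-path
 * adjustment performed above: when the new target has exactly the same
 * expressions, only the sort/group ref array is shared into the existing
 * path's target; otherwise the path is replaced in place by a wrapper node
 * that computes the new target on top of it.  The types and helper below
 * are hypothetical stand-ins for PathTarget, Path and
 * create_projection_path().
 */
#include <stdbool.h>
#include <stdlib.h>

typedef struct SketchTarget
{
	int		   *sortgrouprefs;	/* stand-in for PathTarget->sortgrouprefs */
} SketchTarget;

typedef struct SketchPath
{
	SketchTarget *pathtarget;
	struct SketchPath *subpath; /* non-NULL when this is a "projection" */
} SketchPath;

SketchPath *
sketch_apply_target(SketchPath *path, SketchTarget *target, bool same_exprs)
{
	if (same_exprs)
	{
		/* Cheap case: just inject the sortgroupref information. */
		path->pathtarget->sortgrouprefs = target->sortgrouprefs;
		return path;
	}

	/* Otherwise wrap the path so the new target is projected above it. */
	{
		SketchPath *proj = calloc(1, sizeof(SketchPath));

		proj->pathtarget = target;
		proj->subpath = path;
		return proj;
	}
}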
7912 : /*
7913 : * create_partitionwise_grouping_paths
7914 : *
7915 : * If the partition keys of the input relation are part of the GROUP BY clause, all
7916 : * the rows belonging to a given group come from a single partition. This
7917 : * allows aggregation/grouping over a partitioned relation to be broken down
7918 : * into aggregation/grouping on each partition. This should be no worse, and
7919 : * often better, than the normal approach.
7920 : *
7921 : * However, if the GROUP BY clause does not contain all the partition keys,
7922 : * rows from a given group may be spread across multiple partitions. In that
7923 : * case, we perform partial aggregation within each partition, append the partial
7924 : * results, and then finalize aggregation. This is less certain to win than the
7925 : * previous case. It may win if the PartialAggregate stage greatly reduces
7926 : * the number of groups, because fewer rows will pass through the Append node.
7927 : * It may lose if we have lots of small groups.
7928 : */
7929 : static void
7930 562 : create_partitionwise_grouping_paths(PlannerInfo *root,
7931 : RelOptInfo *input_rel,
7932 : RelOptInfo *grouped_rel,
7933 : RelOptInfo *partially_grouped_rel,
7934 : const AggClauseCosts *agg_costs,
7935 : grouping_sets_data *gd,
7936 : PartitionwiseAggregateType patype,
7937 : GroupPathExtraData *extra)
7938 : {
7939 562 : List *grouped_live_children = NIL;
7940 562 : List *partially_grouped_live_children = NIL;
7941 562 : PathTarget *target = grouped_rel->reltarget;
7942 562 : bool partial_grouping_valid = true;
7943 : int i;
7944 :
7945 : Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
7946 : Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
7947 : partially_grouped_rel != NULL);
7948 :
7949 : /* Add paths for partitionwise aggregation/grouping. */
7950 562 : i = -1;
7951 2056 : while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
7952 : {
7953 1494 : RelOptInfo *child_input_rel = input_rel->part_rels[i];
7954 : PathTarget *child_target;
7955 : AppendRelInfo **appinfos;
7956 : int nappinfos;
7957 : GroupPathExtraData child_extra;
7958 : RelOptInfo *child_grouped_rel;
7959 : RelOptInfo *child_partially_grouped_rel;
7960 :
7961 : Assert(child_input_rel != NULL);
7962 :
7963 : /* Dummy children can be ignored. */
7964 1494 : if (IS_DUMMY_REL(child_input_rel))
7965 0 : continue;
7966 :
7967 1494 : child_target = copy_pathtarget(target);
7968 :
7969 : /*
7970 : * Copy the given "extra" structure as is and then override the
7971 : * members specific to this child.
7972 : */
7973 1494 : memcpy(&child_extra, extra, sizeof(child_extra));
7974 :
7975 1494 : appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
7976 : &nappinfos);
7977 :
7978 1494 : child_target->exprs = (List *)
7979 1494 : adjust_appendrel_attrs(root,
7980 1494 : (Node *) target->exprs,
7981 : nappinfos, appinfos);
7982 :
7983 : /* Translate havingQual and targetList. */
7984 1494 : child_extra.havingQual = (Node *)
7985 : adjust_appendrel_attrs(root,
7986 : extra->havingQual,
7987 : nappinfos, appinfos);
7988 1494 : child_extra.targetList = (List *)
7989 1494 : adjust_appendrel_attrs(root,
7990 1494 : (Node *) extra->targetList,
7991 : nappinfos, appinfos);
7992 :
7993 : /*
7994 : * extra->patype was the value computed for our parent rel; patype is
7995 : * the value for this relation. For the child, our value is its
7996 : * parent rel's value.
7997 : */
7998 1494 : child_extra.patype = patype;
7999 :
8000 : /*
8001 : * Create grouping relation to hold fully aggregated grouping and/or
8002 : * aggregation paths for the child.
8003 : */
8004 1494 : child_grouped_rel = make_grouping_rel(root, child_input_rel,
8005 : child_target,
8006 1494 : extra->target_parallel_safe,
8007 : child_extra.havingQual);
8008 :
8009 : /* Create grouping paths for this child relation. */
8010 1494 : create_ordinary_grouping_paths(root, child_input_rel,
8011 : child_grouped_rel,
8012 : agg_costs, gd, &child_extra,
8013 : &child_partially_grouped_rel);
8014 :
8015 1494 : if (child_partially_grouped_rel)
8016 : {
8017 : partially_grouped_live_children =
8018 906 : lappend(partially_grouped_live_children,
8019 : child_partially_grouped_rel);
8020 : }
8021 : else
8022 588 : partial_grouping_valid = false;
8023 :
8024 1494 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8025 : {
8026 876 : set_cheapest(child_grouped_rel);
8027 876 : grouped_live_children = lappend(grouped_live_children,
8028 : child_grouped_rel);
8029 : }
8030 :
8031 1494 : pfree(appinfos);
8032 : }
8033 :
8034 : /*
8035 : * Try to create append paths for partially grouped children. For full
8036 : * partitionwise aggregation, we might have paths in the partial_pathlist
8037 : * if parallel aggregation is possible. For partial partitionwise
8038 : * aggregation, we may have paths in both pathlist and partial_pathlist.
8039 : *
8040 : * NB: We must have a partially grouped path for every child in order to
8041 : * generate a partially grouped path for this relation.
8042 : */
8043 562 : if (partially_grouped_rel && partial_grouping_valid)
8044 : {
8045 : Assert(partially_grouped_live_children != NIL);
8046 :
8047 350 : add_paths_to_append_rel(root, partially_grouped_rel,
8048 : partially_grouped_live_children);
8049 :
8050 : /*
8051 : * We need to call set_cheapest, since the finalization step will use the
8052 : * cheapest path from the rel.
8053 : */
8054 350 : if (partially_grouped_rel->pathlist)
8055 350 : set_cheapest(partially_grouped_rel);
8056 : }
8057 :
8058 : /* If possible, create append paths for fully grouped children. */
8059 562 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8060 : {
8061 : Assert(grouped_live_children != NIL);
8062 :
8063 320 : add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8064 : }
8065 562 : }
8066 :
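/*
 * Illustrative note (not upstream commentary), assuming a hypothetical
 * table t partitioned by (a) and enable_partitionwise_aggregate = on:
 *
 *		SELECT a, count(*) FROM t GROUP BY a;
 * groups entirely within each partition, so each child can carry a full
 * Aggregate of its own and the per-child results are simply appended
 * (PARTITIONWISE_AGGREGATE_FULL), whereas
 *
 *		SELECT b, count(*) FROM t GROUP BY b;
 * can only run a PartialAggregate per partition, append the partial
 * results, and finalize above the Append (PARTITIONWISE_AGGREGATE_PARTIAL).
 */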
8067 : /*
8068 : * group_by_has_partkey
8069 : *
8070 : * Returns true if all the partition keys of the given relation appear in the
8071 : * GROUP BY clause with matching collations, false otherwise.
8072 : */
8073 : static bool
8074 556 : group_by_has_partkey(RelOptInfo *input_rel,
8075 : List *targetList,
8076 : List *groupClause)
8077 : {
8078 556 : List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8079 556 : int cnt = 0;
8080 : int partnatts;
8081 :
8082 : /* Input relation should be partitioned. */
8083 : Assert(input_rel->part_scheme);
8084 :
8085 : /* Rule out early if there are no partition keys present. */
8086 556 : if (!input_rel->partexprs)
8087 0 : return false;
8088 :
8089 556 : partnatts = input_rel->part_scheme->partnatts;
8090 :
8091 912 : for (cnt = 0; cnt < partnatts; cnt++)
8092 : {
8093 592 : List *partexprs = input_rel->partexprs[cnt];
8094 : ListCell *lc;
8095 592 : bool found = false;
8096 :
8097 810 : foreach(lc, partexprs)
8098 : {
8099 : ListCell *lg;
8100 586 : Expr *partexpr = lfirst(lc);
8101 586 : Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8102 :
8103 924 : foreach(lg, groupexprs)
8104 : {
8105 706 : Expr *groupexpr = lfirst(lg);
8106 706 : Oid groupcoll = exprCollation((Node *) groupexpr);
8107 :
8108 : /*
8109 : * eval_const_expressions() will have simplified the expression if
8110 : * there were more than one.
8111 : * one.
8112 : */
8113 706 : if (IsA(groupexpr, RelabelType))
8114 24 : groupexpr = ((RelabelType *) groupexpr)->arg;
8115 :
8116 706 : if (equal(groupexpr, partexpr))
8117 : {
8118 : /*
8119 : * Reject a match if the grouping collation does not match
8120 : * the partitioning collation.
8121 : */
8122 368 : if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8123 : partcoll != groupcoll)
8124 12 : return false;
8125 :
8126 356 : found = true;
8127 356 : break;
8128 : }
8129 : }
8130 :
8131 574 : if (found)
8132 356 : break;
8133 : }
8134 :
8135 : /*
8136 : * If none of the expressions for this partition key matches any of the
8137 : * GROUP BY expressions, return false.
8138 : */
8139 580 : if (!found)
8140 224 : return false;
8141 : }
8142 :
8143 320 : return true;
8144 : }
8145 :
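/*
 * Illustrative standalone sketch (not part of planner.c) of the subset
 * test above: every partition key must be matched by some GROUP BY
 * expression, and a single unmatched key is enough to reject full
 * partitionwise grouping.  Both sides are reduced to plain ints here for
 * illustration; collation checking is omitted.
 */
#include <stdbool.h>

bool
sketch_group_by_has_partkey(const int *partkeys, int npartkeys,
							const int *groupexprs, int ngroupexprs)
{
	for (int i = 0; i < npartkeys; i++)
	{
		bool		found = false;

		for (int j = 0; j < ngroupexprs; j++)
		{
			if (groupexprs[j] == partkeys[i])
			{
				found = true;
				break;
			}
		}
		if (!found)
			return false;		/* this partition key is not grouped on */
	}
	return true;
}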
8146 : /*
8147 : * generate_setop_child_grouplist
8148 : * Build a SortGroupClause list defining the sort/grouping properties
8149 : * of the child of a set operation.
8150 : *
8151 : * This is similar to generate_setop_grouplist() but differs in that the setop
8152 : * child query's targetlist entries may already have a tleSortGroupRef
8153 : * assigned for other purposes, such as GROUP BYs. Here we keep the
8154 : * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8155 : * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8156 : * any of the columns in the targetlist don't match the setop's colTypes
8157 : * then we return an empty list. This may leave some TLEs with unreferenced
8158 : * ressortgroupref markings, but that's harmless.
8159 : */
8160 : static List *
8161 12122 : generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
8162 : {
8163 12122 : List *grouplist = copyObject(op->groupClauses);
8164 : ListCell *lg;
8165 : ListCell *lt;
8166 : ListCell *ct;
8167 :
8168 12122 : lg = list_head(grouplist);
8169 12122 : ct = list_head(op->colTypes);
8170 47222 : foreach(lt, targetlist)
8171 : {
8172 35530 : TargetEntry *tle = (TargetEntry *) lfirst(lt);
8173 : SortGroupClause *sgc;
8174 : Oid coltype;
8175 :
8176 : /* resjunk columns could have sortgrouprefs. Leave these alone */
8177 35530 : if (tle->resjunk)
8178 0 : continue;
8179 :
8180 : /*
8181 : * We expect every non-resjunk target to have a SortGroupClause and
8182 : * colTypes.
8183 : */
8184 : Assert(lg != NULL);
8185 : Assert(ct != NULL);
8186 35530 : sgc = (SortGroupClause *) lfirst(lg);
8187 35530 : coltype = lfirst_oid(ct);
8188 :
8189 : /* reject if target type isn't the same as the setop target type */
8190 35530 : if (coltype != exprType((Node *) tle->expr))
8191 430 : return NIL;
8192 :
8193 35100 : lg = lnext(grouplist, lg);
8194 35100 : ct = lnext(op->colTypes, ct);
8195 :
8196 : /* assign a tleSortGroupRef, or reuse the existing one */
8197 35100 : sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8198 : }
8199 :
8200 : Assert(lg == NULL);
8201 : Assert(ct == NULL);
8202 :
8203 11692 : return grouplist;
8204 : }
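/*
 * Illustrative standalone sketch (not part of planner.c) of the lockstep
 * walk above: the child's output columns are compared one by one against
 * the set operation's column types, and the whole grouping list is
 * abandoned as soon as any column's type disagrees.  Types are reduced to
 * plain ints here for illustration.
 */
#include <stdbool.h>

bool
sketch_setop_types_match(const int *child_coltypes,
						 const int *setop_coltypes, int ncols)
{
	for (int i = 0; i < ncols; i++)
	{
		/* one mismatch means the setop cannot reuse this grouping list */
		if (child_coltypes[i] != setop_coltypes[i])
			return false;
	}
	return true;
}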
|