Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * planner.c
4 : * The query optimizer external interface.
5 : *
6 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/optimizer/plan/planner.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include <limits.h>
19 : #include <math.h>
20 :
21 : #include "access/genam.h"
22 : #include "access/parallel.h"
23 : #include "access/sysattr.h"
24 : #include "access/table.h"
25 : #include "catalog/pg_aggregate.h"
26 : #include "catalog/pg_inherits.h"
27 : #include "catalog/pg_proc.h"
28 : #include "catalog/pg_type.h"
29 : #include "executor/executor.h"
30 : #include "foreign/fdwapi.h"
31 : #include "jit/jit.h"
32 : #include "lib/bipartite_match.h"
33 : #include "lib/knapsack.h"
34 : #include "miscadmin.h"
35 : #include "nodes/makefuncs.h"
36 : #include "nodes/nodeFuncs.h"
37 : #ifdef OPTIMIZER_DEBUG
38 : #include "nodes/print.h"
39 : #endif
40 : #include "nodes/supportnodes.h"
41 : #include "optimizer/appendinfo.h"
42 : #include "optimizer/clauses.h"
43 : #include "optimizer/cost.h"
44 : #include "optimizer/optimizer.h"
45 : #include "optimizer/paramassign.h"
46 : #include "optimizer/pathnode.h"
47 : #include "optimizer/paths.h"
48 : #include "optimizer/plancat.h"
49 : #include "optimizer/planmain.h"
50 : #include "optimizer/planner.h"
51 : #include "optimizer/prep.h"
52 : #include "optimizer/subselect.h"
53 : #include "optimizer/tlist.h"
54 : #include "parser/analyze.h"
55 : #include "parser/parse_agg.h"
56 : #include "parser/parse_clause.h"
57 : #include "parser/parse_relation.h"
58 : #include "parser/parsetree.h"
59 : #include "partitioning/partdesc.h"
60 : #include "rewrite/rewriteManip.h"
61 : #include "utils/acl.h"
62 : #include "utils/backend_status.h"
63 : #include "utils/lsyscache.h"
64 : #include "utils/rel.h"
65 : #include "utils/selfuncs.h"
66 :
67 : /* GUC parameters */
68 : double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
69 : int debug_parallel_query = DEBUG_PARALLEL_OFF;
70 : bool parallel_leader_participation = true;
71 : bool enable_distinct_reordering = true;
72 :
73 : /* Hook for plugins to get control in planner() */
74 : planner_hook_type planner_hook = NULL;
75 :
76 : /* Hook for plugins to get control after PlannerGlobal is initialized */
77 : planner_setup_hook_type planner_setup_hook = NULL;
78 :
79 : /* Hook for plugins to get control before PlannerGlobal is discarded */
80 : planner_shutdown_hook_type planner_shutdown_hook = NULL;
81 :
82 : /* Hook for plugins to get control when grouping_planner() plans upper rels */
83 : create_upper_paths_hook_type create_upper_paths_hook = NULL;
84 :
85 :
86 : /* Expression kind codes for preprocess_expression */
87 : #define EXPRKIND_QUAL 0
88 : #define EXPRKIND_TARGET 1
89 : #define EXPRKIND_RTFUNC 2
90 : #define EXPRKIND_RTFUNC_LATERAL 3
91 : #define EXPRKIND_VALUES 4
92 : #define EXPRKIND_VALUES_LATERAL 5
93 : #define EXPRKIND_LIMIT 6
94 : #define EXPRKIND_APPINFO 7
95 : #define EXPRKIND_PHV 8
96 : #define EXPRKIND_TABLESAMPLE 9
97 : #define EXPRKIND_ARBITER_ELEM 10
98 : #define EXPRKIND_TABLEFUNC 11
99 : #define EXPRKIND_TABLEFUNC_LATERAL 12
100 : #define EXPRKIND_GROUPEXPR 13
101 :
102 : /*
103 : * Data specific to grouping sets
104 : */
105 : typedef struct
106 : {
107 : List *rollups;
108 : List *hash_sets_idx;
109 : double dNumHashGroups;
110 : bool any_hashable;
111 : Bitmapset *unsortable_refs;
112 : Bitmapset *unhashable_refs;
113 : List *unsortable_sets;
114 : int *tleref_to_colnum_map;
115 : } grouping_sets_data;
116 :
117 : /*
118 : * Temporary structure for use during WindowClause reordering in order to be
119 : * able to sort WindowClauses on partitioning/ordering prefix.
120 : */
121 : typedef struct
122 : {
123 : WindowClause *wc;
124 : List *uniqueOrder; /* A List of unique ordering/partitioning
125 : * clauses per Window */
126 : } WindowClauseSortData;
127 :
128 : /* Passthrough data for standard_qp_callback */
129 : typedef struct
130 : {
131 : List *activeWindows; /* active windows, if any */
132 : grouping_sets_data *gset_data; /* grouping sets data, if any */
133 : SetOperationStmt *setop; /* parent set operation or NULL if not a
134 : * subquery belonging to a set operation */
135 : } standard_qp_extra;
136 :
137 : /* Local functions */
138 : static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
139 : static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
140 : static void grouping_planner(PlannerInfo *root, double tuple_fraction,
141 : SetOperationStmt *setops);
142 : static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
143 : static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
144 : int *tleref_to_colnum_map);
145 : static void preprocess_rowmarks(PlannerInfo *root);
146 : static double preprocess_limit(PlannerInfo *root,
147 : double tuple_fraction,
148 : int64 *offset_est, int64 *count_est);
149 : static List *preprocess_groupclause(PlannerInfo *root, List *force);
150 : static List *extract_rollup_sets(List *groupingSets);
151 : static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
152 : static void standard_qp_callback(PlannerInfo *root, void *extra);
153 : static double get_number_of_groups(PlannerInfo *root,
154 : double path_rows,
155 : grouping_sets_data *gd,
156 : List *target_list);
157 : static RelOptInfo *create_grouping_paths(PlannerInfo *root,
158 : RelOptInfo *input_rel,
159 : PathTarget *target,
160 : bool target_parallel_safe,
161 : grouping_sets_data *gd);
162 : static bool is_degenerate_grouping(PlannerInfo *root);
163 : static void create_degenerate_grouping_paths(PlannerInfo *root,
164 : RelOptInfo *input_rel,
165 : RelOptInfo *grouped_rel);
166 : static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
167 : PathTarget *target, bool target_parallel_safe,
168 : Node *havingQual);
169 : static void create_ordinary_grouping_paths(PlannerInfo *root,
170 : RelOptInfo *input_rel,
171 : RelOptInfo *grouped_rel,
172 : const AggClauseCosts *agg_costs,
173 : grouping_sets_data *gd,
174 : GroupPathExtraData *extra,
175 : RelOptInfo **partially_grouped_rel_p);
176 : static void consider_groupingsets_paths(PlannerInfo *root,
177 : RelOptInfo *grouped_rel,
178 : Path *path,
179 : bool is_sorted,
180 : bool can_hash,
181 : grouping_sets_data *gd,
182 : const AggClauseCosts *agg_costs,
183 : double dNumGroups);
184 : static RelOptInfo *create_window_paths(PlannerInfo *root,
185 : RelOptInfo *input_rel,
186 : PathTarget *input_target,
187 : PathTarget *output_target,
188 : bool output_target_parallel_safe,
189 : WindowFuncLists *wflists,
190 : List *activeWindows);
191 : static void create_one_window_path(PlannerInfo *root,
192 : RelOptInfo *window_rel,
193 : Path *path,
194 : PathTarget *input_target,
195 : PathTarget *output_target,
196 : WindowFuncLists *wflists,
197 : List *activeWindows);
198 : static RelOptInfo *create_distinct_paths(PlannerInfo *root,
199 : RelOptInfo *input_rel,
200 : PathTarget *target);
201 : static void create_partial_distinct_paths(PlannerInfo *root,
202 : RelOptInfo *input_rel,
203 : RelOptInfo *final_distinct_rel,
204 : PathTarget *target);
205 : static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
206 : RelOptInfo *input_rel,
207 : RelOptInfo *distinct_rel);
208 : static List *get_useful_pathkeys_for_distinct(PlannerInfo *root,
209 : List *needed_pathkeys,
210 : List *path_pathkeys);
211 : static RelOptInfo *create_ordered_paths(PlannerInfo *root,
212 : RelOptInfo *input_rel,
213 : PathTarget *target,
214 : bool target_parallel_safe,
215 : double limit_tuples);
216 : static PathTarget *make_group_input_target(PlannerInfo *root,
217 : PathTarget *final_target);
218 : static PathTarget *make_partial_grouping_target(PlannerInfo *root,
219 : PathTarget *grouping_target,
220 : Node *havingQual);
221 : static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
222 : static void optimize_window_clauses(PlannerInfo *root,
223 : WindowFuncLists *wflists);
224 : static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
225 : static void name_active_windows(List *activeWindows);
226 : static PathTarget *make_window_input_target(PlannerInfo *root,
227 : PathTarget *final_target,
228 : List *activeWindows);
229 : static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
230 : List *tlist);
231 : static PathTarget *make_sort_input_target(PlannerInfo *root,
232 : PathTarget *final_target,
233 : bool *have_postponed_srfs);
234 : static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
235 : List *targets, List *targets_contain_srfs);
236 : static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
237 : RelOptInfo *grouped_rel,
238 : RelOptInfo *partially_grouped_rel,
239 : const AggClauseCosts *agg_costs,
240 : grouping_sets_data *gd,
241 : GroupPathExtraData *extra);
242 : static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
243 : RelOptInfo *grouped_rel,
244 : RelOptInfo *input_rel,
245 : grouping_sets_data *gd,
246 : GroupPathExtraData *extra,
247 : bool force_rel_creation);
248 : static Path *make_ordered_path(PlannerInfo *root,
249 : RelOptInfo *rel,
250 : Path *path,
251 : Path *cheapest_path,
252 : List *pathkeys,
253 : double limit_tuples);
254 : static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
255 : static bool can_partial_agg(PlannerInfo *root);
256 : static void apply_scanjoin_target_to_paths(PlannerInfo *root,
257 : RelOptInfo *rel,
258 : List *scanjoin_targets,
259 : List *scanjoin_targets_contain_srfs,
260 : bool scanjoin_target_parallel_safe,
261 : bool tlist_same_exprs);
262 : static void create_partitionwise_grouping_paths(PlannerInfo *root,
263 : RelOptInfo *input_rel,
264 : RelOptInfo *grouped_rel,
265 : RelOptInfo *partially_grouped_rel,
266 : const AggClauseCosts *agg_costs,
267 : grouping_sets_data *gd,
268 : PartitionwiseAggregateType patype,
269 : GroupPathExtraData *extra);
270 : static bool group_by_has_partkey(RelOptInfo *input_rel,
271 : List *targetList,
272 : List *groupClause);
273 : static int common_prefix_cmp(const void *a, const void *b);
274 : static List *generate_setop_child_grouplist(SetOperationStmt *op,
275 : List *targetlist);
276 : static void create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
277 : List *sortPathkeys, List *groupClause,
278 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
279 : static void create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
280 : List *sortPathkeys, List *groupClause,
281 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
282 :
283 :
284 : /*****************************************************************************
285 : *
286 : * Query optimizer entry point
287 : *
288 : * Inputs:
289 : * parse: an analyzed-and-rewritten query tree for an optimizable statement
290 : * query_string: source text for the query tree (used for error reports)
291 : * cursorOptions: bitmask of CURSOR_OPT_XXX flags, see parsenodes.h
292 : * boundParams: passed-in parameter values, or NULL if none
293 : * es: ExplainState if being called from EXPLAIN, else NULL
294 : *
295 : * The result is a PlannedStmt tree.
296 : *
297 : * PARAM_EXTERN Param nodes within the parse tree can be replaced by Consts
298 : * using values from boundParams, if those values are marked PARAM_FLAG_CONST.
299 : * Parameter values not so marked are still relied on for estimation purposes.
300 : *
301 : * The ExplainState pointer is not currently used by the core planner, but it
302 : * is passed through to some planner hooks so that they can report information
303 : * back to EXPLAIN extension hooks.
304 : *
305 : * To support loadable plugins that monitor or modify planner behavior,
306 : * we provide a hook variable that lets a plugin get control before and
307 : * after the standard planning process. The plugin would normally call
308 : * standard_planner().
309 : *
310 : * Note to plugin authors: standard_planner() scribbles on its Query input,
311 : * so you'd better copy that data structure if you want to plan more than once.
312 : *
313 : *****************************************************************************/
314 : PlannedStmt *
315 470196 : planner(Query *parse, const char *query_string, int cursorOptions,
316 : ParamListInfo boundParams, ExplainState *es)
317 : {
318 : PlannedStmt *result;
319 :
320 470196 : if (planner_hook)
321 96668 : result = (*planner_hook) (parse, query_string, cursorOptions,
322 : boundParams, es);
323 : else
324 373528 : result = standard_planner(parse, query_string, cursorOptions,
325 : boundParams, es);
326 :
327 465358 : pgstat_report_plan_id(result->planId, false);
328 :
329 465358 : return result;
330 : }
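
/*
 * Illustrative sketch (editorial addition, not part of the instrumented
 * planner.c listing above, so it carries no line/coverage columns): an
 * extension that wants to monitor or override planning would normally
 * install planner_hook from its _PG_init(), remember any previously
 * installed hook, and chain to it, falling back to standard_planner().
 * The names prev_planner_hook and my_planner are hypothetical
 * extension-side names, not symbols defined in this file; in a real module
 * this would sit alongside PG_MODULE_MAGIC and the usual includes.
 */
static planner_hook_type prev_planner_hook = NULL;

static PlannedStmt *
my_planner(Query *parse, const char *query_string, int cursorOptions,
           ParamListInfo boundParams, ExplainState *es)
{
    /* An extension could examine or adjust "parse" here. */
    if (prev_planner_hook)
        return (*prev_planner_hook) (parse, query_string, cursorOptions,
                                     boundParams, es);
    return standard_planner(parse, query_string, cursorOptions,
                            boundParams, es);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = my_planner;
}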
331 :
332 : PlannedStmt *
333 470196 : standard_planner(Query *parse, const char *query_string, int cursorOptions,
334 : ParamListInfo boundParams, ExplainState *es)
335 : {
336 : PlannedStmt *result;
337 : PlannerGlobal *glob;
338 : double tuple_fraction;
339 : PlannerInfo *root;
340 : RelOptInfo *final_rel;
341 : Path *best_path;
342 : Plan *top_plan;
343 : ListCell *lp,
344 : *lr;
345 :
346 : /*
347 : * Set up global state for this planner invocation. This data is needed
348 : * across all levels of sub-Query that might exist in the given command,
349 : * so we keep it in a separate struct that's linked to by each per-Query
350 : * PlannerInfo.
351 : */
352 470196 : glob = makeNode(PlannerGlobal);
353 :
354 470196 : glob->boundParams = boundParams;
355 470196 : glob->subplans = NIL;
356 470196 : glob->subpaths = NIL;
357 470196 : glob->subroots = NIL;
358 470196 : glob->rewindPlanIDs = NULL;
359 470196 : glob->finalrtable = NIL;
360 470196 : glob->allRelids = NULL;
361 470196 : glob->prunableRelids = NULL;
362 470196 : glob->finalrteperminfos = NIL;
363 470196 : glob->finalrowmarks = NIL;
364 470196 : glob->resultRelations = NIL;
365 470196 : glob->appendRelations = NIL;
366 470196 : glob->partPruneInfos = NIL;
367 470196 : glob->relationOids = NIL;
368 470196 : glob->invalItems = NIL;
369 470196 : glob->paramExecTypes = NIL;
370 470196 : glob->lastPHId = 0;
371 470196 : glob->lastRowMarkId = 0;
372 470196 : glob->lastPlanNodeId = 0;
373 470196 : glob->transientPlan = false;
374 470196 : glob->dependsOnRole = false;
375 470196 : glob->partition_directory = NULL;
376 470196 : glob->rel_notnullatts_hash = NULL;
377 :
378 : /*
379 : * Assess whether it's feasible to use parallel mode for this query. We
380 : * can't do this in a standalone backend, or if the command will try to
381 : * modify any data, or if this is a cursor operation, or if GUCs are set
382 : * to values that don't permit parallelism, or if parallel-unsafe
383 : * functions are present in the query tree.
384 : *
385 : * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
386 : * MATERIALIZED VIEW to use parallel plans, but this is safe only because
387 : * the command is writing into a completely new table which workers won't
388 : * be able to see. If the workers could see the table, the fact that
389 : * group locking would cause them to ignore the leader's heavyweight GIN
390 : * page locks would make this unsafe. We'll have to fix that somehow if
391 : * we want to allow parallel inserts in general; updates and deletes have
392 : * additional problems especially around combo CIDs.)
393 : *
394 : * For now, we don't try to use parallel mode if we're running inside a
395 : * parallel worker. We might eventually be able to relax this
396 : * restriction, but for now it seems best not to have parallel workers
397 : * trying to create their own parallel workers.
398 : */
399 470196 : if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
400 441594 : IsUnderPostmaster &&
401 441594 : parse->commandType == CMD_SELECT &&
402 352656 : !parse->hasModifyingCTE &&
403 352512 : max_parallel_workers_per_gather > 0 &&
404 351870 : !IsParallelWorker())
405 : {
406 : /* all the cheap tests pass, so scan the query tree */
407 351822 : glob->maxParallelHazard = max_parallel_hazard(parse);
408 351822 : glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
409 : }
410 : else
411 : {
412 : /* skip the query tree scan, just assume it's unsafe */
413 118374 : glob->maxParallelHazard = PROPARALLEL_UNSAFE;
414 118374 : glob->parallelModeOK = false;
415 : }
416 :
417 : /*
418 : * glob->parallelModeNeeded is normally set to false here and changed to
419 : * true during plan creation if a Gather or Gather Merge plan is actually
420 : * created (cf. create_gather_plan, create_gather_merge_plan).
421 : *
422 : * However, if debug_parallel_query = on or debug_parallel_query =
423 : * regress, then we impose parallel mode whenever it's safe to do so, even
424 : * if the final plan doesn't use parallelism. It's not safe to do so if
425 : * the query contains anything parallel-unsafe; parallelModeOK will be
426 : * false in that case. Note that parallelModeOK can't change after this
427 : * point. Otherwise, everything in the query is either parallel-safe or
428 : * parallel-restricted, and in either case it should be OK to impose
429 : * parallel-mode restrictions. If that ends up breaking something, then
430 : * either some function the user included in the query is incorrectly
431 : * labeled as parallel-safe or parallel-restricted when in reality it's
432 : * parallel-unsafe, or else the query planner itself has a bug.
433 : */
434 765978 : glob->parallelModeNeeded = glob->parallelModeOK &&
435 295782 : (debug_parallel_query != DEBUG_PARALLEL_OFF);
436 :
437 : /* Determine what fraction of the plan is likely to be scanned */
438 470196 : if (cursorOptions & CURSOR_OPT_FAST_PLAN)
439 : {
440 : /*
441 : * We have no real idea how many tuples the user will ultimately FETCH
442 : * from a cursor, but it is often the case that he doesn't want 'em
443 : * all, or would prefer a fast-start plan anyway so that he can
444 : * process some of the tuples sooner. Use a GUC parameter to decide
445 : * what fraction to optimize for.
446 : */
447 4712 : tuple_fraction = cursor_tuple_fraction;
448 :
449 : /*
450 : * We document cursor_tuple_fraction as simply being a fraction, which
451 : * means the edge cases 0 and 1 have to be treated specially here. We
452 : * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
453 : */
454 4712 : if (tuple_fraction >= 1.0)
455 0 : tuple_fraction = 0.0;
456 4712 : else if (tuple_fraction <= 0.0)
457 0 : tuple_fraction = 1e-10;
458 : }
459 : else
460 : {
461 : /* Default assumption is we need all the tuples */
462 465484 : tuple_fraction = 0.0;
463 : }
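
/*
 * Editorial note: for instance, a plan built for "DECLARE c CURSOR FOR
 * SELECT ..." normally reaches this point with CURSOR_OPT_FAST_PLAN set,
 * so with the default cursor_tuple_fraction of 0.1 the planner optimizes
 * for retrieving roughly the first 10% of the result rather than all of it.
 */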
464 :
465 : /*
466 : * Compute the initial path generation strategy mask.
467 : *
468 : * Some strategies, such as PGS_FOREIGNJOIN, have no corresponding enable_*
469 : * GUC, and so the corresponding bits are always set in the default
470 : * strategy mask.
471 : *
472 : * It may seem surprising that enable_indexscan sets both PGS_INDEXSCAN
473 : * and PGS_INDEXONLYSCAN. However, the historical behavior of this GUC
474 : * corresponds to this exactly: enable_indexscan=off disables both
475 : * index-scan and index-only scan paths, whereas enable_indexonlyscan=off
476 : * converts the index-only scan paths that we would have considered into
477 : * index scan paths.
478 : */
479 470196 : glob->default_pgs_mask = PGS_APPEND | PGS_MERGE_APPEND | PGS_FOREIGNJOIN |
480 : PGS_GATHER | PGS_CONSIDER_NONPARTIAL;
481 470196 : if (enable_tidscan)
482 470196 : glob->default_pgs_mask |= PGS_TIDSCAN;
483 470196 : if (enable_seqscan)
484 445066 : glob->default_pgs_mask |= PGS_SEQSCAN;
485 470196 : if (enable_indexscan)
486 467518 : glob->default_pgs_mask |= PGS_INDEXSCAN | PGS_INDEXONLYSCAN;
487 470196 : if (enable_indexonlyscan)
488 468528 : glob->default_pgs_mask |= PGS_CONSIDER_INDEXONLY;
489 470196 : if (enable_bitmapscan)
490 458706 : glob->default_pgs_mask |= PGS_BITMAPSCAN;
491 470196 : if (enable_mergejoin)
492 : {
493 467928 : glob->default_pgs_mask |= PGS_MERGEJOIN_PLAIN;
494 467928 : if (enable_material)
495 467846 : glob->default_pgs_mask |= PGS_MERGEJOIN_MATERIALIZE;
496 : }
497 470196 : if (enable_nestloop)
498 : {
499 469812 : glob->default_pgs_mask |= PGS_NESTLOOP_PLAIN;
500 469812 : if (enable_material)
501 469584 : glob->default_pgs_mask |= PGS_NESTLOOP_MATERIALIZE;
502 469812 : if (enable_memoize)
503 469668 : glob->default_pgs_mask |= PGS_NESTLOOP_MEMOIZE;
504 : }
505 470196 : if (enable_hashjoin)
506 467534 : glob->default_pgs_mask |= PGS_HASHJOIN;
507 470196 : if (enable_gathermerge)
508 470196 : glob->default_pgs_mask |= PGS_GATHER_MERGE;
509 470196 : if (enable_partitionwise_join)
510 2502 : glob->default_pgs_mask |= PGS_CONSIDER_PARTITIONWISE;
511 :
512 : /* Allow plugins to take control after we've initialized "glob" */
513 470196 : if (planner_setup_hook)
514 0 : (*planner_setup_hook) (glob, parse, query_string, &tuple_fraction, es);
515 :
516 : /* primary planning entry point (may recurse for subqueries) */
517 470196 : root = subquery_planner(glob, parse, NULL, NULL, false, tuple_fraction,
518 : NULL);
519 :
520 : /* Select best Path and turn it into a Plan */
521 465754 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
522 465754 : best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
523 :
524 465754 : top_plan = create_plan(root, best_path);
525 :
526 : /*
527 : * If creating a plan for a scrollable cursor, make sure it can run
528 : * backwards on demand. Add a Material node at the top at need.
529 : */
530 465358 : if (cursorOptions & CURSOR_OPT_SCROLL)
531 : {
532 266 : if (!ExecSupportsBackwardScan(top_plan))
533 32 : top_plan = materialize_finished_plan(top_plan);
534 : }
535 :
536 : /*
537 : * Optionally add a Gather node for testing purposes, provided this is
538 : * actually a safe thing to do.
539 : *
540 : * We can add Gather even when top_plan has parallel-safe initPlans, but
541 : * then we have to move the initPlans to the Gather node because of
542 : * SS_finalize_plan's limitations. That would cause cosmetic breakage of
543 : * regression tests when debug_parallel_query = regress, because initPlans
544 : * that would normally appear on the top_plan move to the Gather, causing
545 : * them to disappear from EXPLAIN output. That doesn't seem worth kluging
546 : * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
547 : */
548 465358 : if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
549 194 : top_plan->parallel_safe &&
550 128 : (top_plan->initPlan == NIL ||
551 0 : debug_parallel_query != DEBUG_PARALLEL_REGRESS))
552 : {
553 128 : Gather *gather = makeNode(Gather);
554 : Cost initplan_cost;
555 : bool unsafe_initplans;
556 :
557 128 : gather->plan.targetlist = top_plan->targetlist;
558 128 : gather->plan.qual = NIL;
559 128 : gather->plan.lefttree = top_plan;
560 128 : gather->plan.righttree = NULL;
561 128 : gather->num_workers = 1;
562 128 : gather->single_copy = true;
563 128 : gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
564 :
565 : /* Transfer any initPlans to the new top node */
566 128 : gather->plan.initPlan = top_plan->initPlan;
567 128 : top_plan->initPlan = NIL;
568 :
569 : /*
570 : * Since this Gather has no parallel-aware descendants to signal to,
571 : * we don't need a rescan Param.
572 : */
573 128 : gather->rescan_param = -1;
574 :
575 : /*
576 : * Ideally we'd use cost_gather here, but setting up dummy path data
577 : * to satisfy it doesn't seem much cleaner than knowing what it does.
578 : */
579 128 : gather->plan.startup_cost = top_plan->startup_cost +
580 : parallel_setup_cost;
581 128 : gather->plan.total_cost = top_plan->total_cost +
582 128 : parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
583 128 : gather->plan.plan_rows = top_plan->plan_rows;
584 128 : gather->plan.plan_width = top_plan->plan_width;
585 128 : gather->plan.parallel_aware = false;
586 128 : gather->plan.parallel_safe = false;
587 :
588 : /*
589 : * Delete the initplans' cost from top_plan. We needn't add it to the
590 : * Gather node, since the above coding already included it there.
591 : */
592 128 : SS_compute_initplan_cost(gather->plan.initPlan,
593 : &initplan_cost, &unsafe_initplans);
594 128 : top_plan->startup_cost -= initplan_cost;
595 128 : top_plan->total_cost -= initplan_cost;
596 :
597 : /* use parallel mode for parallel plans. */
598 128 : root->glob->parallelModeNeeded = true;
599 :
600 128 : top_plan = &gather->plan;
601 : }
602 :
603 : /*
604 : * If any Params were generated, run through the plan tree and compute
605 : * each plan node's extParam/allParam sets. Ideally we'd merge this into
606 : * set_plan_references' tree traversal, but for now it has to be separate
607 : * because we need to visit subplans before not after main plan.
608 : */
609 465358 : if (glob->paramExecTypes != NIL)
610 : {
611 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
612 212032 : forboth(lp, glob->subplans, lr, glob->subroots)
613 : {
614 44338 : Plan *subplan = (Plan *) lfirst(lp);
615 44338 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
616 :
617 44338 : SS_finalize_plan(subroot, subplan);
618 : }
619 167694 : SS_finalize_plan(root, top_plan);
620 : }
621 :
622 : /* final cleanup of the plan */
623 : Assert(glob->finalrtable == NIL);
624 : Assert(glob->finalrteperminfos == NIL);
625 : Assert(glob->finalrowmarks == NIL);
626 : Assert(glob->resultRelations == NIL);
627 : Assert(glob->appendRelations == NIL);
628 465358 : top_plan = set_plan_references(root, top_plan);
629 : /* ... and the subplans (both regular subplans and initplans) */
630 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
631 509696 : forboth(lp, glob->subplans, lr, glob->subroots)
632 : {
633 44338 : Plan *subplan = (Plan *) lfirst(lp);
634 44338 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
635 :
636 44338 : lfirst(lp) = set_plan_references(subroot, subplan);
637 : }
638 :
639 : /* build the PlannedStmt result */
640 465358 : result = makeNode(PlannedStmt);
641 :
642 465358 : result->commandType = parse->commandType;
643 465358 : result->queryId = parse->queryId;
644 465358 : result->planOrigin = PLAN_STMT_STANDARD;
645 465358 : result->hasReturning = (parse->returningList != NIL);
646 465358 : result->hasModifyingCTE = parse->hasModifyingCTE;
647 465358 : result->canSetTag = parse->canSetTag;
648 465358 : result->transientPlan = glob->transientPlan;
649 465358 : result->dependsOnRole = glob->dependsOnRole;
650 465358 : result->parallelModeNeeded = glob->parallelModeNeeded;
651 465358 : result->planTree = top_plan;
652 465358 : result->partPruneInfos = glob->partPruneInfos;
653 465358 : result->rtable = glob->finalrtable;
654 930716 : result->unprunableRelids = bms_difference(glob->allRelids,
655 465358 : glob->prunableRelids);
656 465358 : result->permInfos = glob->finalrteperminfos;
657 465358 : result->resultRelations = glob->resultRelations;
658 465358 : result->appendRelations = glob->appendRelations;
659 465358 : result->subplans = glob->subplans;
660 465358 : result->rewindPlanIDs = glob->rewindPlanIDs;
661 465358 : result->rowMarks = glob->finalrowmarks;
662 465358 : result->relationOids = glob->relationOids;
663 465358 : result->invalItems = glob->invalItems;
664 465358 : result->paramExecTypes = glob->paramExecTypes;
665 : /* utilityStmt should be null, but we might as well copy it */
666 465358 : result->utilityStmt = parse->utilityStmt;
667 465358 : result->stmt_location = parse->stmt_location;
668 465358 : result->stmt_len = parse->stmt_len;
669 :
670 465358 : result->jitFlags = PGJIT_NONE;
671 465358 : if (jit_enabled && jit_above_cost >= 0 &&
672 464644 : top_plan->total_cost > jit_above_cost)
673 : {
674 928 : result->jitFlags |= PGJIT_PERFORM;
675 :
676 : /*
677 : * Decide how much effort should be put into generating better code.
678 : */
679 928 : if (jit_optimize_above_cost >= 0 &&
680 928 : top_plan->total_cost > jit_optimize_above_cost)
681 368 : result->jitFlags |= PGJIT_OPT3;
682 928 : if (jit_inline_above_cost >= 0 &&
683 928 : top_plan->total_cost > jit_inline_above_cost)
684 368 : result->jitFlags |= PGJIT_INLINE;
685 :
686 : /*
687 : * Decide which operations should be JITed.
688 : */
689 928 : if (jit_expressions)
690 928 : result->jitFlags |= PGJIT_EXPR;
691 928 : if (jit_tuple_deforming)
692 928 : result->jitFlags |= PGJIT_DEFORM;
693 : }
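
/*
 * Editorial note: with the stock settings (jit_above_cost = 100000,
 * jit_optimize_above_cost = jit_inline_above_cost = 500000, and both
 * jit_expressions and jit_tuple_deforming enabled), a plan whose
 * total_cost is, say, 600000 ends up with PGJIT_PERFORM, PGJIT_OPT3,
 * PGJIT_INLINE, PGJIT_EXPR and PGJIT_DEFORM all set.
 */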
694 :
695 : /* Allow plugins to take control before we discard "glob" */
696 465358 : if (planner_shutdown_hook)
697 0 : (*planner_shutdown_hook) (glob, parse, query_string, result);
698 :
699 465358 : if (glob->partition_directory != NULL)
700 12196 : DestroyPartitionDirectory(glob->partition_directory);
701 :
702 465358 : return result;
703 : }
704 :
705 :
706 : /*--------------------
707 : * subquery_planner
708 : * Invokes the planner on a subquery. We recurse to here for each
709 : * sub-SELECT found in the query tree.
710 : *
711 : * glob is the global state for the current planner run.
712 : * parse is the querytree produced by the parser & rewriter.
713 : * plan_name is the name to assign to this subplan (NULL at the top level).
714 : * parent_root is the immediate parent Query's info (NULL at the top level).
715 : * hasRecursion is true if this is a recursive WITH query.
716 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
717 : * tuple_fraction is interpreted as explained for grouping_planner, below.
718 : * setops is used for set operation subqueries to provide the subquery with
719 : * the context in which it's being used so that Paths correctly sorted for the
720 : * set operation can be generated. NULL when not planning a set operation
721 : * child, or when a child of a set op that isn't interested in sorted input.
722 : *
723 : * Basically, this routine does the stuff that should only be done once
724 : * per Query object. It then calls grouping_planner. At one time,
725 : * grouping_planner could be invoked recursively on the same Query object;
726 : * that's not currently true, but we keep the separation between the two
727 : * routines anyway, in case we need it again someday.
728 : *
729 : * subquery_planner will be called recursively to handle sub-Query nodes
730 : * found within the query's expressions and rangetable.
731 : *
732 : * Returns the PlannerInfo struct ("root") that contains all data generated
733 : * while planning the subquery. In particular, the Path(s) attached to
734 : * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
735 : * cheapest way(s) to implement the query. The top level will select the
736 : * best Path and pass it through createplan.c to produce a finished Plan.
737 : *--------------------
738 : */
739 : PlannerInfo *
740 554548 : subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name,
741 : PlannerInfo *parent_root, bool hasRecursion,
742 : double tuple_fraction, SetOperationStmt *setops)
743 : {
744 : PlannerInfo *root;
745 : List *newWithCheckOptions;
746 : List *newHaving;
747 : bool hasOuterJoins;
748 : bool hasResultRTEs;
749 : RelOptInfo *final_rel;
750 : ListCell *l;
751 :
752 : /* Create a PlannerInfo data structure for this subquery */
753 554548 : root = makeNode(PlannerInfo);
754 554548 : root->parse = parse;
755 554548 : root->glob = glob;
756 554548 : root->query_level = parent_root ? parent_root->query_level + 1 : 1;
757 554548 : root->plan_name = plan_name;
758 554548 : root->parent_root = parent_root;
759 554548 : root->plan_params = NIL;
760 554548 : root->outer_params = NULL;
761 554548 : root->planner_cxt = CurrentMemoryContext;
762 554548 : root->init_plans = NIL;
763 554548 : root->cte_plan_ids = NIL;
764 554548 : root->multiexpr_params = NIL;
765 554548 : root->join_domains = NIL;
766 554548 : root->eq_classes = NIL;
767 554548 : root->ec_merging_done = false;
768 554548 : root->last_rinfo_serial = 0;
769 554548 : root->all_result_relids =
770 554548 : parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
771 554548 : root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
772 554548 : root->append_rel_list = NIL;
773 554548 : root->row_identity_vars = NIL;
774 554548 : root->rowMarks = NIL;
775 554548 : memset(root->upper_rels, 0, sizeof(root->upper_rels));
776 554548 : memset(root->upper_targets, 0, sizeof(root->upper_targets));
777 554548 : root->processed_groupClause = NIL;
778 554548 : root->processed_distinctClause = NIL;
779 554548 : root->processed_tlist = NIL;
780 554548 : root->update_colnos = NIL;
781 554548 : root->grouping_map = NULL;
782 554548 : root->minmax_aggs = NIL;
783 554548 : root->qual_security_level = 0;
784 554548 : root->hasPseudoConstantQuals = false;
785 554548 : root->hasAlternativeSubPlans = false;
786 554548 : root->placeholdersFrozen = false;
787 554548 : root->hasRecursion = hasRecursion;
788 554548 : root->assumeReplanning = false;
789 554548 : if (hasRecursion)
790 942 : root->wt_param_id = assign_special_exec_param(root);
791 : else
792 553606 : root->wt_param_id = -1;
793 554548 : root->non_recursive_path = NULL;
794 :
795 : /*
796 : * Create the top-level join domain. This won't have valid contents until
797 : * deconstruct_jointree fills it in, but the node needs to exist before
798 : * that so we can build EquivalenceClasses referencing it.
799 : */
800 554548 : root->join_domains = list_make1(makeNode(JoinDomain));
801 :
802 : /*
803 : * If there is a WITH list, process each WITH query and either convert it
804 : * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
805 : */
806 554548 : if (parse->cteList)
807 2940 : SS_process_ctes(root);
808 :
809 : /*
810 : * If it's a MERGE command, transform the joinlist as appropriate.
811 : */
812 554542 : transform_MERGE_to_join(parse);
813 :
814 : /*
815 : * Scan the rangetable for relation RTEs and retrieve the necessary
816 : * catalog information for each relation. Using this information, clear
817 : * the inh flag for any relation that has no children, collect not-null
818 : * attribute numbers for any relation that has column not-null
819 : * constraints, and expand virtual generated columns for any relation that
820 : * contains them. Note that this step does not descend into sublinks and
821 : * subqueries; if we pull up any sublinks or subqueries below, their
822 : * relation RTEs are processed just before pulling them up.
823 : */
824 554542 : parse = root->parse = preprocess_relation_rtes(root);
825 :
826 : /*
827 : * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
828 : * that we don't need so many special cases to deal with that situation.
829 : */
830 554542 : replace_empty_jointree(parse);
831 :
832 : /*
833 : * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
834 : * to transform them into joins. Note that this step does not descend
835 : * into subqueries; if we pull up any subqueries below, their SubLinks are
836 : * processed just before pulling them up.
837 : */
838 554542 : if (parse->hasSubLinks)
839 37374 : pull_up_sublinks(root);
840 :
841 : /*
842 : * Scan the rangetable for function RTEs, do const-simplification on them,
843 : * and then inline them if possible (producing subqueries that might get
844 : * pulled up next). Recursion issues here are handled in the same way as
845 : * for SubLinks.
846 : */
847 554542 : preprocess_function_rtes(root);
848 :
849 : /*
850 : * Check to see if any subqueries in the jointree can be merged into this
851 : * query.
852 : */
853 554536 : pull_up_subqueries(root);
854 :
855 : /*
856 : * If this is a simple UNION ALL query, flatten it into an appendrel. We
857 : * do this now because it requires applying pull_up_subqueries to the leaf
858 : * queries of the UNION ALL, which weren't touched above because they
859 : * weren't referenced by the jointree (they will be after we do this).
860 : */
861 554530 : if (parse->setOperations)
862 7014 : flatten_simple_union_all(root);
863 :
864 : /*
865 : * Survey the rangetable to see what kinds of entries are present. We can
866 : * skip some later processing if relevant SQL features are not used; for
867 : * example if there are no JOIN RTEs we can avoid the expense of doing
868 : * flatten_join_alias_vars(). This must be done after we have finished
869 : * adding rangetable entries, of course. (Note: actually, processing of
870 : * inherited or partitioned rels can cause RTEs for their child tables to
871 : * get added later; but those must all be RTE_RELATION entries, so they
872 : * don't invalidate the conclusions drawn here.)
873 : */
874 554530 : root->hasJoinRTEs = false;
875 554530 : root->hasLateralRTEs = false;
876 554530 : root->group_rtindex = 0;
877 554530 : hasOuterJoins = false;
878 554530 : hasResultRTEs = false;
879 1514968 : foreach(l, parse->rtable)
880 : {
881 960438 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
882 :
883 960438 : switch (rte->rtekind)
884 : {
885 97276 : case RTE_JOIN:
886 97276 : root->hasJoinRTEs = true;
887 97276 : if (IS_OUTER_JOIN(rte->jointype))
888 50480 : hasOuterJoins = true;
889 97276 : break;
890 210896 : case RTE_RESULT:
891 210896 : hasResultRTEs = true;
892 210896 : break;
893 5074 : case RTE_GROUP:
894 : Assert(parse->hasGroupRTE);
895 5074 : root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
896 5074 : break;
897 647192 : default:
898 : /* No work here for other RTE types */
899 647192 : break;
900 : }
901 :
902 960438 : if (rte->lateral)
903 11436 : root->hasLateralRTEs = true;
904 :
905 : /*
906 : * We can also determine the maximum security level required for any
907 : * securityQuals now. Addition of inheritance-child RTEs won't affect
908 : * this, because child tables don't have their own securityQuals; see
909 : * expand_single_inheritance_child().
910 : */
911 960438 : if (rte->securityQuals)
912 2784 : root->qual_security_level = Max(root->qual_security_level,
913 : list_length(rte->securityQuals));
914 : }
915 :
916 : /*
917 : * If we have now verified that the query target relation is
918 : * non-inheriting, mark it as a leaf target.
919 : */
920 554530 : if (parse->resultRelation)
921 : {
922 95980 : RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
923 :
924 95980 : if (!rte->inh)
925 93028 : root->leaf_result_relids =
926 93028 : bms_make_singleton(parse->resultRelation);
927 : }
928 :
929 : /*
930 : * This would be a convenient time to check access permissions for all
931 : * relations mentioned in the query, since it would be better to fail now,
932 : * before doing any detailed planning. However, for historical reasons,
933 : * we leave this to be done at executor startup.
934 : *
935 : * Note, however, that we do need to check access permissions for any view
936 : * relations mentioned in the query, in order to prevent information being
937 : * leaked by selectivity estimation functions, which only check view owner
938 : * permissions on underlying tables (see all_rows_selectable() and its
939 : * callers). This is a little ugly, because it means that access
940 : * permissions for views will be checked twice, which is another reason
941 : * why it would be better to do all the ACL checks here.
942 : */
943 1513844 : foreach(l, parse->rtable)
944 : {
945 959702 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
946 :
947 959702 : if (rte->perminfoindex != 0 &&
948 512030 : rte->relkind == RELKIND_VIEW)
949 : {
950 : RTEPermissionInfo *perminfo;
951 : bool result;
952 :
953 21758 : perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
954 21758 : result = ExecCheckOneRelPerms(perminfo);
955 21758 : if (!result)
956 388 : aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_VIEW,
957 388 : get_rel_name(perminfo->relid));
958 : }
959 : }
960 :
961 : /*
962 : * Preprocess RowMark information. We need to do this after subquery
963 : * pullup, so that all base relations are present.
964 : */
965 554142 : preprocess_rowmarks(root);
966 :
967 : /*
968 : * Set hasHavingQual to remember if HAVING clause is present. Needed
969 : * because preprocess_expression will reduce a constant-true condition to
970 : * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
971 : */
972 554142 : root->hasHavingQual = (parse->havingQual != NULL);
973 :
974 : /*
975 : * Do expression preprocessing on targetlist and quals, as well as other
976 : * random expressions in the querytree. Note that we do not need to
977 : * handle sort/group expressions explicitly, because they are actually
978 : * part of the targetlist.
979 : */
980 550172 : parse->targetList = (List *)
981 554142 : preprocess_expression(root, (Node *) parse->targetList,
982 : EXPRKIND_TARGET);
983 :
984 550172 : newWithCheckOptions = NIL;
985 553110 : foreach(l, parse->withCheckOptions)
986 : {
987 2938 : WithCheckOption *wco = lfirst_node(WithCheckOption, l);
988 :
989 2938 : wco->qual = preprocess_expression(root, wco->qual,
990 : EXPRKIND_QUAL);
991 2938 : if (wco->qual != NULL)
992 2538 : newWithCheckOptions = lappend(newWithCheckOptions, wco);
993 : }
994 550172 : parse->withCheckOptions = newWithCheckOptions;
995 :
996 550172 : parse->returningList = (List *)
997 550172 : preprocess_expression(root, (Node *) parse->returningList,
998 : EXPRKIND_TARGET);
999 :
1000 550172 : preprocess_qual_conditions(root, (Node *) parse->jointree);
1001 :
1002 550172 : parse->havingQual = preprocess_expression(root, parse->havingQual,
1003 : EXPRKIND_QUAL);
1004 :
1005 553006 : foreach(l, parse->windowClause)
1006 : {
1007 2834 : WindowClause *wc = lfirst_node(WindowClause, l);
1008 :
1009 : /* partitionClause/orderClause are sort/group expressions */
1010 2834 : wc->startOffset = preprocess_expression(root, wc->startOffset,
1011 : EXPRKIND_LIMIT);
1012 2834 : wc->endOffset = preprocess_expression(root, wc->endOffset,
1013 : EXPRKIND_LIMIT);
1014 : }
1015 :
1016 550172 : parse->limitOffset = preprocess_expression(root, parse->limitOffset,
1017 : EXPRKIND_LIMIT);
1018 550172 : parse->limitCount = preprocess_expression(root, parse->limitCount,
1019 : EXPRKIND_LIMIT);
1020 :
1021 550172 : if (parse->onConflict)
1022 : {
1023 3828 : parse->onConflict->arbiterElems = (List *)
1024 1914 : preprocess_expression(root,
1025 1914 : (Node *) parse->onConflict->arbiterElems,
1026 : EXPRKIND_ARBITER_ELEM);
1027 3828 : parse->onConflict->arbiterWhere =
1028 1914 : preprocess_expression(root,
1029 1914 : parse->onConflict->arbiterWhere,
1030 : EXPRKIND_QUAL);
1031 3828 : parse->onConflict->onConflictSet = (List *)
1032 1914 : preprocess_expression(root,
1033 1914 : (Node *) parse->onConflict->onConflictSet,
1034 : EXPRKIND_TARGET);
1035 1914 : parse->onConflict->onConflictWhere =
1036 1914 : preprocess_expression(root,
1037 1914 : parse->onConflict->onConflictWhere,
1038 : EXPRKIND_QUAL);
1039 : /* exclRelTlist contains only Vars, so no preprocessing needed */
1040 : }
1041 :
1042 553054 : foreach(l, parse->mergeActionList)
1043 : {
1044 2882 : MergeAction *action = (MergeAction *) lfirst(l);
1045 :
1046 2882 : action->targetList = (List *)
1047 2882 : preprocess_expression(root,
1048 2882 : (Node *) action->targetList,
1049 : EXPRKIND_TARGET);
1050 2882 : action->qual =
1051 2882 : preprocess_expression(root,
1052 : (Node *) action->qual,
1053 : EXPRKIND_QUAL);
1054 : }
1055 :
1056 550172 : parse->mergeJoinCondition =
1057 550172 : preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
1058 :
1059 550172 : root->append_rel_list = (List *)
1060 550172 : preprocess_expression(root, (Node *) root->append_rel_list,
1061 : EXPRKIND_APPINFO);
1062 :
1063 : /* Also need to preprocess expressions within RTEs */
1064 1504920 : foreach(l, parse->rtable)
1065 : {
1066 954748 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1067 : int kind;
1068 : ListCell *lcsq;
1069 :
1070 954748 : if (rte->rtekind == RTE_RELATION)
1071 : {
1072 494466 : if (rte->tablesample)
1073 228 : rte->tablesample = (TableSampleClause *)
1074 228 : preprocess_expression(root,
1075 228 : (Node *) rte->tablesample,
1076 : EXPRKIND_TABLESAMPLE);
1077 : }
1078 460282 : else if (rte->rtekind == RTE_SUBQUERY)
1079 : {
1080 : /*
1081 : * We don't want to do all preprocessing yet on the subquery's
1082 : * expressions, since that will happen when we plan it. But if it
1083 : * contains any join aliases of our level, those have to get
1084 : * expanded now, because planning of the subquery won't do it.
1085 : * That's only possible if the subquery is LATERAL.
1086 : */
1087 83374 : if (rte->lateral && root->hasJoinRTEs)
1088 1830 : rte->subquery = (Query *)
1089 1830 : flatten_join_alias_vars(root, root->parse,
1090 1830 : (Node *) rte->subquery);
1091 : }
1092 376908 : else if (rte->rtekind == RTE_FUNCTION)
1093 : {
1094 : /* Preprocess the function expression(s) fully */
1095 52972 : kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
1096 52972 : rte->functions = (List *)
1097 52972 : preprocess_expression(root, (Node *) rte->functions, kind);
1098 : }
1099 323936 : else if (rte->rtekind == RTE_TABLEFUNC)
1100 : {
1101 : /* Preprocess the function expression(s) fully */
1102 626 : kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
1103 626 : rte->tablefunc = (TableFunc *)
1104 626 : preprocess_expression(root, (Node *) rte->tablefunc, kind);
1105 : }
1106 323310 : else if (rte->rtekind == RTE_VALUES)
1107 : {
1108 : /* Preprocess the values lists fully */
1109 8532 : kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
1110 8532 : rte->values_lists = (List *)
1111 8532 : preprocess_expression(root, (Node *) rte->values_lists, kind);
1112 : }
1113 314778 : else if (rte->rtekind == RTE_GROUP)
1114 : {
1115 : /* Preprocess the groupexprs list fully */
1116 5074 : rte->groupexprs = (List *)
1117 5074 : preprocess_expression(root, (Node *) rte->groupexprs,
1118 : EXPRKIND_GROUPEXPR);
1119 : }
1120 :
1121 : /*
1122 : * Process each element of the securityQuals list as if it were a
1123 : * separate qual expression (as indeed it is). We need to do it this
1124 : * way to get proper canonicalization of AND/OR structure. Note that
1125 : * this converts each element into an implicit-AND sublist.
1126 : */
1127 957926 : foreach(lcsq, rte->securityQuals)
1128 : {
1129 3178 : lfirst(lcsq) = preprocess_expression(root,
1130 3178 : (Node *) lfirst(lcsq),
1131 : EXPRKIND_QUAL);
1132 : }
1133 : }
1134 :
1135 : /*
1136 : * Now that we are done preprocessing expressions, and in particular done
1137 : * flattening join alias variables, get rid of the joinaliasvars lists.
1138 : * They no longer match what expressions in the rest of the tree look
1139 : * like, because we have not preprocessed expressions in those lists (and
1140 : * do not want to; for example, expanding a SubLink there would result in
1141 : * a useless unreferenced subplan). Leaving them in place simply creates
1142 : * a hazard for later scans of the tree. We could try to prevent that by
1143 : * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1144 : * but that doesn't sound very reliable.
1145 : */
1146 550172 : if (root->hasJoinRTEs)
1147 : {
1148 335766 : foreach(l, parse->rtable)
1149 : {
1150 276672 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1151 :
1152 276672 : rte->joinaliasvars = NIL;
1153 : }
1154 : }
1155 :
1156 : /*
1157 : * Replace any Vars in the subquery's targetlist and havingQual that
1158 : * reference GROUP outputs with the underlying grouping expressions.
1159 : *
1160 : * Note that we need to perform this replacement after we've preprocessed
1161 : * the grouping expressions. This is to ensure that there is only one
1162 : * instance of SubPlan for each SubLink contained within the grouping
1163 : * expressions.
1164 : */
1165 550172 : if (parse->hasGroupRTE)
1166 : {
1167 5074 : parse->targetList = (List *)
1168 5074 : flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1169 5074 : parse->havingQual =
1170 5074 : flatten_group_exprs(root, root->parse, parse->havingQual);
1171 : }
1172 :
1173 : /* Constant-folding might have removed all set-returning functions */
1174 550172 : if (parse->hasTargetSRFs)
1175 12088 : parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1176 :
1177 : /*
1178 : * If we have grouping sets, expand the groupingSets tree of this query to
1179 : * a flat list of grouping sets. We need to do this before optimizing
1180 : * HAVING, since we can't easily tell if there's an empty grouping set
1181 : * until we have this representation.
1182 : */
1183 550172 : if (parse->groupingSets)
1184 : {
1185 1020 : parse->groupingSets =
1186 1020 : expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
1187 : }
1188 :
1189 : /*
1190 : * In some cases we may want to transfer a HAVING clause into WHERE. We
1191 : * cannot do so if the HAVING clause contains aggregates (obviously) or
1192 : * volatile functions (since a HAVING clause is supposed to be executed
1193 : * only once per group). We also can't do this if there are any grouping
1194 : * sets and the clause references any columns that are nullable by the
1195 : * grouping sets; the nulled values of those columns are not available
1196 : * before the grouping step. (The test on groupClause might seem wrong,
1197 : * but it's okay: it's just an optimization to avoid running pull_varnos
1198 : * when there cannot be any Vars in the HAVING clause.)
1199 : *
1200 : * Also, it may be that the clause is so expensive to execute that we're
1201 : * better off doing it only once per group, despite the loss of
1202 : * selectivity. This is hard to estimate short of doing the entire
1203 : * planning process twice, so we use a heuristic: clauses containing
1204 : * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1205 : * clause into WHERE, in hopes of eliminating tuples before aggregation
1206 : * instead of after.
1207 : *
1208 : * If the query has no empty grouping set then we can simply move such a
1209 : * clause into WHERE; any group that fails the clause will not be in the
1210 : * output because none of its tuples will reach the grouping or
1211 : * aggregation stage. Otherwise we have to keep the clause in HAVING to
1212 : * ensure that we don't emit a bogus aggregated row. But then the HAVING
1213 : * clause must be degenerate (variable-free), so we can copy it into WHERE
1214 : * so that query_planner() can use it in a gating Result node. (This could
1215 : * be done better, but it seems not worth optimizing.)
1216 : *
1217 : * Note that a HAVING clause may contain expressions that are not fully
1218 : * preprocessed. This can happen if these expressions are part of
1219 : * grouping items. In such cases, they are replaced with GROUP Vars in
1220 : * the parser and then replaced back after we're done with expression
1221 : * preprocessing on havingQual. This is not an issue if the clause
1222 : * remains in HAVING, because these expressions will be matched to lower
1223 : * target items in setrefs.c. However, if the clause is moved or copied
1224 : * into WHERE, we need to ensure that these expressions are fully
1225 : * preprocessed.
1226 : *
1227 : * Note that both havingQual and parse->jointree->quals are in
1228 : * implicitly-ANDed-list form at this point, even though they are declared
1229 : * as Node *.
1230 : */
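/*
 * Editorial note: as a concrete example, in "SELECT g, sum(x) FROM t
 * GROUP BY g HAVING g > 5 AND sum(x) > 100" the aggregate-free qual
 * "g > 5" is moved into WHERE by the loop below (there is a GROUP BY and
 * no empty grouping set), while "sum(x) > 100" must stay in HAVING.
 */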
1231 550172 : newHaving = NIL;
1232 551560 : foreach(l, (List *) parse->havingQual)
1233 : {
1234 1388 : Node *havingclause = (Node *) lfirst(l);
1235 :
1236 1904 : if (contain_agg_clause(havingclause) ||
1237 1032 : contain_volatile_functions(havingclause) ||
1238 516 : contain_subplans(havingclause) ||
1239 636 : (parse->groupClause && parse->groupingSets &&
1240 120 : bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1241 : {
1242 : /* keep it in HAVING */
1243 944 : newHaving = lappend(newHaving, havingclause);
1244 : }
1245 444 : else if (parse->groupClause &&
1246 408 : (parse->groupingSets == NIL ||
1247 48 : (List *) linitial(parse->groupingSets) != NIL))
1248 396 : {
1249 : /* There is GROUP BY, but no empty grouping set */
1250 : Node *whereclause;
1251 :
1252 : /* Preprocess the HAVING clause fully */
1253 396 : whereclause = preprocess_expression(root, havingclause,
1254 : EXPRKIND_QUAL);
1255 : /* ... and move it to WHERE */
1256 396 : parse->jointree->quals = (Node *)
1257 396 : list_concat((List *) parse->jointree->quals,
1258 : (List *) whereclause);
1259 : }
1260 : else
1261 : {
1262 : /* There is an empty grouping set (perhaps implicitly) */
1263 : Node *whereclause;
1264 :
1265 : /* Preprocess the HAVING clause fully */
1266 48 : whereclause = preprocess_expression(root, copyObject(havingclause),
1267 : EXPRKIND_QUAL);
1268 : /* ... and put a copy in WHERE */
1269 96 : parse->jointree->quals = (Node *)
1270 48 : list_concat((List *) parse->jointree->quals,
1271 : (List *) whereclause);
1272 : /* ... and also keep it in HAVING */
1273 48 : newHaving = lappend(newHaving, havingclause);
1274 : }
1275 : }
1276 550172 : parse->havingQual = (Node *) newHaving;
1277 :
1278 : /*
1279 : * If we have any outer joins, try to reduce them to plain inner joins.
1280 : * This step is most easily done after we've done expression
1281 : * preprocessing.
1282 : */
1283 550172 : if (hasOuterJoins)
1284 34926 : reduce_outer_joins(root);
1285 :
1286 : /*
1287 : * If we have any RTE_RESULT relations, see if they can be deleted from
1288 : * the jointree. We also rely on this processing to flatten single-child
1289 : * FromExprs underneath outer joins. This step is most effectively done
1290 : * after we've done expression preprocessing and outer join reduction.
1291 : */
1292 550172 : if (hasResultRTEs || hasOuterJoins)
1293 240290 : remove_useless_result_rtes(root);
1294 :
1295 : /*
1296 : * Do the main planning.
1297 : */
1298 550172 : grouping_planner(root, tuple_fraction, setops);
1299 :
1300 : /*
1301 : * Capture the set of outer-level param IDs we have access to, for use in
1302 : * extParam/allParam calculations later.
1303 : */
1304 550100 : SS_identify_outer_params(root);
1305 :
1306 : /*
1307 : * If any initPlans were created in this query level, adjust the surviving
1308 : * Paths' costs and parallel-safety flags to account for them. The
1309 : * initPlans won't actually get attached to the plan tree till
1310 : * create_plan() runs, but we must include their effects now.
1311 : */
1312 550100 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1313 550100 : SS_charge_for_initplans(root, final_rel);
1314 :
1315 : /*
1316 : * Make sure we've identified the cheapest Path for the final rel. (By
1317 : * doing this here not in grouping_planner, we include initPlan costs in
1318 : * the decision, though it's unlikely that will change anything.)
1319 : */
1320 550100 : set_cheapest(final_rel);
1321 :
1322 550100 : return root;
1323 : }
1324 :
1325 : /*
1326 : * preprocess_expression
1327 : * Do subquery_planner's preprocessing work for an expression,
1328 : * which can be a targetlist, a WHERE clause (including JOIN/ON
1329 : * conditions), a HAVING clause, or a few other things.
1330 : */
1331 : static Node *
1332 4619118 : preprocess_expression(PlannerInfo *root, Node *expr, int kind)
1333 : {
1334 : /*
1335 : * Fall out quickly if expression is empty. This occurs often enough to
1336 : * be worth checking. Note that null->null is the correct conversion for
1337 : * implicit-AND result format, too.
1338 : */
1339 4619118 : if (expr == NULL)
1340 3646438 : return NULL;
1341 :
1342 : /*
1343 : * If the query has any join RTEs, replace join alias variables with
1344 : * base-relation variables. We must do this first, since any expressions
1345 : * we may extract from the joinaliasvars lists have not been preprocessed.
1346 : * For example, if we did this after sublink processing, sublinks expanded
1347 : * out from join aliases would not get processed. But we can skip this in
1348 : * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1349 : * they can't contain any Vars of the current query level.
1350 : */
1351 972680 : if (root->hasJoinRTEs &&
1352 426598 : !(kind == EXPRKIND_RTFUNC ||
1353 213122 : kind == EXPRKIND_VALUES ||
1354 : kind == EXPRKIND_TABLESAMPLE ||
1355 : kind == EXPRKIND_TABLEFUNC))
1356 213104 : expr = flatten_join_alias_vars(root, root->parse, expr);
1357 :
1358 : /*
1359 : * Simplify constant expressions. For function RTEs, this was already
1360 : * done by preprocess_function_rtes. (But note we must do it again for
1361 : * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1362 : * un-simplified subexpressions inserted by flattening of subqueries or
1363 : * join alias variables.)
1364 : *
1365 : * Note: an essential effect of this is to convert named-argument function
1366 : * calls to positional notation and insert the current actual values of
1367 : * any default arguments for functions. To ensure that happens, we *must*
1368 : * process all expressions here. Previous PG versions sometimes skipped
1369 : * const-simplification if it didn't seem worth the trouble, but we can't
1370 : * do that anymore.
1371 : *
1372 : * Note: this also flattens nested AND and OR expressions into N-argument
1373 : * form. All processing of a qual expression after this point must be
1374 : * careful to maintain AND/OR flatness --- that is, do not generate a tree
1375 : * with AND directly under AND, nor OR directly under OR.
1376 : */
1377 972680 : if (kind != EXPRKIND_RTFUNC)
1378 928378 : expr = eval_const_expressions(root, expr);
1379 :
1380 : /*
1381 : * If it's a qual or havingQual, canonicalize it.
1382 : */
1383 968710 : if (kind == EXPRKIND_QUAL)
1384 : {
1385 349522 : expr = (Node *) canonicalize_qual((Expr *) expr, false);
1386 :
1387 : #ifdef OPTIMIZER_DEBUG
1388 : printf("After canonicalize_qual()\n");
1389 : pprint(expr);
1390 : #endif
1391 : }
1392 :
1393 : /*
1394 : * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1395 : * hashfuncid of any that might execute more quickly by using hash lookups
1396 : * instead of a linear search.
1397 : */
1398 968710 : if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1399 : {
1400 886916 : convert_saop_to_hashed_saop(expr);
1401 : }
1402 :
1403 : /* Expand SubLinks to SubPlans */
1404 968710 : if (root->parse->hasSubLinks)
1405 109268 : expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1406 :
1407 : /*
1408 : * XXX do not insert anything here unless you have grokked the comments in
1409 : * SS_replace_correlation_vars ...
1410 : */
1411 :
1412 : /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1413 968710 : if (root->query_level > 1)
1414 191468 : expr = SS_replace_correlation_vars(root, expr);
1415 :
1416 : /*
1417 : * If it's a qual or havingQual, convert it to implicit-AND format. (We
1418 : * don't want to do this before eval_const_expressions, since the latter
1419 : * would be unable to simplify a top-level AND correctly. Also,
1420 : * SS_process_sublinks expects explicit-AND format.)
1421 : */
1422 968710 : if (kind == EXPRKIND_QUAL)
1423 349522 : expr = (Node *) make_ands_implicit((Expr *) expr);
1424 :
1425 968710 : return expr;
1426 : }
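
The implicit-AND convention mentioned in the comments above is easiest to see on a toy input. The following standalone sketch is not part of planner.c; the DemoExpr type and the flatten_ands helper are made up purely for illustration. It flattens the qual "a AND (b AND c)" into the flat clause list {a, b, c} that later planner stages expect, which is roughly what make_ands_implicit plus AND/OR flattening accomplish for real quals.

    #include <stdio.h>

    typedef struct DemoExpr
    {
        const char *leaf;               /* non-NULL for a plain predicate */
        const struct DemoExpr *left;    /* children of an AND node */
        const struct DemoExpr *right;
    } DemoExpr;

    static void
    flatten_ands(const DemoExpr *e, const char **out, int *n)
    {
        if (e->leaf)
            out[(*n)++] = e->leaf;      /* ordinary clause: append it */
        else
        {
            /* AND node: splice its arguments into the same flat list */
            flatten_ands(e->left, out, n);
            flatten_ands(e->right, out, n);
        }
    }

    int
    main(void)
    {
        DemoExpr    a = {"a", NULL, NULL};
        DemoExpr    b = {"b", NULL, NULL};
        DemoExpr    c = {"c", NULL, NULL};
        DemoExpr    bc = {NULL, &b, &c};
        DemoExpr    qual = {NULL, &a, &bc};     /* WHERE a AND (b AND c) */
        const char *list[3];
        int         n = 0;

        flatten_ands(&qual, list, &n);
        for (int i = 0; i < n; i++)
            printf("%s\n", list[i]);            /* a, b, c: implicit-AND list */
        return 0;
    }
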
1427 :
1428 : /*
1429 : * preprocess_qual_conditions
1430 : * Recursively scan the query's jointree and do subquery_planner's
1431 : * preprocessing work on each qual condition found therein.
1432 : */
1433 : static void
1434 1371048 : preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1435 : {
1436 1371048 : if (jtnode == NULL)
1437 0 : return;
1438 1371048 : if (IsA(jtnode, RangeTblRef))
1439 : {
1440 : /* nothing to do here */
1441 : }
1442 670774 : else if (IsA(jtnode, FromExpr))
1443 : {
1444 565646 : FromExpr *f = (FromExpr *) jtnode;
1445 : ListCell *l;
1446 :
1447 1176266 : foreach(l, f->fromlist)
1448 610620 : preprocess_qual_conditions(root, lfirst(l));
1449 :
1450 565646 : f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1451 : }
1452 105128 : else if (IsA(jtnode, JoinExpr))
1453 : {
1454 105128 : JoinExpr *j = (JoinExpr *) jtnode;
1455 :
1456 105128 : preprocess_qual_conditions(root, j->larg);
1457 105128 : preprocess_qual_conditions(root, j->rarg);
1458 :
1459 105128 : j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1460 : }
1461 : else
1462 0 : elog(ERROR, "unrecognized node type: %d",
1463 : (int) nodeTag(jtnode));
1464 : }
1465 :
1466 : /*
1467 : * preprocess_phv_expression
1468 : * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1469 : *
1470 : * If a LATERAL subquery references an output of another subquery, and that
1471 : * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1472 : * join, then we'll push the PlaceHolderVar expression down into the subquery
1473 : * and later pull it back up during find_lateral_references, which runs after
1474 : * subquery_planner has preprocessed all the expressions that were in the
1475 : * current query level to start with. So we need to preprocess it then.
1476 : */
1477 : Expr *
1478 90 : preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1479 : {
1480 90 : return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1481 : }
1482 :
1483 : /*--------------------
1484 : * grouping_planner
1485 : * Perform planning steps related to grouping, aggregation, etc.
1486 : *
1487 : * This function adds all required top-level processing to the scan/join
1488 : * Path(s) produced by query_planner.
1489 : *
1490 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
1491 : * tuple_fraction is interpreted as follows:
1492 : * 0: expect all tuples to be retrieved (normal case)
1493 : * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1494 : * from the plan to be retrieved
1495 : * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1496 : * expected to be retrieved (ie, a LIMIT specification).
1497 : * setops is used for set operation subqueries to provide the subquery with
1498 : * the context in which it's being used so that Paths correctly sorted for the
1499 : * set operation can be generated. NULL when not planning a set operation
1500 : * child, or when a child of a set op that isn't interested in sorted input.
1501 : *
1502 : * Returns nothing; the useful output is in the Paths we attach to the
1503 : * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1504 : * root->processed_tlist contains the final processed targetlist.
1505 : *
1506 : * Note that we have not done set_cheapest() on the final rel; it's convenient
1507 : * to leave this to the caller.
1508 : *--------------------
1509 : */
1510 : static void
1511 550172 : grouping_planner(PlannerInfo *root, double tuple_fraction,
1512 : SetOperationStmt *setops)
1513 : {
1514 550172 : Query *parse = root->parse;
1515 550172 : int64 offset_est = 0;
1516 550172 : int64 count_est = 0;
1517 550172 : double limit_tuples = -1.0;
1518 550172 : bool have_postponed_srfs = false;
1519 : PathTarget *final_target;
1520 : List *final_targets;
1521 : List *final_targets_contain_srfs;
1522 : bool final_target_parallel_safe;
1523 : RelOptInfo *current_rel;
1524 : RelOptInfo *final_rel;
1525 : FinalPathExtraData extra;
1526 : ListCell *lc;
1527 :
1528 : /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1529 550172 : if (parse->limitCount || parse->limitOffset)
1530 : {
1531 5144 : tuple_fraction = preprocess_limit(root, tuple_fraction,
1532 : &offset_est, &count_est);
1533 :
1534 : /*
1535 : * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1536 : * estimate the effects of using a bounded sort.
1537 : */
1538 5144 : if (count_est > 0 && offset_est >= 0)
1539 4592 : limit_tuples = (double) count_est + (double) offset_est;
1540 : }
1541 :
1542 : /* Make tuple_fraction accessible to lower-level routines */
1543 550172 : root->tuple_fraction = tuple_fraction;
1544 :
1545 550172 : if (parse->setOperations)
1546 : {
1547 : /*
1548 : * Construct Paths for set operations. The results will not need any
1549 : * work except perhaps a top-level sort and/or LIMIT. Note that any
1550 : * special work for recursive unions is the responsibility of
1551 : * plan_set_operations.
1552 : */
1553 6222 : current_rel = plan_set_operations(root);
1554 :
1555 : /*
1556 : * We should not need to call preprocess_targetlist, since we must be
1557 : * in a SELECT query node. Instead, use the processed_tlist returned
1558 : * by plan_set_operations (since this tells whether it returned any
1559 : * resjunk columns!), and transfer any sort key information from the
1560 : * original tlist.
1561 : */
1562 : Assert(parse->commandType == CMD_SELECT);
1563 :
1564 : /* for safety, copy processed_tlist instead of modifying in-place */
1565 6216 : root->processed_tlist =
1566 6216 : postprocess_setop_tlist(copyObject(root->processed_tlist),
1567 : parse->targetList);
1568 :
1569 : /* Also extract the PathTarget form of the setop result tlist */
1570 6216 : final_target = current_rel->cheapest_total_path->pathtarget;
1571 :
1572 : /* And check whether it's parallel safe */
1573 : final_target_parallel_safe =
1574 6216 : is_parallel_safe(root, (Node *) final_target->exprs);
1575 :
1576 : /* The setop result tlist couldn't contain any SRFs */
1577 : Assert(!parse->hasTargetSRFs);
1578 6216 : final_targets = final_targets_contain_srfs = NIL;
1579 :
1580 : /*
1581 : * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1582 : * checked already, but let's make sure).
1583 : */
1584 6216 : if (parse->rowMarks)
1585 0 : ereport(ERROR,
1586 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1587 : /*------
1588 : translator: %s is a SQL row locking clause such as FOR UPDATE */
1589 : errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1590 : LCS_asString(linitial_node(RowMarkClause,
1591 : parse->rowMarks)->strength))));
1592 :
1593 : /*
1594 : * Calculate pathkeys that represent result ordering requirements
1595 : */
1596 : Assert(parse->distinctClause == NIL);
1597 6216 : root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1598 : parse->sortClause,
1599 : root->processed_tlist);
1600 : }
1601 : else
1602 : {
1603 : /* No set operations, do regular planning */
1604 : PathTarget *sort_input_target;
1605 : List *sort_input_targets;
1606 : List *sort_input_targets_contain_srfs;
1607 : bool sort_input_target_parallel_safe;
1608 : PathTarget *grouping_target;
1609 : List *grouping_targets;
1610 : List *grouping_targets_contain_srfs;
1611 : bool grouping_target_parallel_safe;
1612 : PathTarget *scanjoin_target;
1613 : List *scanjoin_targets;
1614 : List *scanjoin_targets_contain_srfs;
1615 : bool scanjoin_target_parallel_safe;
1616 : bool scanjoin_target_same_exprs;
1617 : bool have_grouping;
1618 543950 : WindowFuncLists *wflists = NULL;
1619 543950 : List *activeWindows = NIL;
1620 543950 : grouping_sets_data *gset_data = NULL;
1621 : standard_qp_extra qp_extra;
1622 :
1623 : /* A recursive query should always have setOperations */
1624 : Assert(!root->hasRecursion);
1625 :
1626 : /* Preprocess grouping sets and GROUP BY clause, if any */
1627 543950 : if (parse->groupingSets)
1628 : {
1629 1020 : gset_data = preprocess_grouping_sets(root);
1630 : }
1631 542930 : else if (parse->groupClause)
1632 : {
1633 : /* Preprocess regular GROUP BY clause, if any */
1634 4132 : root->processed_groupClause = preprocess_groupclause(root, NIL);
1635 : }
1636 :
1637 : /*
1638 : * Preprocess targetlist. Note that much of the remaining planning
1639 : * work will be done with the PathTarget representation of tlists, but
1640 : * we must also maintain the full representation of the final tlist so
1641 : * that we can transfer its decoration (resnames etc) to the topmost
1642 : * tlist of the finished Plan. This is kept in processed_tlist.
1643 : */
1644 543944 : preprocess_targetlist(root);
1645 :
1646 : /*
1647 : * Mark all the aggregates with resolved aggtranstypes, and detect
1648 : * aggregates that are duplicates or can share transition state. We
1649 : * must do this before slicing and dicing the tlist into various
1650 : * pathtargets, else some copies of the Aggref nodes might escape
1651 : * being marked.
1652 : */
1653 543944 : if (parse->hasAggs)
1654 : {
1655 45208 : preprocess_aggrefs(root, (Node *) root->processed_tlist);
1656 45208 : preprocess_aggrefs(root, (Node *) parse->havingQual);
1657 : }
1658 :
1659 : /*
1660 : * Locate any window functions in the tlist. (We don't need to look
1661 : * anywhere else, since expressions used in ORDER BY will be in there
1662 : * too.) Note that they could all have been eliminated by constant
1663 : * folding, in which case we don't need to do any more work.
1664 : */
1665 543944 : if (parse->hasWindowFuncs)
1666 : {
1667 2582 : wflists = find_window_functions((Node *) root->processed_tlist,
1668 2582 : list_length(parse->windowClause));
1669 2582 : if (wflists->numWindowFuncs > 0)
1670 : {
1671 : /*
1672 : * See if any modifications can be made to each WindowClause
1673 : * to allow the executor to execute the WindowFuncs more
1674 : * quickly.
1675 : */
1676 2576 : optimize_window_clauses(root, wflists);
1677 :
1678 : /* Extract the list of windows actually in use. */
1679 2576 : activeWindows = select_active_windows(root, wflists);
1680 :
1681 : /* Make sure they all have names, for EXPLAIN's use. */
1682 2576 : name_active_windows(activeWindows);
1683 : }
1684 : else
1685 6 : parse->hasWindowFuncs = false;
1686 : }
1687 :
1688 : /*
1689 : * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1690 : * adding logic between here and the query_planner() call. Anything
1691 : * that is needed in MIN/MAX-optimizable cases will have to be
1692 : * duplicated in planagg.c.
1693 : */
1694 543944 : if (parse->hasAggs)
1695 45208 : preprocess_minmax_aggregates(root);
1696 :
1697 : /*
1698 : * Figure out whether there's a hard limit on the number of rows that
1699 : * query_planner's result subplan needs to return. Even if we know a
1700 : * hard limit overall, it doesn't apply if the query has any
1701 : * grouping/aggregation operations, or SRFs in the tlist.
1702 : */
1703 543944 : if (parse->groupClause ||
1704 538876 : parse->groupingSets ||
1705 538798 : parse->distinctClause ||
1706 535818 : parse->hasAggs ||
1707 495048 : parse->hasWindowFuncs ||
1708 492616 : parse->hasTargetSRFs ||
1709 481046 : root->hasHavingQual)
1710 62922 : root->limit_tuples = -1.0;
1711 : else
1712 481022 : root->limit_tuples = limit_tuples;
1713 :
1714 : /* Set up data needed by standard_qp_callback */
1715 543944 : qp_extra.activeWindows = activeWindows;
1716 543944 : qp_extra.gset_data = gset_data;
1717 :
1718 : /*
1719 : * If we're a subquery for a set operation, store the SetOperationStmt
1720 : * in qp_extra.
1721 : */
1722 543944 : qp_extra.setop = setops;
1723 :
1724 : /*
1725 : * Generate the best unsorted and presorted paths for the scan/join
1726 : * portion of this Query, ie the processing represented by the
1727 : * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1728 : * We also generate (in standard_qp_callback) pathkey representations
1729 : * of the query's sort clause, distinct clause, etc.
1730 : */
1731 543944 : current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1732 :
1733 : /*
1734 : * Convert the query's result tlist into PathTarget format.
1735 : *
1736 : * Note: this cannot be done before query_planner() has performed
1737 : * appendrel expansion, because that might add resjunk entries to
1738 : * root->processed_tlist. Waiting till afterwards is also helpful
1739 : * because the target width estimates can use per-Var width numbers
1740 : * that were obtained within query_planner().
1741 : */
1742 543890 : final_target = create_pathtarget(root, root->processed_tlist);
1743 : final_target_parallel_safe =
1744 543890 : is_parallel_safe(root, (Node *) final_target->exprs);
1745 :
1746 : /*
1747 : * If ORDER BY was given, consider whether we should use a post-sort
1748 : * projection, and compute the adjusted target for preceding steps if
1749 : * so.
1750 : */
1751 543890 : if (parse->sortClause)
1752 : {
1753 72048 : sort_input_target = make_sort_input_target(root,
1754 : final_target,
1755 : &have_postponed_srfs);
1756 : sort_input_target_parallel_safe =
1757 72048 : is_parallel_safe(root, (Node *) sort_input_target->exprs);
1758 : }
1759 : else
1760 : {
1761 471842 : sort_input_target = final_target;
1762 471842 : sort_input_target_parallel_safe = final_target_parallel_safe;
1763 : }
1764 :
1765 : /*
1766 : * If we have window functions to deal with, the output from any
1767 : * grouping step needs to be what the window functions want;
1768 : * otherwise, it should be sort_input_target.
1769 : */
1770 543890 : if (activeWindows)
1771 : {
1772 2576 : grouping_target = make_window_input_target(root,
1773 : final_target,
1774 : activeWindows);
1775 : grouping_target_parallel_safe =
1776 2576 : is_parallel_safe(root, (Node *) grouping_target->exprs);
1777 : }
1778 : else
1779 : {
1780 541314 : grouping_target = sort_input_target;
1781 541314 : grouping_target_parallel_safe = sort_input_target_parallel_safe;
1782 : }
1783 :
1784 : /*
1785 : * If we have grouping or aggregation to do, the topmost scan/join
1786 : * plan node must emit what the grouping step wants; otherwise, it
1787 : * should emit grouping_target.
1788 : */
1789 538822 : have_grouping = (parse->groupClause || parse->groupingSets ||
1790 1082712 : parse->hasAggs || root->hasHavingQual);
1791 543890 : if (have_grouping)
1792 : {
1793 45978 : scanjoin_target = make_group_input_target(root, final_target);
1794 : scanjoin_target_parallel_safe =
1795 45978 : is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1796 : }
1797 : else
1798 : {
1799 497912 : scanjoin_target = grouping_target;
1800 497912 : scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1801 : }
1802 :
1803 : /*
1804 : * If there are any SRFs in the targetlist, we must separate each of
1805 : * these PathTargets into SRF-computing and SRF-free targets. Replace
1806 : * each of the named targets with a SRF-free version, and remember the
1807 : * list of additional projection steps we need to add afterwards.
1808 : */
1809 543890 : if (parse->hasTargetSRFs)
1810 : {
1811 : /* final_target doesn't recompute any SRFs in sort_input_target */
1812 12088 : split_pathtarget_at_srfs(root, final_target, sort_input_target,
1813 : &final_targets,
1814 : &final_targets_contain_srfs);
1815 12088 : final_target = linitial_node(PathTarget, final_targets);
1816 : Assert(!linitial_int(final_targets_contain_srfs));
1817 : /* likewise for sort_input_target vs. grouping_target */
1818 12088 : split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1819 : &sort_input_targets,
1820 : &sort_input_targets_contain_srfs);
1821 12088 : sort_input_target = linitial_node(PathTarget, sort_input_targets);
1822 : Assert(!linitial_int(sort_input_targets_contain_srfs));
1823 : /* likewise for grouping_target vs. scanjoin_target */
 1824         12088 :             split_pathtarget_at_srfs(root,
1825 : grouping_target, scanjoin_target,
1826 : &grouping_targets,
1827 : &grouping_targets_contain_srfs);
1828 12088 : grouping_target = linitial_node(PathTarget, grouping_targets);
1829 : Assert(!linitial_int(grouping_targets_contain_srfs));
1830 : /* scanjoin_target will not have any SRFs precomputed for it */
1831 12088 : split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1832 : &scanjoin_targets,
1833 : &scanjoin_targets_contain_srfs);
1834 12088 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1835 : Assert(!linitial_int(scanjoin_targets_contain_srfs));
1836 : }
1837 : else
1838 : {
1839 : /* initialize lists; for most of these, dummy values are OK */
1840 531802 : final_targets = final_targets_contain_srfs = NIL;
1841 531802 : sort_input_targets = sort_input_targets_contain_srfs = NIL;
1842 531802 : grouping_targets = grouping_targets_contain_srfs = NIL;
1843 531802 : scanjoin_targets = list_make1(scanjoin_target);
1844 531802 : scanjoin_targets_contain_srfs = NIL;
1845 : }
1846 :
1847 : /* Apply scan/join target. */
1848 543890 : scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1849 543890 : && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1850 543890 : apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1851 : scanjoin_targets_contain_srfs,
1852 : scanjoin_target_parallel_safe,
1853 : scanjoin_target_same_exprs);
1854 :
1855 : /*
1856 : * Save the various upper-rel PathTargets we just computed into
1857 : * root->upper_targets[]. The core code doesn't use this, but it
1858 : * provides a convenient place for extensions to get at the info. For
1859 : * consistency, we save all the intermediate targets, even though some
1860 : * of the corresponding upperrels might not be needed for this query.
1861 : */
1862 543890 : root->upper_targets[UPPERREL_FINAL] = final_target;
1863 543890 : root->upper_targets[UPPERREL_ORDERED] = final_target;
1864 543890 : root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1865 543890 : root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1866 543890 : root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1867 543890 : root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1868 :
1869 : /*
1870 : * If we have grouping and/or aggregation, consider ways to implement
1871 : * that. We build a new upperrel representing the output of this
1872 : * phase.
1873 : */
1874 543890 : if (have_grouping)
1875 : {
1876 45978 : current_rel = create_grouping_paths(root,
1877 : current_rel,
1878 : grouping_target,
1879 : grouping_target_parallel_safe,
1880 : gset_data);
1881 : /* Fix things up if grouping_target contains SRFs */
1882 45972 : if (parse->hasTargetSRFs)
1883 476 : adjust_paths_for_srfs(root, current_rel,
1884 : grouping_targets,
1885 : grouping_targets_contain_srfs);
1886 : }
1887 :
1888 : /*
1889 : * If we have window functions, consider ways to implement those. We
1890 : * build a new upperrel representing the output of this phase.
1891 : */
1892 543884 : if (activeWindows)
1893 : {
1894 2576 : current_rel = create_window_paths(root,
1895 : current_rel,
1896 : grouping_target,
1897 : sort_input_target,
1898 : sort_input_target_parallel_safe,
1899 : wflists,
1900 : activeWindows);
1901 : /* Fix things up if sort_input_target contains SRFs */
1902 2576 : if (parse->hasTargetSRFs)
1903 12 : adjust_paths_for_srfs(root, current_rel,
1904 : sort_input_targets,
1905 : sort_input_targets_contain_srfs);
1906 : }
1907 :
1908 : /*
1909 : * If there is a DISTINCT clause, consider ways to implement that. We
1910 : * build a new upperrel representing the output of this phase.
1911 : */
1912 543884 : if (parse->distinctClause)
1913 : {
1914 3014 : current_rel = create_distinct_paths(root,
1915 : current_rel,
1916 : sort_input_target);
1917 : }
1918 : } /* end of if (setOperations) */
1919 :
1920 : /*
1921 : * If ORDER BY was given, consider ways to implement that, and generate a
1922 : * new upperrel containing only paths that emit the correct ordering and
1923 : * project the correct final_target. We can apply the original
1924 : * limit_tuples limit in sort costing here, but only if there are no
1925 : * postponed SRFs.
1926 : */
1927 550100 : if (parse->sortClause)
1928 : {
1929 76020 : current_rel = create_ordered_paths(root,
1930 : current_rel,
1931 : final_target,
1932 : final_target_parallel_safe,
1933 : have_postponed_srfs ? -1.0 :
1934 : limit_tuples);
1935 : /* Fix things up if final_target contains SRFs */
1936 76020 : if (parse->hasTargetSRFs)
1937 220 : adjust_paths_for_srfs(root, current_rel,
1938 : final_targets,
1939 : final_targets_contain_srfs);
1940 : }
1941 :
1942 : /*
1943 : * Now we are prepared to build the final-output upperrel.
1944 : */
1945 550100 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1946 :
1947 : /*
1948 : * If the input rel is marked consider_parallel and there's nothing that's
1949 : * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1950 : * consider_parallel as well. Note that if the query has rowMarks or is
1951 : * not a SELECT, consider_parallel will be false for every relation in the
1952 : * query.
1953 : */
1954 730372 : if (current_rel->consider_parallel &&
1955 360520 : is_parallel_safe(root, parse->limitOffset) &&
1956 180248 : is_parallel_safe(root, parse->limitCount))
1957 180242 : final_rel->consider_parallel = true;
1958 :
1959 : /*
1960 : * If the current_rel belongs to a single FDW, so does the final_rel.
1961 : */
1962 550100 : final_rel->serverid = current_rel->serverid;
1963 550100 : final_rel->userid = current_rel->userid;
1964 550100 : final_rel->useridiscurrent = current_rel->useridiscurrent;
1965 550100 : final_rel->fdwroutine = current_rel->fdwroutine;
1966 :
1967 : /*
1968 : * Generate paths for the final_rel. Insert all surviving paths, with
1969 : * LockRows, Limit, and/or ModifyTable steps added if needed.
1970 : */
1971 1121462 : foreach(lc, current_rel->pathlist)
1972 : {
1973 571362 : Path *path = (Path *) lfirst(lc);
1974 :
1975 : /*
1976 : * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1977 : * (Note: we intentionally test parse->rowMarks not root->rowMarks
1978 : * here. If there are only non-locking rowmarks, they should be
1979 : * handled by the ModifyTable node instead. However, root->rowMarks
1980 : * is what goes into the LockRows node.)
1981 : */
1982 571362 : if (parse->rowMarks)
1983 : {
1984 14086 : path = (Path *) create_lockrows_path(root, final_rel, path,
1985 : root->rowMarks,
1986 : assign_special_exec_param(root));
1987 : }
1988 :
1989 : /*
1990 : * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1991 : */
1992 571362 : if (limit_needed(parse))
1993 : {
1994 6166 : path = (Path *) create_limit_path(root, final_rel, path,
1995 : parse->limitOffset,
1996 : parse->limitCount,
1997 : parse->limitOption,
1998 : offset_est, count_est);
1999 : }
2000 :
2001 : /*
2002 : * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
2003 : */
2004 571362 : if (parse->commandType != CMD_SELECT)
2005 : {
2006 : Index rootRelation;
2007 95458 : List *resultRelations = NIL;
2008 95458 : List *updateColnosLists = NIL;
2009 95458 : List *withCheckOptionLists = NIL;
2010 95458 : List *returningLists = NIL;
2011 95458 : List *mergeActionLists = NIL;
2012 95458 : List *mergeJoinConditions = NIL;
2013 : List *rowMarks;
2014 :
2015 95458 : if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
2016 : {
2017 : /* Inherited UPDATE/DELETE/MERGE */
2018 2916 : RelOptInfo *top_result_rel = find_base_rel(root,
2019 : parse->resultRelation);
2020 2916 : int resultRelation = -1;
2021 :
2022 : /* Pass the root result rel forward to the executor. */
2023 2916 : rootRelation = parse->resultRelation;
2024 :
2025 : /* Add only leaf children to ModifyTable. */
2026 8474 : while ((resultRelation = bms_next_member(root->leaf_result_relids,
2027 8474 : resultRelation)) >= 0)
2028 : {
2029 5558 : RelOptInfo *this_result_rel = find_base_rel(root,
2030 : resultRelation);
2031 :
2032 : /*
2033 : * Also exclude any leaf rels that have turned dummy since
2034 : * being added to the list, for example, by being excluded
2035 : * by constraint exclusion.
2036 : */
2037 5558 : if (IS_DUMMY_REL(this_result_rel))
2038 180 : continue;
2039 :
2040 : /* Build per-target-rel lists needed by ModifyTable */
2041 5378 : resultRelations = lappend_int(resultRelations,
2042 : resultRelation);
2043 5378 : if (parse->commandType == CMD_UPDATE)
2044 : {
2045 3696 : List *update_colnos = root->update_colnos;
2046 :
2047 3696 : if (this_result_rel != top_result_rel)
2048 : update_colnos =
2049 3696 : adjust_inherited_attnums_multilevel(root,
2050 : update_colnos,
2051 : this_result_rel->relid,
2052 : top_result_rel->relid);
2053 3696 : updateColnosLists = lappend(updateColnosLists,
2054 : update_colnos);
2055 : }
2056 5378 : if (parse->withCheckOptions)
2057 : {
2058 504 : List *withCheckOptions = parse->withCheckOptions;
2059 :
2060 504 : if (this_result_rel != top_result_rel)
2061 : withCheckOptions = (List *)
2062 504 : adjust_appendrel_attrs_multilevel(root,
2063 : (Node *) withCheckOptions,
2064 : this_result_rel,
2065 : top_result_rel);
2066 504 : withCheckOptionLists = lappend(withCheckOptionLists,
2067 : withCheckOptions);
2068 : }
2069 5378 : if (parse->returningList)
2070 : {
2071 846 : List *returningList = parse->returningList;
2072 :
2073 846 : if (this_result_rel != top_result_rel)
2074 : returningList = (List *)
2075 846 : adjust_appendrel_attrs_multilevel(root,
2076 : (Node *) returningList,
2077 : this_result_rel,
2078 : top_result_rel);
2079 846 : returningLists = lappend(returningLists,
2080 : returningList);
2081 : }
2082 5378 : if (parse->mergeActionList)
2083 : {
2084 : ListCell *l;
2085 542 : List *mergeActionList = NIL;
2086 :
2087 : /*
2088 : * Copy MergeActions and translate stuff that
2089 : * references attribute numbers.
2090 : */
2091 1692 : foreach(l, parse->mergeActionList)
2092 : {
2093 1150 : MergeAction *action = lfirst(l),
2094 1150 : *leaf_action = copyObject(action);
2095 :
2096 1150 : leaf_action->qual =
2097 1150 : adjust_appendrel_attrs_multilevel(root,
2098 : (Node *) action->qual,
2099 : this_result_rel,
2100 : top_result_rel);
2101 1150 : leaf_action->targetList = (List *)
2102 1150 : adjust_appendrel_attrs_multilevel(root,
2103 1150 : (Node *) action->targetList,
2104 : this_result_rel,
2105 : top_result_rel);
2106 1150 : if (leaf_action->commandType == CMD_UPDATE)
2107 642 : leaf_action->updateColnos =
2108 642 : adjust_inherited_attnums_multilevel(root,
2109 : action->updateColnos,
2110 : this_result_rel->relid,
2111 : top_result_rel->relid);
2112 1150 : mergeActionList = lappend(mergeActionList,
2113 : leaf_action);
2114 : }
2115 :
2116 542 : mergeActionLists = lappend(mergeActionLists,
2117 : mergeActionList);
2118 : }
2119 5378 : if (parse->commandType == CMD_MERGE)
2120 : {
2121 542 : Node *mergeJoinCondition = parse->mergeJoinCondition;
2122 :
2123 542 : if (this_result_rel != top_result_rel)
2124 : mergeJoinCondition =
2125 542 : adjust_appendrel_attrs_multilevel(root,
2126 : mergeJoinCondition,
2127 : this_result_rel,
2128 : top_result_rel);
2129 542 : mergeJoinConditions = lappend(mergeJoinConditions,
2130 : mergeJoinCondition);
2131 : }
2132 : }
2133 :
2134 2916 : if (resultRelations == NIL)
2135 : {
2136 : /*
2137 : * We managed to exclude every child rel, so generate a
2138 : * dummy one-relation plan using info for the top target
2139 : * rel (even though that may not be a leaf target).
2140 : * Although it's clear that no data will be updated or
2141 : * deleted, we still need to have a ModifyTable node so
2142 : * that any statement triggers will be executed. (This
2143 : * could be cleaner if we fixed nodeModifyTable.c to allow
2144 : * zero target relations, but that probably wouldn't be a
2145 : * net win.)
2146 : */
2147 36 : resultRelations = list_make1_int(parse->resultRelation);
2148 36 : if (parse->commandType == CMD_UPDATE)
2149 32 : updateColnosLists = list_make1(root->update_colnos);
2150 36 : if (parse->withCheckOptions)
2151 0 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2152 36 : if (parse->returningList)
2153 18 : returningLists = list_make1(parse->returningList);
2154 36 : if (parse->mergeActionList)
2155 2 : mergeActionLists = list_make1(parse->mergeActionList);
2156 36 : if (parse->commandType == CMD_MERGE)
2157 2 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2158 : }
2159 : }
2160 : else
2161 : {
2162 : /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2163 92542 : rootRelation = 0; /* there's no separate root rel */
2164 92542 : resultRelations = list_make1_int(parse->resultRelation);
2165 92542 : if (parse->commandType == CMD_UPDATE)
2166 12688 : updateColnosLists = list_make1(root->update_colnos);
2167 92542 : if (parse->withCheckOptions)
2168 1040 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2169 92542 : if (parse->returningList)
2170 2562 : returningLists = list_make1(parse->returningList);
2171 92542 : if (parse->mergeActionList)
2172 1602 : mergeActionLists = list_make1(parse->mergeActionList);
2173 92542 : if (parse->commandType == CMD_MERGE)
2174 1602 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2175 : }
2176 :
2177 : /*
2178 : * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2179 : * will have dealt with fetching non-locked marked rows, else we
2180 : * need to have ModifyTable do that.
2181 : */
2182 95458 : if (parse->rowMarks)
2183 0 : rowMarks = NIL;
2184 : else
2185 95458 : rowMarks = root->rowMarks;
2186 :
2187 : path = (Path *)
2188 95458 : create_modifytable_path(root, final_rel,
2189 : path,
2190 : parse->commandType,
2191 95458 : parse->canSetTag,
2192 95458 : parse->resultRelation,
2193 : rootRelation,
2194 : resultRelations,
2195 : updateColnosLists,
2196 : withCheckOptionLists,
2197 : returningLists,
2198 : rowMarks,
2199 : parse->onConflict,
2200 : mergeActionLists,
2201 : mergeJoinConditions,
2202 : assign_special_exec_param(root));
2203 : }
2204 :
2205 : /* And shove it into final_rel */
2206 571362 : add_path(final_rel, path);
2207 : }
2208 :
2209 : /*
2210 : * Generate partial paths for final_rel, too, if outer query levels might
2211 : * be able to make use of them.
2212 : */
2213 550100 : if (final_rel->consider_parallel && root->query_level > 1 &&
2214 31044 : !limit_needed(parse))
2215 : {
2216 : Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2217 30978 : foreach(lc, current_rel->partial_pathlist)
2218 : {
2219 120 : Path *partial_path = (Path *) lfirst(lc);
2220 :
2221 120 : add_partial_path(final_rel, partial_path);
2222 : }
2223 : }
2224 :
2225 550100 : extra.limit_needed = limit_needed(parse);
2226 550100 : extra.limit_tuples = limit_tuples;
2227 550100 : extra.count_est = count_est;
2228 550100 : extra.offset_est = offset_est;
2229 :
2230 : /*
2231 : * If there is an FDW that's responsible for all baserels of the query,
2232 : * let it consider adding ForeignPaths.
2233 : */
2234 550100 : if (final_rel->fdwroutine &&
2235 1264 : final_rel->fdwroutine->GetForeignUpperPaths)
2236 1192 : final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2237 : current_rel, final_rel,
2238 : &extra);
2239 :
2240 : /* Let extensions possibly add some more paths */
2241 550100 : if (create_upper_paths_hook)
2242 0 : (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2243 : current_rel, final_rel, &extra);
2244 :
2245 : /* Note: currently, we leave it to callers to do set_cheapest() */
2246 550100 : }
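
As a rough summary of the stacking order implemented above, the standalone sketch below (not part of planner.c; the query flags and formatting are illustrative) prints the upper-rel stages a query such as "SELECT a, count(*) FROM t GROUP BY a ORDER BY a LIMIT 10" would pass through, in the order grouping_planner considers them.

    #include <stdio.h>

    int
    main(void)
    {
        /* illustrative flags for: SELECT a, count(*) FROM t GROUP BY a ORDER BY a LIMIT 10 */
        int         has_grouping = 1;
        int         has_windows = 0;
        int         has_distinct = 0;
        int         has_order_by = 1;
        int         has_limit = 1;

        printf("scan/join paths from query_planner()\n");
        if (has_grouping)
            printf("  -> UPPERREL_GROUP_AGG  (create_grouping_paths)\n");
        if (has_windows)
            printf("  -> UPPERREL_WINDOW     (create_window_paths)\n");
        if (has_distinct)
            printf("  -> UPPERREL_DISTINCT   (create_distinct_paths)\n");
        if (has_order_by)
            printf("  -> UPPERREL_ORDERED    (create_ordered_paths)\n");
        printf("  -> UPPERREL_FINAL      (LockRows/Limit/ModifyTable as needed%s)\n",
               has_limit ? "; Limit added here" : "");
        return 0;
    }
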
2247 :
2248 : /*
2249 : * Do preprocessing for groupingSets clause and related data.
2250 : *
2251 : * We expect that parse->groupingSets has already been expanded into a flat
2252 : * list of grouping sets (that is, just integer Lists of ressortgroupref
2253 : * numbers) by expand_grouping_sets(). This function handles the preliminary
2254 : * steps of organizing the grouping sets into lists of rollups, and preparing
2255 : * annotations which will later be filled in with size estimates.
2256 : */
2257 : static grouping_sets_data *
2258 1020 : preprocess_grouping_sets(PlannerInfo *root)
2259 : {
2260 1020 : Query *parse = root->parse;
2261 : List *sets;
2262 1020 : int maxref = 0;
2263 : ListCell *lc_set;
2264 1020 : grouping_sets_data *gd = palloc0_object(grouping_sets_data);
2265 :
2266 : /*
2267 : * We don't currently make any attempt to optimize the groupClause when
2268 : * there are grouping sets, so just duplicate it in processed_groupClause.
2269 : */
2270 1020 : root->processed_groupClause = parse->groupClause;
2271 :
2272 : /* Detect unhashable and unsortable grouping expressions */
2273 1020 : gd->any_hashable = false;
2274 1020 : gd->unhashable_refs = NULL;
2275 1020 : gd->unsortable_refs = NULL;
2276 1020 : gd->unsortable_sets = NIL;
2277 :
2278 1020 : if (parse->groupClause)
2279 : {
2280 : ListCell *lc;
2281 :
2282 2980 : foreach(lc, parse->groupClause)
2283 : {
2284 2038 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2285 2038 : Index ref = gc->tleSortGroupRef;
2286 :
2287 2038 : if (ref > maxref)
2288 1990 : maxref = ref;
2289 :
2290 2038 : if (!gc->hashable)
2291 30 : gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2292 :
2293 2038 : if (!OidIsValid(gc->sortop))
2294 42 : gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2295 : }
2296 : }
2297 :
2298 : /* Allocate workspace array for remapping */
2299 1020 : gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2300 :
2301 : /*
2302 : * If we have any unsortable sets, we must extract them before trying to
2303 : * prepare rollups. Unsortable sets don't go through
2304 : * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2305 : * here.
2306 : */
2307 1020 : if (!bms_is_empty(gd->unsortable_refs))
2308 : {
2309 42 : List *sortable_sets = NIL;
2310 : ListCell *lc;
2311 :
2312 126 : foreach(lc, parse->groupingSets)
2313 : {
2314 90 : List *gset = (List *) lfirst(lc);
2315 :
2316 90 : if (bms_overlap_list(gd->unsortable_refs, gset))
2317 : {
2318 48 : GroupingSetData *gs = makeNode(GroupingSetData);
2319 :
2320 48 : gs->set = gset;
2321 48 : gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2322 :
2323 : /*
2324 : * We must enforce here that an unsortable set is hashable;
2325 : * later code assumes this. Parse analysis only checks that
2326 : * every individual column is either hashable or sortable.
2327 : *
2328 : * Note that passing this test doesn't guarantee we can
2329 : * generate a plan; there might be other showstoppers.
2330 : */
2331 48 : if (bms_overlap_list(gd->unhashable_refs, gset))
2332 6 : ereport(ERROR,
2333 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2334 : errmsg("could not implement GROUP BY"),
2335 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2336 : }
2337 : else
2338 42 : sortable_sets = lappend(sortable_sets, gset);
2339 : }
2340 :
2341 36 : if (sortable_sets)
2342 30 : sets = extract_rollup_sets(sortable_sets);
2343 : else
2344 6 : sets = NIL;
2345 : }
2346 : else
2347 978 : sets = extract_rollup_sets(parse->groupingSets);
2348 :
2349 2654 : foreach(lc_set, sets)
2350 : {
2351 1640 : List *current_sets = (List *) lfirst(lc_set);
2352 1640 : RollupData *rollup = makeNode(RollupData);
2353 : GroupingSetData *gs;
2354 :
2355 : /*
2356 : * Reorder the current list of grouping sets into correct prefix
2357 : * order. If only one aggregation pass is needed, try to make the
2358 : * list match the ORDER BY clause; if more than one pass is needed, we
2359 : * don't bother with that.
2360 : *
2361 : * Note that this reorders the sets from smallest-member-first to
2362 : * largest-member-first, and applies the GroupingSetData annotations,
2363 : * though the data will be filled in later.
2364 : */
2365 1640 : current_sets = reorder_grouping_sets(current_sets,
2366 1640 : (list_length(sets) == 1
2367 : ? parse->sortClause
2368 : : NIL));
2369 :
2370 : /*
2371 : * Get the initial (and therefore largest) grouping set.
2372 : */
2373 1640 : gs = linitial_node(GroupingSetData, current_sets);
2374 :
2375 : /*
2376 : * Order the groupClause appropriately. If the first grouping set is
2377 : * empty, then the groupClause must also be empty; otherwise we have
2378 : * to force the groupClause to match that grouping set's order.
2379 : *
2380 : * (The first grouping set can be empty even though parse->groupClause
2381 : * is not empty only if all non-empty grouping sets are unsortable.
2382 : * The groupClauses for hashed grouping sets are built later on.)
2383 : */
2384 1640 : if (gs->set)
2385 1562 : rollup->groupClause = preprocess_groupclause(root, gs->set);
2386 : else
2387 78 : rollup->groupClause = NIL;
2388 :
2389 : /*
2390 : * Is it hashable? We pretend empty sets are hashable even though we
2391 : * actually force them not to be hashed later. But don't bother if
2392 : * there's nothing but empty sets (since in that case we can't hash
2393 : * anything).
2394 : */
2395 1640 : if (gs->set &&
2396 1562 : !bms_overlap_list(gd->unhashable_refs, gs->set))
2397 : {
2398 1538 : rollup->hashable = true;
2399 1538 : gd->any_hashable = true;
2400 : }
2401 :
2402 : /*
2403 : * Now that we've pinned down an order for the groupClause for this
2404 : * list of grouping sets, we need to remap the entries in the grouping
2405 : * sets from sortgrouprefs to plain indices (0-based) into the
2406 : * groupClause for this collection of grouping sets. We keep the
2407 : * original form for later use, though.
2408 : */
2409 1640 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2410 : current_sets,
2411 : gd->tleref_to_colnum_map);
2412 1640 : rollup->gsets_data = current_sets;
2413 :
2414 1640 : gd->rollups = lappend(gd->rollups, rollup);
2415 : }
2416 :
2417 1014 : if (gd->unsortable_sets)
2418 : {
2419 : /*
2420 : * We have not yet pinned down a groupclause for this, but we will
2421 : * need index-based lists for estimation purposes. Construct
2422 : * hash_sets_idx based on the entire original groupclause for now.
2423 : */
2424 36 : gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2425 : gd->unsortable_sets,
2426 : gd->tleref_to_colnum_map);
2427 36 : gd->any_hashable = true;
2428 : }
2429 :
2430 1014 : return gd;
2431 : }
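
The rollup extraction above relies on a simple ordering property: a list of grouping sets, smallest first, can be computed in a single sorted aggregation pass only if each set is a subset of the next, i.e. the sets form a chain under inclusion. The standalone sketch below is not part of planner.c; the bitmask encoding and column assignments are illustrative. It shows that ROLLUP(a,b) forms one chain while CUBE(a,b) does not and therefore needs more than one rollup list.

    #include <stdio.h>

    static int
    is_single_rollup(const unsigned sets[], int nsets)
    {
        for (int i = 1; i < nsets; i++)
            if ((sets[i - 1] & ~sets[i]) != 0)
                return 0;           /* not a subset of its successor */
        return 1;
    }

    int
    main(void)
    {
        /* columns encoded as bits: a = 0x1, b = 0x2 (illustrative) */
        unsigned    rollup_ab[] = {0, 1, 3};    /* ROLLUP(a,b): (), (a), (a,b) */
        unsigned    cube_ab[] = {0, 1, 2, 3};   /* CUBE(a,b) adds (b) */

        printf("ROLLUP(a,b): %s\n",
               is_single_rollup(rollup_ab, 3) ? "one pass" : "needs split");
        printf("CUBE(a,b):   %s\n",
               is_single_rollup(cube_ab, 4) ? "one pass" : "needs split");
        return 0;
    }
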
2432 :
2433 : /*
2434 : * Given a groupclause and a list of GroupingSetData, return equivalent sets
2435 : * (without annotation) mapped to indexes into the given groupclause.
2436 : */
2437 : static List *
2438 4628 : remap_to_groupclause_idx(List *groupClause,
2439 : List *gsets,
2440 : int *tleref_to_colnum_map)
2441 : {
2442 4628 : int ref = 0;
2443 4628 : List *result = NIL;
2444 : ListCell *lc;
2445 :
2446 11132 : foreach(lc, groupClause)
2447 : {
2448 6504 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2449 :
2450 6504 : tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2451 : }
2452 :
2453 10640 : foreach(lc, gsets)
2454 : {
2455 6012 : List *set = NIL;
2456 : ListCell *lc2;
2457 6012 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2458 :
2459 13378 : foreach(lc2, gs->set)
2460 : {
2461 7366 : set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2462 : }
2463 :
2464 6012 : result = lappend(result, set);
2465 : }
2466 :
2467 4628 : return result;
2468 : }
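
A worked example may help: suppose the groupClause items carry the (made-up) tleSortGroupRefs 3, 5 and 7. They become columns 0, 1 and 2, so a grouping set referencing (5, 7) is rewritten as (1, 2). The standalone sketch below is not part of planner.c and uses plain arrays in place of Lists.

    #include <stdio.h>

    int
    main(void)
    {
        int         groupclause_refs[] = {3, 5, 7}; /* tleSortGroupRef of each item */
        int         map[8];                         /* indexed by ref, as above */
        int         gset[] = {5, 7};                /* one grouping set, by ref */

        for (int i = 0; i < 3; i++)
            map[groupclause_refs[i]] = i;           /* ref -> 0-based column index */

        for (int i = 0; i < 2; i++)
            printf("ref %d -> column %d\n", gset[i], map[gset[i]]);
        return 0;
    }
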
2469 :
2470 :
2471 : /*
2472 : * preprocess_rowmarks - set up PlanRowMarks if needed
2473 : */
2474 : static void
2475 554142 : preprocess_rowmarks(PlannerInfo *root)
2476 : {
2477 554142 : Query *parse = root->parse;
2478 : Bitmapset *rels;
2479 : List *prowmarks;
2480 : ListCell *l;
2481 : int i;
2482 :
2483 554142 : if (parse->rowMarks)
2484 : {
2485 : /*
2486 : * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2487 : * grouping, since grouping renders a reference to individual tuple
2488 : * CTIDs invalid. This is also checked at parse time, but that's
2489 : * insufficient because of rule substitution, query pullup, etc.
2490 : */
2491 13598 : CheckSelectLocking(parse, linitial_node(RowMarkClause,
2492 : parse->rowMarks)->strength);
2493 : }
2494 : else
2495 : {
2496 : /*
2497 : * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2498 : * UPDATE/SHARE.
2499 : */
2500 540544 : if (parse->commandType != CMD_UPDATE &&
2501 525812 : parse->commandType != CMD_DELETE &&
2502 521428 : parse->commandType != CMD_MERGE)
2503 519574 : return;
2504 : }
2505 :
2506 : /*
2507 : * We need to have rowmarks for all base relations except the target. We
2508 : * make a bitmapset of all base rels and then remove the items we don't
2509 : * need or have FOR [KEY] UPDATE/SHARE marks for.
2510 : */
2511 34568 : rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2512 34568 : if (parse->resultRelation)
2513 20970 : rels = bms_del_member(rels, parse->resultRelation);
2514 :
2515 : /*
2516 : * Convert RowMarkClauses to PlanRowMark representation.
2517 : */
2518 34568 : prowmarks = NIL;
2519 48396 : foreach(l, parse->rowMarks)
2520 : {
2521 13828 : RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2522 13828 : RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2523 : PlanRowMark *newrc;
2524 :
2525 : /*
2526 : * Currently, it is syntactically impossible to have FOR UPDATE et al
2527 : * applied to an update/delete target rel. If that ever becomes
2528 : * possible, we should drop the target from the PlanRowMark list.
2529 : */
2530 : Assert(rc->rti != parse->resultRelation);
2531 :
2532 : /*
2533 : * Ignore RowMarkClauses for subqueries; they aren't real tables and
2534 : * can't support true locking. Subqueries that got flattened into the
2535 : * main query should be ignored completely. Any that didn't will get
2536 : * ROW_MARK_COPY items in the next loop.
2537 : */
2538 13828 : if (rte->rtekind != RTE_RELATION)
2539 60 : continue;
2540 :
2541 13768 : rels = bms_del_member(rels, rc->rti);
2542 :
2543 13768 : newrc = makeNode(PlanRowMark);
2544 13768 : newrc->rti = newrc->prti = rc->rti;
2545 13768 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2546 13768 : newrc->markType = select_rowmark_type(rte, rc->strength);
2547 13768 : newrc->allMarkTypes = (1 << newrc->markType);
2548 13768 : newrc->strength = rc->strength;
2549 13768 : newrc->waitPolicy = rc->waitPolicy;
2550 13768 : newrc->isParent = false;
2551 :
2552 13768 : prowmarks = lappend(prowmarks, newrc);
2553 : }
2554 :
2555 : /*
2556 : * Now, add rowmarks for any non-target, non-locked base relations.
2557 : */
2558 34568 : i = 0;
2559 79996 : foreach(l, parse->rtable)
2560 : {
2561 45428 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2562 : PlanRowMark *newrc;
2563 :
2564 45428 : i++;
2565 45428 : if (!bms_is_member(i, rels))
2566 41658 : continue;
2567 :
2568 3770 : newrc = makeNode(PlanRowMark);
2569 3770 : newrc->rti = newrc->prti = i;
2570 3770 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2571 3770 : newrc->markType = select_rowmark_type(rte, LCS_NONE);
2572 3770 : newrc->allMarkTypes = (1 << newrc->markType);
2573 3770 : newrc->strength = LCS_NONE;
2574 3770 : newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2575 3770 : newrc->isParent = false;
2576 :
2577 3770 : prowmarks = lappend(prowmarks, newrc);
2578 : }
2579 :
2580 34568 : root->rowMarks = prowmarks;
2581 : }
2582 :
2583 : /*
2584 : * Select RowMarkType to use for a given table
2585 : */
2586 : RowMarkType
2587 19954 : select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2588 : {
2589 19954 : if (rte->rtekind != RTE_RELATION)
2590 : {
2591 : /* If it's not a table at all, use ROW_MARK_COPY */
2592 1438 : return ROW_MARK_COPY;
2593 : }
2594 18516 : else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2595 : {
2596 : /* Let the FDW select the rowmark type, if it wants to */
2597 228 : FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2598 :
2599 228 : if (fdwroutine->GetForeignRowMarkType != NULL)
2600 0 : return fdwroutine->GetForeignRowMarkType(rte, strength);
2601 : /* Otherwise, use ROW_MARK_COPY by default */
2602 228 : return ROW_MARK_COPY;
2603 : }
2604 : else
2605 : {
2606 : /* Regular table, apply the appropriate lock type */
2607 18288 : switch (strength)
2608 : {
2609 2546 : case LCS_NONE:
2610 :
2611 : /*
2612 : * We don't need a tuple lock, only the ability to re-fetch
2613 : * the row.
2614 : */
2615 2546 : return ROW_MARK_REFERENCE;
2616 : break;
2617 13810 : case LCS_FORKEYSHARE:
2618 13810 : return ROW_MARK_KEYSHARE;
2619 : break;
2620 306 : case LCS_FORSHARE:
2621 306 : return ROW_MARK_SHARE;
2622 : break;
2623 78 : case LCS_FORNOKEYUPDATE:
2624 78 : return ROW_MARK_NOKEYEXCLUSIVE;
2625 : break;
2626 1548 : case LCS_FORUPDATE:
2627 1548 : return ROW_MARK_EXCLUSIVE;
2628 : break;
2629 : }
2630 0 : elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2631 : return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2632 : }
2633 : }
2634 :
2635 : /*
2636 : * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2637 : *
2638 : * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2639 : * results back in *count_est and *offset_est. These variables are set to
2640 : * 0 if the corresponding clause is not present, and -1 if it's present
2641 : * but we couldn't estimate the value for it. (The "0" convention is OK
2642 : * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2643 : * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2644 : * usual practice of never estimating less than one row.) These values will
2645 : * be passed to create_limit_path, which see if you change this code.
2646 : *
2647 : * The return value is the suitably adjusted tuple_fraction to use for
2648 : * planning the query. This adjustment is not overridable, since it reflects
2649 : * plan actions that grouping_planner() will certainly take, not assumptions
2650 : * about context.
2651 : */
2652 : static double
2653 5144 : preprocess_limit(PlannerInfo *root, double tuple_fraction,
2654 : int64 *offset_est, int64 *count_est)
2655 : {
2656 5144 : Query *parse = root->parse;
2657 : Node *est;
2658 : double limit_fraction;
2659 :
2660 : /* Should not be called unless LIMIT or OFFSET */
2661 : Assert(parse->limitCount || parse->limitOffset);
2662 :
2663 : /*
2664 : * Try to obtain the clause values. We use estimate_expression_value
2665 : * primarily because it can sometimes do something useful with Params.
2666 : */
2667 5144 : if (parse->limitCount)
2668 : {
2669 4616 : est = estimate_expression_value(root, parse->limitCount);
2670 4616 : if (est && IsA(est, Const))
2671 : {
2672 4610 : if (((Const *) est)->constisnull)
2673 : {
2674 : /* NULL indicates LIMIT ALL, ie, no limit */
2675 0 : *count_est = 0; /* treat as not present */
2676 : }
2677 : else
2678 : {
2679 4610 : *count_est = DatumGetInt64(((Const *) est)->constvalue);
2680 4610 : if (*count_est <= 0)
2681 150 : *count_est = 1; /* force to at least 1 */
2682 : }
2683 : }
2684 : else
2685 6 : *count_est = -1; /* can't estimate */
2686 : }
2687 : else
2688 528 : *count_est = 0; /* not present */
2689 :
2690 5144 : if (parse->limitOffset)
2691 : {
2692 900 : est = estimate_expression_value(root, parse->limitOffset);
2693 900 : if (est && IsA(est, Const))
2694 : {
2695 876 : if (((Const *) est)->constisnull)
2696 : {
2697 : /* Treat NULL as no offset; the executor will too */
2698 0 : *offset_est = 0; /* treat as not present */
2699 : }
2700 : else
2701 : {
2702 876 : *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2703 876 : if (*offset_est < 0)
2704 0 : *offset_est = 0; /* treat as not present */
2705 : }
2706 : }
2707 : else
2708 24 : *offset_est = -1; /* can't estimate */
2709 : }
2710 : else
2711 4244 : *offset_est = 0; /* not present */
2712 :
2713 5144 : if (*count_est != 0)
2714 : {
2715 : /*
2716 : * A LIMIT clause limits the absolute number of tuples returned.
2717 : * However, if it's not a constant LIMIT then we have to guess; for
2718 : * lack of a better idea, assume 10% of the plan's result is wanted.
2719 : */
2720 4616 : if (*count_est < 0 || *offset_est < 0)
2721 : {
2722 : /* LIMIT or OFFSET is an expression ... punt ... */
2723 24 : limit_fraction = 0.10;
2724 : }
2725 : else
2726 : {
2727 : /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2728 4592 : limit_fraction = (double) *count_est + (double) *offset_est;
2729 : }
2730 :
2731 : /*
2732 : * If we have absolute limits from both caller and LIMIT, use the
2733 : * smaller value; likewise if they are both fractional. If one is
2734 : * fractional and the other absolute, we can't easily determine which
2735 : * is smaller, but we use the heuristic that the absolute will usually
2736 : * be smaller.
2737 : */
2738 4616 : if (tuple_fraction >= 1.0)
2739 : {
2740 6 : if (limit_fraction >= 1.0)
2741 : {
2742 : /* both absolute */
2743 6 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2744 : }
2745 : else
2746 : {
2747 : /* caller absolute, limit fractional; use caller's value */
2748 : }
2749 : }
2750 4610 : else if (tuple_fraction > 0.0)
2751 : {
2752 148 : if (limit_fraction >= 1.0)
2753 : {
2754 : /* caller fractional, limit absolute; use limit */
2755 148 : tuple_fraction = limit_fraction;
2756 : }
2757 : else
2758 : {
2759 : /* both fractional */
2760 0 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2761 : }
2762 : }
2763 : else
2764 : {
2765 : /* no info from caller, just use limit */
2766 4462 : tuple_fraction = limit_fraction;
2767 : }
2768 : }
2769 528 : else if (*offset_est != 0 && tuple_fraction > 0.0)
2770 : {
2771 : /*
2772 : * We have an OFFSET but no LIMIT. This acts entirely differently
2773 : * from the LIMIT case: here, we need to increase rather than decrease
2774 : * the caller's tuple_fraction, because the OFFSET acts to cause more
2775 : * tuples to be fetched instead of fewer. This only matters if we got
2776 : * a tuple_fraction > 0, however.
2777 : *
2778 : * As above, use 10% if OFFSET is present but unestimatable.
2779 : */
2780 16 : if (*offset_est < 0)
2781 0 : limit_fraction = 0.10;
2782 : else
2783 16 : limit_fraction = (double) *offset_est;
2784 :
2785 : /*
2786 : * If we have absolute counts from both caller and OFFSET, add them
2787 : * together; likewise if they are both fractional. If one is
2788 : * fractional and the other absolute, we want to take the larger, and
2789 : * we heuristically assume that's the fractional one.
2790 : */
2791 16 : if (tuple_fraction >= 1.0)
2792 : {
2793 0 : if (limit_fraction >= 1.0)
2794 : {
2795 : /* both absolute, so add them together */
2796 0 : tuple_fraction += limit_fraction;
2797 : }
2798 : else
2799 : {
2800 : /* caller absolute, limit fractional; use limit */
2801 0 : tuple_fraction = limit_fraction;
2802 : }
2803 : }
2804 : else
2805 : {
2806 16 : if (limit_fraction >= 1.0)
2807 : {
2808 : /* caller fractional, limit absolute; use caller's value */
2809 : }
2810 : else
2811 : {
2812 : /* both fractional, so add them together */
2813 0 : tuple_fraction += limit_fraction;
2814 0 : if (tuple_fraction >= 1.0)
2815 0 : tuple_fraction = 0.0; /* assume fetch all */
2816 : }
2817 : }
2818 : }
2819 :
2820 5144 : return tuple_fraction;
2821 : }
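
For a concrete feel of the rules above, here is a standalone sketch (not part of planner.c; demo_limit_fraction and DEMO_MIN are made-up names) of how a constant LIMIT/OFFSET pair is folded into tuple_fraction when a LIMIT clause is present, including the 10% fallback for clauses that cannot be estimated and the combination with any caller-supplied fraction.

    #include <stdio.h>

    #define DEMO_MIN(a, b)  ((a) < (b) ? (a) : (b))

    /* Mirror of the LIMIT-present combination rules above; illustrative only. */
    static double
    demo_limit_fraction(double tuple_fraction, long count_est, long offset_est)
    {
        double      limit_fraction;

        /* -1 means "clause present but unestimatable": assume 10% of the result */
        if (count_est < 0 || offset_est < 0)
            limit_fraction = 0.10;
        else
            limit_fraction = (double) count_est + (double) offset_est;

        if (tuple_fraction >= 1.0)
        {
            if (limit_fraction >= 1.0)
                tuple_fraction = DEMO_MIN(tuple_fraction, limit_fraction);  /* both absolute */
            /* else caller absolute, limit fractional: keep the caller's value */
        }
        else if (tuple_fraction > 0.0)
        {
            if (limit_fraction >= 1.0)
                tuple_fraction = limit_fraction;    /* absolute LIMIT wins */
            else
                tuple_fraction = DEMO_MIN(tuple_fraction, limit_fraction);  /* both fractional */
        }
        else
            tuple_fraction = limit_fraction;        /* no caller hint: use the LIMIT */

        return tuple_fraction;
    }

    int
    main(void)
    {
        /* LIMIT 20 OFFSET 5, no caller hint: plan for 25 rows */
        printf("%.2f\n", demo_limit_fraction(0.0, 20, 5));
        /* same clauses under a cursor wanting 10% of the result: the LIMIT still wins */
        printf("%.2f\n", demo_limit_fraction(0.10, 20, 5));
        /* LIMIT is a parameter we cannot estimate: fall back to 10% */
        printf("%.2f\n", demo_limit_fraction(0.0, -1, 0));
        return 0;
    }
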
2822 :
2823 : /*
2824 : * limit_needed - do we actually need a Limit plan node?
2825 : *
2826 : * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2827 : * a Limit node. This is worth checking for because "OFFSET 0" is a common
2828 : * locution for an optimization fence. (Because other places in the planner
2829 : * merely check whether parse->limitOffset isn't NULL, it will still work as
2830 : * an optimization fence --- we're just suppressing unnecessary run-time
2831 : * overhead.)
2832 : *
2833 : * This might look like it could be merged into preprocess_limit, but there's
2834 : * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2835 : * in preprocess_limit it's good enough to consider estimated values.
2836 : */
2837 : bool
2838 1172798 : limit_needed(Query *parse)
2839 : {
2840 : Node *node;
2841 :
2842 1172798 : node = parse->limitCount;
2843 1172798 : if (node)
2844 : {
2845 11098 : if (IsA(node, Const))
2846 : {
2847 : /* NULL indicates LIMIT ALL, ie, no limit */
2848 10862 : if (!((Const *) node)->constisnull)
2849 10862 : return true; /* LIMIT with a constant value */
2850 : }
2851 : else
2852 236 : return true; /* non-constant LIMIT */
2853 : }
2854 :
2855 1161700 : node = parse->limitOffset;
2856 1161700 : if (node)
2857 : {
2858 1534 : if (IsA(node, Const))
2859 : {
2860 : /* Treat NULL as no offset; the executor would too */
2861 1226 : if (!((Const *) node)->constisnull)
2862 : {
2863 1226 : int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2864 :
2865 1226 : if (offset != 0)
2866 146 : return true; /* OFFSET with a nonzero value */
2867 : }
2868 : }
2869 : else
2870 308 : return true; /* non-constant OFFSET */
2871 : }
2872 :
2873 1161246 : return false; /* don't need a Limit plan node */
2874 : }
2875 :
2876 : /*
2877 : * preprocess_groupclause - do preparatory work on GROUP BY clause
2878 : *
2879 : * The idea here is to adjust the ordering of the GROUP BY elements
2880 : * (which in itself is semantically insignificant) to match ORDER BY,
2881 : * thereby allowing a single sort operation to both implement the ORDER BY
2882 : * requirement and set up for a Unique step that implements GROUP BY.
 2883             :  * We also consider a partial match between GROUP BY and ORDER BY elements,
 2884             :  * which could allow ORDER BY to be implemented using an incremental sort.
2885 : *
2886 : * We also consider other orderings of the GROUP BY elements, which could
2887 : * match the sort ordering of other possible plans (eg an indexscan) and
2888 : * thereby reduce cost. This is implemented during the generation of grouping
2889 : * paths. See get_useful_group_keys_orderings() for details.
2890 : *
2891 : * Note: we need no comparable processing of the distinctClause because
2892 : * the parser already enforced that that matches ORDER BY.
2893 : *
2894 : * Note: we return a fresh List, but its elements are the same
2895 : * SortGroupClauses appearing in parse->groupClause. This is important
2896 : * because later processing may modify the processed_groupClause list.
2897 : *
2898 : * For grouping sets, the order of items is instead forced to agree with that
2899 : * of the grouping set (and items not in the grouping set are skipped). The
2900 : * work of sorting the order of grouping set elements to match the ORDER BY if
2901 : * possible is done elsewhere.
2902 : */
2903 : static List *
2904 8646 : preprocess_groupclause(PlannerInfo *root, List *force)
2905 : {
2906 8646 : Query *parse = root->parse;
2907 8646 : List *new_groupclause = NIL;
2908 : ListCell *sl;
2909 : ListCell *gl;
2910 :
2911 : /* For grouping sets, we need to force the ordering */
2912 8646 : if (force)
2913 : {
2914 10940 : foreach(sl, force)
2915 : {
2916 6426 : Index ref = lfirst_int(sl);
2917 6426 : SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2918 :
2919 6426 : new_groupclause = lappend(new_groupclause, cl);
2920 : }
2921 :
2922 4514 : return new_groupclause;
2923 : }
2924 :
2925 : /* If no ORDER BY, nothing useful to do here */
2926 4132 : if (parse->sortClause == NIL)
2927 2314 : return list_copy(parse->groupClause);
2928 :
2929 : /*
2930 : * Scan the ORDER BY clause and construct a list of matching GROUP BY
2931 : * items, but only as far as we can make a matching prefix.
2932 : *
2933 : * This code assumes that the sortClause contains no duplicate items.
2934 : */
2935 3542 : foreach(sl, parse->sortClause)
2936 : {
2937 2370 : SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2938 :
2939 3466 : foreach(gl, parse->groupClause)
2940 : {
2941 2820 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2942 :
2943 2820 : if (equal(gc, sc))
2944 : {
2945 1724 : new_groupclause = lappend(new_groupclause, gc);
2946 1724 : break;
2947 : }
2948 : }
2949 2370 : if (gl == NULL)
2950 646 : break; /* no match, so stop scanning */
2951 : }
2952 :
2953 :
2954 : /* If no match at all, no point in reordering GROUP BY */
2955 1818 : if (new_groupclause == NIL)
2956 298 : return list_copy(parse->groupClause);
2957 :
2958 : /*
2959 : * Add any remaining GROUP BY items to the new list. We don't require a
2960 : * complete match, because even partial match allows ORDER BY to be
2961 : * implemented using incremental sort. Also, give up if there are any
2962 : * non-sortable GROUP BY items, since then there's no hope anyway.
2963 : */
2964 3410 : foreach(gl, parse->groupClause)
2965 : {
2966 1890 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2967 :
2968 1890 : if (list_member_ptr(new_groupclause, gc))
2969 1724 : continue; /* it matched an ORDER BY item */
2970 166 : if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2971 0 : return list_copy(parse->groupClause);
2972 166 : new_groupclause = lappend(new_groupclause, gc);
2973 : }
2974 :
2975 : /* Success --- install the rearranged GROUP BY list */
2976 : Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2977 1520 : return new_groupclause;
2978 : }
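/*
 * Illustrative example of the reordering above: given
 *		SELECT ... GROUP BY b, a, c ORDER BY a, b
 * the GROUP BY list is rearranged to (a, b, c), so one sort on (a, b, c)
 * both satisfies the ORDER BY prefix and sets up for grouping; keeping the
 * original (b, a, c) order would have required a separate sort for the
 * ORDER BY.
 */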
2979 :
2980 : /*
2981 : * Extract lists of grouping sets that can be implemented using a single
2982 : * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2983 : *
2984 : * Input must be sorted with smallest sets first. Result has each sublist
2985 : * sorted with smallest sets first.
2986 : *
2987 : * We want to produce the absolute minimum possible number of lists here to
2988 : * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2989 : * of finding the minimal partition of a partially-ordered set into chains
2990 : * (which is what we need, taking the list of grouping sets as a poset ordered
2991 : * by set inclusion) can be mapped to the problem of finding the maximum
2992 : * cardinality matching on a bipartite graph, which is solvable in polynomial
2993 :  * time, with a worst-case bound of O(n^2.5) and usually much
2994 : * better. Since our N is at most 4096, we don't need to consider fallbacks to
2995 : * heuristic or approximate methods. (Planning time for a 12-d cube is under
2996 : * half a second on my modest system even with optimization off and assertions
2997 : * on.)
2998 : */
2999 : static List *
3000 1008 : extract_rollup_sets(List *groupingSets)
3001 : {
3002 1008 : int num_sets_raw = list_length(groupingSets);
3003 1008 : int num_empty = 0;
3004 1008 : int num_sets = 0; /* distinct sets */
3005 1008 : int num_chains = 0;
3006 1008 : List *result = NIL;
3007 : List **results;
3008 : List **orig_sets;
3009 : Bitmapset **set_masks;
3010 : int *chains;
3011 : short **adjacency;
3012 : short *adjacency_buf;
3013 : BipartiteMatchState *state;
3014 : int i;
3015 : int j;
3016 : int j_size;
3017 1008 : ListCell *lc1 = list_head(groupingSets);
3018 : ListCell *lc;
3019 :
3020 : /*
3021 : * Start by stripping out empty sets. The algorithm doesn't require this,
3022 : * but the planner currently needs all empty sets to be returned in the
3023 : * first list, so we strip them here and add them back after.
3024 : */
3025 1712 : while (lc1 && lfirst(lc1) == NIL)
3026 : {
3027 704 : ++num_empty;
3028 704 : lc1 = lnext(groupingSets, lc1);
3029 : }
3030 :
3031 : /* bail out now if it turns out that all we had were empty sets. */
3032 1008 : if (!lc1)
3033 78 : return list_make1(groupingSets);
3034 :
3035 : /*----------
3036 : * We don't strictly need to remove duplicate sets here, but if we don't,
3037 : * they tend to become scattered through the result, which is a bit
3038 : * confusing (and irritating if we ever decide to optimize them out).
3039 : * So we remove them here and add them back after.
3040 : *
3041 : * For each non-duplicate set, we fill in the following:
3042 : *
3043 : * orig_sets[i] = list of the original set lists
3044 : * set_masks[i] = bitmapset for testing inclusion
3045 : * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3046 : *
3047 : * chains[i] will be the result group this set is assigned to.
3048 : *
3049 : * We index all of these from 1 rather than 0 because it is convenient
3050 : * to leave 0 free for the NIL node in the graph algorithm.
3051 : *----------
3052 : */
3053 930 : orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3054 930 : set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3055 930 : adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3056 930 : adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3057 :
3058 930 : j_size = 0;
3059 930 : j = 0;
3060 930 : i = 1;
3061 :
3062 3244 : for_each_cell(lc, groupingSets, lc1)
3063 : {
3064 2314 : List *candidate = (List *) lfirst(lc);
3065 2314 : Bitmapset *candidate_set = NULL;
3066 : ListCell *lc2;
3067 2314 : int dup_of = 0;
3068 :
3069 5546 : foreach(lc2, candidate)
3070 : {
3071 3232 : candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3072 : }
3073 :
3074 : /* we can only be a dup if we're the same length as a previous set */
3075 2314 : if (j_size == list_length(candidate))
3076 : {
3077 : int k;
3078 :
3079 2072 : for (k = j; k < i; ++k)
3080 : {
3081 1344 : if (bms_equal(set_masks[k], candidate_set))
3082 : {
3083 158 : dup_of = k;
3084 158 : break;
3085 : }
3086 : }
3087 : }
3088 1428 : else if (j_size < list_length(candidate))
3089 : {
3090 1428 : j_size = list_length(candidate);
3091 1428 : j = i;
3092 : }
3093 :
3094 2314 : if (dup_of > 0)
3095 : {
3096 158 : orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3097 158 : bms_free(candidate_set);
3098 : }
3099 : else
3100 : {
3101 : int k;
3102 2156 : int n_adj = 0;
3103 :
3104 2156 : orig_sets[i] = list_make1(candidate);
3105 2156 : set_masks[i] = candidate_set;
3106 :
3107 : /* fill in adjacency list; no need to compare equal-size sets */
3108 :
3109 3452 : for (k = j - 1; k > 0; --k)
3110 : {
3111 1296 : if (bms_is_subset(set_masks[k], candidate_set))
3112 1134 : adjacency_buf[++n_adj] = k;
3113 : }
3114 :
3115 2156 : if (n_adj > 0)
3116 : {
3117 622 : adjacency_buf[0] = n_adj;
3118 622 : adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3119 622 : memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3120 : }
3121 : else
3122 1534 : adjacency[i] = NULL;
3123 :
3124 2156 : ++i;
3125 : }
3126 : }
3127 :
3128 930 : num_sets = i - 1;
3129 :
3130 : /*
3131 : * Apply the graph matching algorithm to do the work.
3132 : */
3133 930 : state = BipartiteMatch(num_sets, num_sets, adjacency);
3134 :
3135 : /*
3136 : * Now, the state->pair* fields have the info we need to assign sets to
3137 : * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3138 : * pair_vu[v] = u (both will be true, but we check both so that we can do
3139 : * it in one pass)
3140 : */
3141 930 : chains = palloc0((num_sets + 1) * sizeof(int));
3142 :
3143 3086 : for (i = 1; i <= num_sets; ++i)
3144 : {
3145 2156 : int u = state->pair_vu[i];
3146 2156 : int v = state->pair_uv[i];
3147 :
3148 2156 : if (u > 0 && u < i)
3149 0 : chains[i] = chains[u];
3150 2156 : else if (v > 0 && v < i)
3151 594 : chains[i] = chains[v];
3152 : else
3153 1562 : chains[i] = ++num_chains;
3154 : }
3155 :
3156 : /* build result lists. */
3157 930 : results = palloc0((num_chains + 1) * sizeof(List *));
3158 :
3159 3086 : for (i = 1; i <= num_sets; ++i)
3160 : {
3161 2156 : int c = chains[i];
3162 :
3163 : Assert(c > 0);
3164 :
3165 2156 : results[c] = list_concat(results[c], orig_sets[i]);
3166 : }
3167 :
3168 : /* push any empty sets back on the first list. */
3169 1496 : while (num_empty-- > 0)
3170 566 : results[1] = lcons(NIL, results[1]);
3171 :
3172 : /* make result list */
3173 2492 : for (i = 1; i <= num_chains; ++i)
3174 1562 : result = lappend(result, results[i]);
3175 :
3176 : /*
3177 : * Free all the things.
3178 : *
3179 : * (This is over-fussy for small sets but for large sets we could have
3180 : * tied up a nontrivial amount of memory.)
3181 : */
3182 930 : BipartiteMatchFree(state);
3183 930 : pfree(results);
3184 930 : pfree(chains);
3185 3086 : for (i = 1; i <= num_sets; ++i)
3186 2156 : if (adjacency[i])
3187 622 : pfree(adjacency[i]);
3188 930 : pfree(adjacency);
3189 930 : pfree(adjacency_buf);
3190 930 : pfree(orig_sets);
3191 3086 : for (i = 1; i <= num_sets; ++i)
3192 2156 : bms_free(set_masks[i]);
3193 930 : pfree(set_masks);
3194 :
3195 930 : return result;
3196 : }
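/*
 * Illustrative example: CUBE(a, b) expands to the grouping sets
 * (), (a), (b), (a, b).  Viewed as a poset under set inclusion, a minimal
 * chain decomposition needs two chains, e.g.
 *		chain 1: () < (a) < (a, b)		chain 2: (b)
 * so this function returns two sublists, and the planner can compute all
 * four grouping sets with two rollups (two input orderings) rather than
 * one per set.
 */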
3197 :
3198 : /*
3199 : * Reorder the elements of a list of grouping sets such that they have correct
3200 : * prefix relationships. Also inserts the GroupingSetData annotations.
3201 : *
3202 : * The input must be ordered with smallest sets first; the result is returned
3203 : * with largest sets first. Note that the result shares no list substructure
3204 : * with the input, so it's safe for the caller to modify it later.
3205 : *
3206 : * If we're passed in a sortclause, we follow its order of columns to the
3207 : * extent possible, to minimize the chance that we add unnecessary sorts.
3208 : * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3209 : * gets implemented in one pass.)
3210 : */
3211 : static List *
3212 1640 : reorder_grouping_sets(List *groupingSets, List *sortclause)
3213 : {
3214 : ListCell *lc;
3215 1640 : List *previous = NIL;
3216 1640 : List *result = NIL;
3217 :
3218 4658 : foreach(lc, groupingSets)
3219 : {
3220 3018 : List *candidate = (List *) lfirst(lc);
3221 3018 : List *new_elems = list_difference_int(candidate, previous);
3222 3018 : GroupingSetData *gs = makeNode(GroupingSetData);
3223 :
3224 3194 : while (list_length(sortclause) > list_length(previous) &&
3225 : new_elems != NIL)
3226 : {
3227 296 : SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3228 296 : int ref = sc->tleSortGroupRef;
3229 :
3230 296 : if (list_member_int(new_elems, ref))
3231 : {
3232 176 : previous = lappend_int(previous, ref);
3233 176 : new_elems = list_delete_int(new_elems, ref);
3234 : }
3235 : else
3236 : {
3237 : /* diverged from the sortclause; give up on it */
3238 120 : sortclause = NIL;
3239 120 : break;
3240 : }
3241 : }
3242 :
3243 3018 : previous = list_concat(previous, new_elems);
3244 :
3245 3018 : gs->set = list_copy(previous);
3246 3018 : result = lcons(gs, result);
3247 : }
3248 :
3249 1640 : list_free(previous);
3250 :
3251 1640 : return result;
3252 : }
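/*
 * Worked example of the case mentioned above: GROUPING SETS ((a,b,c),(c))
 * ORDER BY c, b, a arrives here as [(c), (a,b,c)] (smallest first).
 * Following the sortclause, the first set becomes (c) and the second is
 * extended column-by-column to (c,b,a), so the result [(c,b,a), (c)] has
 * correct prefix relationships and a single sort on (c,b,a) also satisfies
 * the ORDER BY.
 */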
3253 :
3254 : /*
3255 : * has_volatile_pathkey
3256 : * Returns true if any PathKey in 'keys' has an EquivalenceClass
3257 : * containing a volatile function. Otherwise returns false.
3258 : */
3259 : static bool
3260 2840 : has_volatile_pathkey(List *keys)
3261 : {
3262 : ListCell *lc;
3263 :
3264 5824 : foreach(lc, keys)
3265 : {
3266 3002 : PathKey *pathkey = lfirst_node(PathKey, lc);
3267 :
3268 3002 : if (pathkey->pk_eclass->ec_has_volatile)
3269 18 : return true;
3270 : }
3271 :
3272 2822 : return false;
3273 : }
3274 :
3275 : /*
3276 : * adjust_group_pathkeys_for_groupagg
3277 : * Add pathkeys to root->group_pathkeys to reflect the best set of
3278 : * pre-ordered input for ordered aggregates.
3279 : *
3280 : * We define "best" as the pathkeys that suit the largest number of
3281 : * aggregate functions. We find these by looking at the first ORDER BY /
3282 : * DISTINCT aggregate and take the pathkeys for that before searching for
3283 : * other aggregates that require the same or a more strict variation of the
3284 : * same pathkeys. We then repeat that process for any remaining aggregates
3285 :  * with different pathkeys, and if we find another set of pathkeys that suits a
3286 :  * larger number of aggregates, then we select those pathkeys instead.
3287 : *
3288 : * When the best pathkeys are found we also mark each Aggref that can use
3289 : * those pathkeys as aggpresorted = true.
3290 : *
3291 : * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3292 : * volatile functions, we never make use of these pathkeys. We want to ensure
3293 : * that sorts using volatile functions are done independently in each Aggref
3294 : * rather than once at the query level. If we were to allow this then Aggrefs
3295 : * with compatible sort orders would all transition their rows in the same
3296 : * order if those pathkeys were deemed to be the best pathkeys to sort on.
3297 : * Whereas, if some other set of Aggref's pathkeys happened to be deemed
3298 : * better pathkeys to sort on, then the volatile function Aggrefs would be
3299 : * left to perform their sorts individually. To avoid this inconsistent
3300 : * behavior which could make Aggref results depend on what other Aggrefs the
3301 : * query contains, we always force Aggrefs with volatile functions to perform
3302 : * their own sorts.
3303 : */
3304 : static void
3305 2444 : adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3306 : {
3307 2444 : List *grouppathkeys = root->group_pathkeys;
3308 : List *bestpathkeys;
3309 : Bitmapset *bestaggs;
3310 : Bitmapset *unprocessed_aggs;
3311 : ListCell *lc;
3312 : int i;
3313 :
3314 : /* Shouldn't be here if there are grouping sets */
3315 : Assert(root->parse->groupingSets == NIL);
3316 : /* Shouldn't be here unless there are some ordered aggregates */
3317 : Assert(root->numOrderedAggs > 0);
3318 :
3319 : /* Do nothing if disabled */
3320 2444 : if (!enable_presorted_aggregate)
3321 6 : return;
3322 :
3323 : /*
3324 : * Make a first pass over all AggInfos to collect a Bitmapset containing
3325 : * the indexes of all AggInfos to be processed below.
3326 : */
3327 2438 : unprocessed_aggs = NULL;
3328 5560 : foreach(lc, root->agginfos)
3329 : {
3330 3122 : AggInfo *agginfo = lfirst_node(AggInfo, lc);
3331 3122 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3332 :
3333 3122 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3334 264 : continue;
3335 :
3336 : /* Skip unless there's a DISTINCT or ORDER BY clause */
3337 2858 : if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3338 300 : continue;
3339 :
3340 : /* Additional safety checks are needed if there's a FILTER clause */
3341 2558 : if (aggref->aggfilter != NULL)
3342 : {
3343 : ListCell *lc2;
3344 54 : bool allow_presort = true;
3345 :
3346 : /*
3347 : * When the Aggref has a FILTER clause, it's possible that the
3348 : * filter removes rows that cannot be sorted because the
3349 : * expression to sort by results in an error during its
3350 : * evaluation. This is a problem for presorting as that happens
3351 : * before the FILTER, whereas without presorting, the Aggregate
3352 : * node will apply the FILTER *before* sorting. So that we never
3353 : * try to sort anything that might error, here we aim to skip over
3354 : * any Aggrefs with arguments with expressions which, when
3355 : * evaluated, could cause an ERROR. Vars and Consts are ok. There
3356 : * may be more cases that should be allowed, but more thought
3357 : * needs to be given. Err on the side of caution.
3358 : */
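			/*
			 * For instance (an illustrative call, not taken from the source),
			 * given
			 *		sum(1/x ORDER BY 1/x) FILTER (WHERE x <> 0)
			 * presorting the input by 1/x would evaluate the expression on
			 * rows with x = 0 that the FILTER would have discarded, raising a
			 * division-by-zero error; since 1/x is neither a Var nor a Const,
			 * the loop below refuses to presort such an Aggref.
			 */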
3359 102 : foreach(lc2, aggref->args)
3360 : {
3361 72 : TargetEntry *tle = (TargetEntry *) lfirst(lc2);
3362 72 : Expr *expr = tle->expr;
3363 :
3364 84 : while (IsA(expr, RelabelType))
3365 12 : expr = (Expr *) (castNode(RelabelType, expr))->arg;
3366 :
3367 : /* Common case, Vars and Consts are ok */
3368 72 : if (IsA(expr, Var) || IsA(expr, Const))
3369 48 : continue;
3370 :
3371 : /* Unsupported. Don't try to presort for this Aggref */
3372 24 : allow_presort = false;
3373 24 : break;
3374 : }
3375 :
3376 : /* Skip unsupported Aggrefs */
3377 54 : if (!allow_presort)
3378 24 : continue;
3379 : }
3380 :
3381 2534 : unprocessed_aggs = bms_add_member(unprocessed_aggs,
3382 : foreach_current_index(lc));
3383 : }
3384 :
3385 : /*
3386 : * Now process all the unprocessed_aggs to find the best set of pathkeys
3387 : * for the given set of aggregates.
3388 : *
3389 : * On the first outer loop here 'bestaggs' will be empty. We'll populate
3390 : * this during the first loop using the pathkeys for the very first
3391 : * AggInfo then taking any stronger pathkeys from any other AggInfos with
3392 : * a more strict set of compatible pathkeys. Once the outer loop is
3393 : * complete, we mark off all the aggregates with compatible pathkeys then
3394 : * remove those from the unprocessed_aggs and repeat the process to try to
3395 : * find another set of pathkeys that are suitable for a larger number of
3396 : * aggregates. The outer loop will stop when there are not enough
3397 : * unprocessed aggregates for it to be possible to find a set of pathkeys
3398 : * to suit a larger number of aggregates.
3399 : */
3400 2438 : bestpathkeys = NIL;
3401 2438 : bestaggs = NULL;
3402 4810 : while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3403 : {
3404 2372 : Bitmapset *aggindexes = NULL;
3405 2372 : List *currpathkeys = NIL;
3406 :
3407 2372 : i = -1;
3408 5212 : while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3409 : {
3410 2840 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3411 2840 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3412 : List *sortlist;
3413 : List *pathkeys;
3414 :
3415 2840 : if (aggref->aggdistinct != NIL)
3416 724 : sortlist = aggref->aggdistinct;
3417 : else
3418 2116 : sortlist = aggref->aggorder;
3419 :
3420 2840 : pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3421 : aggref->args);
3422 :
3423 : /*
3424 : * Ignore Aggrefs which have volatile functions in their ORDER BY
3425 : * or DISTINCT clause.
3426 : */
3427 2840 : if (has_volatile_pathkey(pathkeys))
3428 : {
3429 18 : unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3430 18 : continue;
3431 : }
3432 :
3433 : /*
3434 : * When not set yet, take the pathkeys from the first unprocessed
3435 : * aggregate.
3436 : */
3437 2822 : if (currpathkeys == NIL)
3438 : {
3439 2366 : currpathkeys = pathkeys;
3440 :
3441 : /* include the GROUP BY pathkeys, if they exist */
3442 2366 : if (grouppathkeys != NIL)
3443 276 : currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3444 : currpathkeys);
3445 :
3446 : /* record that we found pathkeys for this aggregate */
3447 2366 : aggindexes = bms_add_member(aggindexes, i);
3448 : }
3449 : else
3450 : {
3451 : /* now look for a stronger set of matching pathkeys */
3452 :
3453 : /* include the GROUP BY pathkeys, if they exist */
3454 456 : if (grouppathkeys != NIL)
3455 288 : pathkeys = append_pathkeys(list_copy(grouppathkeys),
3456 : pathkeys);
3457 :
3458 : /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3459 456 : switch (compare_pathkeys(currpathkeys, pathkeys))
3460 : {
3461 12 : case PATHKEYS_BETTER2:
3462 : /* 'pathkeys' are stronger, use these ones instead */
3463 12 : currpathkeys = pathkeys;
3464 : /* FALLTHROUGH */
3465 :
3466 66 : case PATHKEYS_BETTER1:
3467 : /* 'pathkeys' are less strict */
3468 : /* FALLTHROUGH */
3469 :
3470 : case PATHKEYS_EQUAL:
3471 : /* mark this aggregate as covered by 'currpathkeys' */
3472 66 : aggindexes = bms_add_member(aggindexes, i);
3473 66 : break;
3474 :
3475 390 : case PATHKEYS_DIFFERENT:
3476 390 : break;
3477 : }
3478 : }
3479 : }
3480 :
3481 : /* remove the aggregates that we've just processed */
3482 2372 : unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3483 :
3484 : /*
3485 : * If this pass included more aggregates than the previous best then
3486 : * use these ones as the best set.
3487 : */
3488 2372 : if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3489 : {
3490 2264 : bestaggs = aggindexes;
3491 2264 : bestpathkeys = currpathkeys;
3492 : }
3493 : }
3494 :
3495 : /*
3496 : * If we found any ordered aggregates, update root->group_pathkeys to add
3497 : * the best set of aggregate pathkeys. Note that bestpathkeys includes
3498 : * the original GROUP BY pathkeys already.
3499 : */
3500 2438 : if (bestpathkeys != NIL)
3501 2204 : root->group_pathkeys = bestpathkeys;
3502 :
3503 : /*
3504 : * Now that we've found the best set of aggregates we can set the
3505 : * presorted flag to indicate to the executor that it needn't bother
3506 : * performing a sort for these Aggrefs. We're able to do this now as
3507 : * there's no chance of a Hash Aggregate plan as create_grouping_paths
3508 : * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3509 : * of ordered aggregates.
3510 : */
3511 2438 : i = -1;
3512 4738 : while ((i = bms_next_member(bestaggs, i)) >= 0)
3513 : {
3514 2300 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3515 :
3516 4618 : foreach(lc, agginfo->aggrefs)
3517 : {
3518 2318 : Aggref *aggref = lfirst_node(Aggref, lc);
3519 :
3520 2318 : aggref->aggpresorted = true;
3521 : }
3522 : }
3523 : }
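/*
 * Illustrative example of the selection above: for
 *		SELECT sum(x ORDER BY y), array_agg(x ORDER BY y), avg(x ORDER BY z)
 *		FROM tab GROUP BY g
 * the pathkeys (g, y) suit two of the three aggregates while (g, z) suits
 * only one, so group_pathkeys becomes (g, y) and those two Aggrefs are
 * marked aggpresorted; the avg() is left to perform its own sort on z.
 */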
3524 :
3525 : /*
3526 : * Compute query_pathkeys and other pathkeys during plan generation
3527 : */
3528 : static void
3529 543926 : standard_qp_callback(PlannerInfo *root, void *extra)
3530 : {
3531 543926 : Query *parse = root->parse;
3532 543926 : standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3533 543926 : List *tlist = root->processed_tlist;
3534 543926 : List *activeWindows = qp_extra->activeWindows;
3535 :
3536 : /*
3537 : * Calculate pathkeys that represent grouping/ordering and/or ordered
3538 : * aggregate requirements.
3539 : */
3540 543926 : if (qp_extra->gset_data)
3541 : {
3542 : /*
3543 : * With grouping sets, just use the first RollupData's groupClause. We
3544 : * don't make any effort to optimize grouping clauses when there are
3545 : * grouping sets, nor can we combine aggregate ordering keys with
3546 : * grouping.
3547 : */
3548 1014 : List *rollups = qp_extra->gset_data->rollups;
3549 1014 : List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3550 :
3551 1014 : if (grouping_is_sortable(groupClause))
3552 : {
3553 : bool sortable;
3554 :
3555 : /*
3556 : * The groupClause is logically below the grouping step. So if
3557 : * there is an RTE entry for the grouping step, we need to remove
3558 : * its RT index from the sort expressions before we make PathKeys
3559 : * for them.
3560 : */
3561 1014 : root->group_pathkeys =
3562 1014 : make_pathkeys_for_sortclauses_extended(root,
3563 : &groupClause,
3564 : tlist,
3565 : false,
3566 1014 : parse->hasGroupRTE,
3567 : &sortable,
3568 : false);
3569 : Assert(sortable);
3570 1014 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3571 : }
3572 : else
3573 : {
3574 0 : root->group_pathkeys = NIL;
3575 0 : root->num_groupby_pathkeys = 0;
3576 : }
3577 : }
3578 542912 : else if (parse->groupClause || root->numOrderedAggs > 0)
3579 6332 : {
3580 : /*
3581 : * With a plain GROUP BY list, we can remove any grouping items that
3582 : * are proven redundant by EquivalenceClass processing. For example,
3583 : * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3584 : * especially common cases, but they're nearly free to detect. Note
3585 : * that we remove redundant items from processed_groupClause but not
3586 : * the original parse->groupClause.
3587 : */
3588 : bool sortable;
3589 :
3590 : /*
3591 : * Convert group clauses into pathkeys. Set the ec_sortref field of
3592 : * EquivalenceClass'es if it's not set yet.
3593 : */
3594 6332 : root->group_pathkeys =
3595 6332 : make_pathkeys_for_sortclauses_extended(root,
3596 : &root->processed_groupClause,
3597 : tlist,
3598 : true,
3599 : false,
3600 : &sortable,
3601 : true);
3602 6332 : if (!sortable)
3603 : {
3604 : /* Can't sort; no point in considering aggregate ordering either */
3605 0 : root->group_pathkeys = NIL;
3606 0 : root->num_groupby_pathkeys = 0;
3607 : }
3608 : else
3609 : {
3610 6332 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3611 : /* If we have ordered aggs, consider adding onto group_pathkeys */
3612 6332 : if (root->numOrderedAggs > 0)
3613 2444 : adjust_group_pathkeys_for_groupagg(root);
3614 : }
3615 : }
3616 : else
3617 : {
3618 536580 : root->group_pathkeys = NIL;
3619 536580 : root->num_groupby_pathkeys = 0;
3620 : }
3621 :
3622 : /* We consider only the first (bottom) window in pathkeys logic */
3623 543926 : if (activeWindows != NIL)
3624 : {
3625 2576 : WindowClause *wc = linitial_node(WindowClause, activeWindows);
3626 :
3627 2576 : root->window_pathkeys = make_pathkeys_for_window(root,
3628 : wc,
3629 : tlist);
3630 : }
3631 : else
3632 541350 : root->window_pathkeys = NIL;
3633 :
3634 : /*
3635 : * As with GROUP BY, we can discard any DISTINCT items that are proven
3636 : * redundant by EquivalenceClass processing. The non-redundant list is
3637 : * kept in root->processed_distinctClause, leaving the original
3638 : * parse->distinctClause alone.
3639 : */
3640 543926 : if (parse->distinctClause)
3641 : {
3642 : bool sortable;
3643 :
3644 : /* Make a copy since pathkey processing can modify the list */
3645 3014 : root->processed_distinctClause = list_copy(parse->distinctClause);
3646 3014 : root->distinct_pathkeys =
3647 3014 : make_pathkeys_for_sortclauses_extended(root,
3648 : &root->processed_distinctClause,
3649 : tlist,
3650 : true,
3651 : false,
3652 : &sortable,
3653 : false);
3654 3014 : if (!sortable)
3655 6 : root->distinct_pathkeys = NIL;
3656 : }
3657 : else
3658 540912 : root->distinct_pathkeys = NIL;
3659 :
3660 543926 : root->sort_pathkeys =
3661 543926 : make_pathkeys_for_sortclauses(root,
3662 : parse->sortClause,
3663 : tlist);
3664 :
3665 : /* setting setop_pathkeys might be useful to the union planner */
3666 543926 : if (qp_extra->setop != NULL)
3667 : {
3668 : List *groupClauses;
3669 : bool sortable;
3670 :
3671 12776 : groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3672 :
3673 12776 : root->setop_pathkeys =
3674 12776 : make_pathkeys_for_sortclauses_extended(root,
3675 : &groupClauses,
3676 : tlist,
3677 : false,
3678 : false,
3679 : &sortable,
3680 : false);
3681 12776 : if (!sortable)
3682 208 : root->setop_pathkeys = NIL;
3683 : }
3684 : else
3685 531150 : root->setop_pathkeys = NIL;
3686 :
3687 : /*
3688 : * Figure out whether we want a sorted result from query_planner.
3689 : *
3690 : * If we have a sortable GROUP BY clause, then we want a result sorted
3691 : * properly for grouping. Otherwise, if we have window functions to
3692 : * evaluate, we try to sort for the first window. Otherwise, if there's a
3693 : * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3694 : * we try to produce output that's sufficiently well sorted for the
3695 : * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3696 : * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3697 : * for a set operation which can benefit from presorted results and have a
3698 : * sortable targetlist, we want to sort by the target list.
3699 : *
3700 : * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3701 : * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3702 : * that might just leave us failing to exploit an available sort order at
3703 : * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3704 : * much easier, since we know that the parser ensured that one is a
3705 : * superset of the other.
3706 : */
3707 543926 : if (root->group_pathkeys)
3708 6908 : root->query_pathkeys = root->group_pathkeys;
3709 537018 : else if (root->window_pathkeys)
3710 2110 : root->query_pathkeys = root->window_pathkeys;
3711 1069816 : else if (list_length(root->distinct_pathkeys) >
3712 534908 : list_length(root->sort_pathkeys))
3713 2518 : root->query_pathkeys = root->distinct_pathkeys;
3714 532390 : else if (root->sort_pathkeys)
3715 69264 : root->query_pathkeys = root->sort_pathkeys;
3716 463126 : else if (root->setop_pathkeys != NIL)
3717 11376 : root->query_pathkeys = root->setop_pathkeys;
3718 : else
3719 451750 : root->query_pathkeys = NIL;
3720 543926 : }
3721 :
3722 : /*
3723 : * Estimate number of groups produced by grouping clauses (1 if not grouping)
3724 : *
3725 : * path_rows: number of output rows from scan/join step
3726 : * gd: grouping sets data including list of grouping sets and their clauses
3727 : * target_list: target list containing group clause references
3728 : *
3729 : * If doing grouping sets, we also annotate the gsets data with the estimates
3730 : * for each set and each individual rollup list, with a view to later
3731 : * determining whether some combination of them could be hashed instead.
3732 : */
3733 : static double
3734 54374 : get_number_of_groups(PlannerInfo *root,
3735 : double path_rows,
3736 : grouping_sets_data *gd,
3737 : List *target_list)
3738 : {
3739 54374 : Query *parse = root->parse;
3740 : double dNumGroups;
3741 :
3742 54374 : if (parse->groupClause)
3743 : {
3744 : List *groupExprs;
3745 :
3746 10406 : if (parse->groupingSets)
3747 : {
3748 : /* Add up the estimates for each grouping set */
3749 : ListCell *lc;
3750 :
3751 : Assert(gd); /* keep Coverity happy */
3752 :
3753 936 : dNumGroups = 0;
3754 :
3755 2498 : foreach(lc, gd->rollups)
3756 : {
3757 1562 : RollupData *rollup = lfirst_node(RollupData, lc);
3758 : ListCell *lc2;
3759 : ListCell *lc3;
3760 :
3761 1562 : groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3762 : target_list);
3763 :
3764 1562 : rollup->numGroups = 0.0;
3765 :
3766 4442 : forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3767 : {
3768 2880 : List *gset = (List *) lfirst(lc2);
3769 2880 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3770 2880 : double numGroups = estimate_num_groups(root,
3771 : groupExprs,
3772 : path_rows,
3773 : &gset,
3774 : NULL);
3775 :
3776 2880 : gs->numGroups = numGroups;
3777 2880 : rollup->numGroups += numGroups;
3778 : }
3779 :
3780 1562 : dNumGroups += rollup->numGroups;
3781 : }
3782 :
3783 936 : if (gd->hash_sets_idx)
3784 : {
3785 : ListCell *lc2;
3786 :
3787 36 : gd->dNumHashGroups = 0;
3788 :
3789 36 : groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3790 : target_list);
3791 :
3792 78 : forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3793 : {
3794 42 : List *gset = (List *) lfirst(lc);
3795 42 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3796 42 : double numGroups = estimate_num_groups(root,
3797 : groupExprs,
3798 : path_rows,
3799 : &gset,
3800 : NULL);
3801 :
3802 42 : gs->numGroups = numGroups;
3803 42 : gd->dNumHashGroups += numGroups;
3804 : }
3805 :
3806 36 : dNumGroups += gd->dNumHashGroups;
3807 : }
3808 : }
3809 : else
3810 : {
3811 : /* Plain GROUP BY -- estimate based on optimized groupClause */
3812 9470 : groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3813 : target_list);
3814 :
3815 9470 : dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3816 : NULL, NULL);
3817 : }
3818 : }
3819 43968 : else if (parse->groupingSets)
3820 : {
3821 : /* Empty grouping sets ... one result row for each one */
3822 60 : dNumGroups = list_length(parse->groupingSets);
3823 : }
3824 43908 : else if (parse->hasAggs || root->hasHavingQual)
3825 : {
3826 : /* Plain aggregation, one result row */
3827 43908 : dNumGroups = 1;
3828 : }
3829 : else
3830 : {
3831 : /* Not grouping */
3832 0 : dNumGroups = 1;
3833 : }
3834 :
3835 54374 : return dNumGroups;
3836 : }
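/*
 * Example of the grouping-sets branch above: ROLLUP(a, b) produces one
 * rollup containing the sets (a, b), (a) and ().  Each set gets its own
 * estimate_num_groups() call (the empty set contributes a single group),
 * the per-set estimates are recorded in the GroupingSetData nodes, and
 * their sum becomes both rollup->numGroups and this function's result.
 */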
3837 :
3838 : /*
3839 : * create_grouping_paths
3840 : *
3841 : * Build a new upperrel containing Paths for grouping and/or aggregation.
3842 : * Along the way, we also build an upperrel for Paths which are partially
3843 : * grouped and/or aggregated. A partially grouped and/or aggregated path
3844 : * needs a FinalizeAggregate node to complete the aggregation. Currently,
3845 : * the only partially grouped paths we build are also partial paths; that
3846 : * is, they need a Gather and then a FinalizeAggregate.
3847 : *
3848 : * input_rel: contains the source-data Paths
3849 : * target: the pathtarget for the result Paths to compute
3850 : * gd: grouping sets data including list of grouping sets and their clauses
3851 : *
3852 : * Note: all Paths in input_rel are expected to return the target computed
3853 : * by make_group_input_target.
3854 : */
3855 : static RelOptInfo *
3856 45978 : create_grouping_paths(PlannerInfo *root,
3857 : RelOptInfo *input_rel,
3858 : PathTarget *target,
3859 : bool target_parallel_safe,
3860 : grouping_sets_data *gd)
3861 : {
3862 45978 : Query *parse = root->parse;
3863 : RelOptInfo *grouped_rel;
3864 : RelOptInfo *partially_grouped_rel;
3865 : AggClauseCosts agg_costs;
3866 :
3867 275868 : MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3868 45978 : get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3869 :
3870 : /*
3871 : * Create grouping relation to hold fully aggregated grouping and/or
3872 : * aggregation paths.
3873 : */
3874 45978 : grouped_rel = make_grouping_rel(root, input_rel, target,
3875 : target_parallel_safe, parse->havingQual);
3876 :
3877 : /*
3878 : * Create either paths for a degenerate grouping or paths for ordinary
3879 : * grouping, as appropriate.
3880 : */
3881 45978 : if (is_degenerate_grouping(root))
3882 42 : create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3883 : else
3884 : {
3885 45936 : int flags = 0;
3886 : GroupPathExtraData extra;
3887 :
3888 : /*
3889 : * Determine whether it's possible to perform sort-based
3890 : * implementations of grouping. (Note that if processed_groupClause
3891 : * is empty, grouping_is_sortable() is trivially true, and all the
3892 : * pathkeys_contained_in() tests will succeed too, so that we'll
3893 : * consider every surviving input path.)
3894 : *
3895 : * If we have grouping sets, we might be able to sort some but not all
3896 : * of them; in this case, we need can_sort to be true as long as we
3897 : * must consider any sorted-input plan.
3898 : */
3899 45936 : if ((gd && gd->rollups != NIL)
3900 44946 : || grouping_is_sortable(root->processed_groupClause))
3901 45930 : flags |= GROUPING_CAN_USE_SORT;
3902 :
3903 : /*
3904 : * Determine whether we should consider hash-based implementations of
3905 : * grouping.
3906 : *
3907 : * Hashed aggregation only applies if we're grouping. If we have
3908 : * grouping sets, some groups might be hashable but others not; in
3909 : * this case we set can_hash true as long as there is nothing globally
3910 : * preventing us from hashing (and we should therefore consider plans
3911 : * with hashes).
3912 : *
3913 : * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3914 : * BY aggregates. (Doing so would imply storing *all* the input
3915 : * values in the hash table, and/or running many sorts in parallel,
3916 : * either of which seems like a certain loser.) We similarly don't
3917 : * support ordered-set aggregates in hashed aggregation, but that case
3918 : * is also included in the numOrderedAggs count.
3919 : *
3920 : * Note: grouping_is_hashable() is much more expensive to check than
3921 : * the other gating conditions, so we want to do it last.
3922 : */
3923 45936 : if ((parse->groupClause != NIL &&
3924 9856 : root->numOrderedAggs == 0 &&
3925 4788 : (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3926 4784 : flags |= GROUPING_CAN_USE_HASH;
3927 :
3928 : /*
3929 : * Determine whether partial aggregation is possible.
3930 : */
3931 45936 : if (can_partial_agg(root))
3932 41040 : flags |= GROUPING_CAN_PARTIAL_AGG;
3933 :
3934 45936 : extra.flags = flags;
3935 45936 : extra.target_parallel_safe = target_parallel_safe;
3936 45936 : extra.havingQual = parse->havingQual;
3937 45936 : extra.targetList = parse->targetList;
3938 45936 : extra.partial_costs_set = false;
3939 :
3940 : /*
3941 : * Determine whether partitionwise aggregation is in theory possible.
3942 : * It can be disabled by the user, and for now, we don't try to
3943 : * support grouping sets. create_ordinary_grouping_paths() will check
3944 : * additional conditions, such as whether input_rel is partitioned.
3945 : */
3946 45936 : if (enable_partitionwise_aggregate && !parse->groupingSets)
3947 700 : extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3948 : else
3949 45236 : extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3950 :
3951 45936 : create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3952 : &agg_costs, gd, &extra,
3953 : &partially_grouped_rel);
3954 : }
3955 :
3956 45972 : set_cheapest(grouped_rel);
3957 45972 : return grouped_rel;
3958 : }
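/*
 * For example, a query containing count(DISTINCT x) makes numOrderedAggs
 * nonzero, so only GROUPING_CAN_USE_SORT is set above and no hashed
 * aggregation paths are generated; a plain GROUP BY over sortable and
 * hashable columns gets both flags, and both strategies are costed.
 */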
3959 :
3960 : /*
3961 : * make_grouping_rel
3962 : *
3963 : * Create a new grouping rel and set basic properties.
3964 : *
3965 : * input_rel represents the underlying scan/join relation.
3966 : * target is the output expected from the grouping relation.
3967 : */
3968 : static RelOptInfo *
3969 48144 : make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3970 : PathTarget *target, bool target_parallel_safe,
3971 : Node *havingQual)
3972 : {
3973 : RelOptInfo *grouped_rel;
3974 :
3975 48144 : if (IS_OTHER_REL(input_rel))
3976 : {
3977 2166 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3978 : input_rel->relids);
3979 2166 : grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3980 : }
3981 : else
3982 : {
3983 : /*
3984 : * By tradition, the relids set for the main grouping relation is
3985 : * NULL. (This could be changed, but might require adjustments
3986 : * elsewhere.)
3987 : */
3988 45978 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3989 : }
3990 :
3991 : /* Set target. */
3992 48144 : grouped_rel->reltarget = target;
3993 :
3994 : /*
3995 : * If the input relation is not parallel-safe, then the grouped relation
3996 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
3997 : * target list and HAVING quals are parallel-safe.
3998 : */
3999 77680 : if (input_rel->consider_parallel && target_parallel_safe &&
4000 29536 : is_parallel_safe(root, havingQual))
4001 29512 : grouped_rel->consider_parallel = true;
4002 :
4003 : /* Assume that the same path generation strategies are allowed */
4004 48144 : grouped_rel->pgs_mask = input_rel->pgs_mask;
4005 :
4006 : /*
4007 : * If the input rel belongs to a single FDW, so does the grouped rel.
4008 : */
4009 48144 : grouped_rel->serverid = input_rel->serverid;
4010 48144 : grouped_rel->userid = input_rel->userid;
4011 48144 : grouped_rel->useridiscurrent = input_rel->useridiscurrent;
4012 48144 : grouped_rel->fdwroutine = input_rel->fdwroutine;
4013 :
4014 48144 : return grouped_rel;
4015 : }
4016 :
4017 : /*
4018 : * is_degenerate_grouping
4019 : *
4020 : * A degenerate grouping is one in which the query has a HAVING qual and/or
4021 : * grouping sets, but no aggregates and no GROUP BY (which implies that the
4022 : * grouping sets are all empty).
4023 : */
4024 : static bool
4025 45978 : is_degenerate_grouping(PlannerInfo *root)
4026 : {
4027 45978 : Query *parse = root->parse;
4028 :
4029 44728 : return (root->hasHavingQual || parse->groupingSets) &&
4030 90706 : !parse->hasAggs && parse->groupClause == NIL;
4031 : }
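/*
 * For example, "SELECT 1 FROM tab HAVING 1 > 0" (an illustrative query) is
 * degenerate: it has a HAVING qual but no aggregates and no GROUP BY, so it
 * emits either zero rows or one row depending solely on the HAVING result.
 */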
4032 :
4033 : /*
4034 : * create_degenerate_grouping_paths
4035 : *
4036 : * When the grouping is degenerate (see is_degenerate_grouping), we are
4037 : * supposed to emit either zero or one row for each grouping set depending on
4038 : * whether HAVING succeeds. Furthermore, there cannot be any variables in
4039 : * either HAVING or the targetlist, so we actually do not need the FROM table
4040 : * at all! We can just throw away the plan-so-far and generate a Result node.
4041 : * This is a sufficiently unusual corner case that it's not worth contorting
4042 : * the structure of this module to avoid having to generate the earlier paths
4043 : * in the first place.
4044 : */
4045 : static void
4046 42 : create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
4047 : RelOptInfo *grouped_rel)
4048 : {
4049 42 : Query *parse = root->parse;
4050 : int nrows;
4051 : Path *path;
4052 :
4053 42 : nrows = list_length(parse->groupingSets);
4054 42 : if (nrows > 1)
4055 : {
4056 : /*
4057 : * Doesn't seem worthwhile writing code to cons up a generate_series
4058 : * or a values scan to emit multiple rows. Instead just make N clones
4059 : * and append them. (With a volatile HAVING clause, this means you
4060 : * might get between 0 and N output rows. Offhand I think that's
4061 : * desired.)
4062 : */
4063 12 : List *paths = NIL;
4064 :
4065 36 : while (--nrows >= 0)
4066 : {
4067 : path = (Path *)
4068 24 : create_group_result_path(root, grouped_rel,
4069 24 : grouped_rel->reltarget,
4070 24 : (List *) parse->havingQual);
4071 24 : paths = lappend(paths, path);
4072 : }
4073 : path = (Path *)
4074 12 : create_append_path(root,
4075 : grouped_rel,
4076 : paths,
4077 : NIL,
4078 : NIL,
4079 : NULL,
4080 : 0,
4081 : false,
4082 : -1);
4083 : }
4084 : else
4085 : {
4086 : /* No grouping sets, or just one, so one output row */
4087 : path = (Path *)
4088 30 : create_group_result_path(root, grouped_rel,
4089 30 : grouped_rel->reltarget,
4090 30 : (List *) parse->havingQual);
4091 : }
4092 :
4093 42 : add_path(grouped_rel, path);
4094 42 : }
4095 :
4096 : /*
4097 : * create_ordinary_grouping_paths
4098 : *
4099 : * Create grouping paths for the ordinary (that is, non-degenerate) case.
4100 : *
4101 : * We need to consider sorted and hashed aggregation in the same function,
4102 : * because otherwise (1) it would be harder to throw an appropriate error
4103 : * message if neither way works, and (2) we should not allow hashtable size
4104 : * considerations to dissuade us from using hashing if sorting is not possible.
4105 : *
4106 : * *partially_grouped_rel_p will be set to the partially grouped rel which this
4107 : * function creates, or to NULL if it doesn't create one.
4108 : */
4109 : static void
4110 48102 : create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
4111 : RelOptInfo *grouped_rel,
4112 : const AggClauseCosts *agg_costs,
4113 : grouping_sets_data *gd,
4114 : GroupPathExtraData *extra,
4115 : RelOptInfo **partially_grouped_rel_p)
4116 : {
4117 48102 : RelOptInfo *partially_grouped_rel = NULL;
4118 48102 : PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4119 :
4120 : /*
4121 : * If this is the topmost grouping relation or if the parent relation is
4122 : * doing some form of partitionwise aggregation, then we may be able to do
4123 : * it at this level also. However, if the input relation is not
4124 : * partitioned, partitionwise aggregate is impossible.
4125 : */
4126 48102 : if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4127 2866 : IS_PARTITIONED_REL(input_rel))
4128 : {
4129 : /*
4130 : * If this is the topmost relation or if the parent relation is doing
4131 : * full partitionwise aggregation, then we can do full partitionwise
4132 : * aggregation provided that the GROUP BY clause contains all of the
4133 : * partitioning columns at this level and the collation used by GROUP
4134 : * BY matches the partitioning collation. Otherwise, we can do at
4135 : * most partial partitionwise aggregation. But if partial aggregation
4136 : * is not supported in general then we can't use it for partitionwise
4137 : * aggregation either.
4138 : *
4139 : * Check parse->groupClause not processed_groupClause, because it's
4140 : * okay if some of the partitioning columns were proved redundant.
4141 : */
4142 1640 : if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4143 772 : group_by_has_partkey(input_rel, extra->targetList,
4144 772 : root->parse->groupClause))
4145 488 : patype = PARTITIONWISE_AGGREGATE_FULL;
4146 380 : else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4147 338 : patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4148 : else
4149 42 : patype = PARTITIONWISE_AGGREGATE_NONE;
4150 : }
4151 :
4152 : /*
4153 : * Before generating paths for grouped_rel, we first generate any possible
4154 : * partially grouped paths; that way, later code can easily consider both
4155 : * parallel and non-parallel approaches to grouping.
4156 : */
4157 48102 : if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4158 : {
4159 : bool force_rel_creation;
4160 :
4161 : /*
4162 : * If we're doing partitionwise aggregation at this level, force
4163 : * creation of a partially_grouped_rel so we can add partitionwise
4164 : * paths to it.
4165 : */
4166 43134 : force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4167 :
4168 : partially_grouped_rel =
4169 43134 : create_partial_grouping_paths(root,
4170 : grouped_rel,
4171 : input_rel,
4172 : gd,
4173 : extra,
4174 : force_rel_creation);
4175 : }
4176 :
4177 : /* Set out parameter. */
4178 48102 : *partially_grouped_rel_p = partially_grouped_rel;
4179 :
4180 : /* Apply partitionwise aggregation technique, if possible. */
4181 48102 : if (patype != PARTITIONWISE_AGGREGATE_NONE)
4182 826 : create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4183 : partially_grouped_rel, agg_costs,
4184 : gd, patype, extra);
4185 :
4186 : /* If we are doing partial aggregation only, return. */
4187 48102 : if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4188 : {
4189 : Assert(partially_grouped_rel);
4190 :
4191 858 : if (partially_grouped_rel->pathlist)
4192 858 : set_cheapest(partially_grouped_rel);
4193 :
4194 858 : return;
4195 : }
4196 :
4197 : /* Gather any partially grouped partial paths. */
4198 47244 : if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4199 2770 : gather_grouping_paths(root, partially_grouped_rel);
4200 :
4201 : /* Now choose the best path(s) for partially_grouped_rel. */
4202 47244 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
4203 2998 : set_cheapest(partially_grouped_rel);
4204 :
4205 : /* Build final grouping paths */
4206 47244 : add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4207 : partially_grouped_rel, agg_costs, gd,
4208 : extra);
4209 :
4210 : /* Give a helpful error if we failed to find any implementation */
4211 47244 : if (grouped_rel->pathlist == NIL)
4212 6 : ereport(ERROR,
4213 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4214 : errmsg("could not implement GROUP BY"),
4215 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4216 :
4217 : /*
4218 : * If there is an FDW that's responsible for all baserels of the query,
4219 : * let it consider adding ForeignPaths.
4220 : */
4221 47238 : if (grouped_rel->fdwroutine &&
4222 338 : grouped_rel->fdwroutine->GetForeignUpperPaths)
4223 336 : grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4224 : input_rel, grouped_rel,
4225 : extra);
4226 :
4227 : /* Let extensions possibly add some more paths */
4228 47238 : if (create_upper_paths_hook)
4229 0 : (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4230 : input_rel, grouped_rel,
4231 : extra);
4232 : }
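/*
 * Illustrative partitionwise cases (hypothetical schema): for a table
 * partitioned by (region), "GROUP BY region" can be grouped fully within
 * each partition and the per-partition results simply appended
 * (PARTITIONWISE_AGGREGATE_FULL), whereas "GROUP BY customer_id" can at
 * best aggregate partially within each partition and must finalize above
 * the Append (PARTITIONWISE_AGGREGATE_PARTIAL), and only when partial
 * aggregation is possible at all.
 */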
4233 :
4234 : /*
4235 : * For a given input path, consider the possible ways of doing grouping sets on
4236 : * it, by combinations of hashing and sorting. This can be called multiple
4237 : * times, so it's important that it not scribble on input. No result is
4238 : * returned, but any generated paths are added to grouped_rel.
4239 : */
4240 : static void
4241 1962 : consider_groupingsets_paths(PlannerInfo *root,
4242 : RelOptInfo *grouped_rel,
4243 : Path *path,
4244 : bool is_sorted,
4245 : bool can_hash,
4246 : grouping_sets_data *gd,
4247 : const AggClauseCosts *agg_costs,
4248 : double dNumGroups)
4249 : {
4250 1962 : Query *parse = root->parse;
4251 1962 : Size hash_mem_limit = get_hash_memory_limit();
4252 :
4253 : /*
4254 : * If we're not being offered sorted input, then only consider plans that
4255 : * can be done entirely by hashing.
4256 : *
4257 : * We can hash everything if it looks like it'll fit in hash_mem. But if
4258 : * the input is actually sorted despite not being advertised as such, we
4259 : * prefer to make use of that in order to use less memory.
4260 : *
4261 : * If none of the grouping sets are sortable, then ignore the hash_mem
4262 : * limit and generate a path anyway, since otherwise we'll just fail.
4263 : */
4264 1962 : if (!is_sorted)
4265 : {
4266 900 : List *new_rollups = NIL;
4267 900 : RollupData *unhashed_rollup = NULL;
4268 : List *sets_data;
4269 900 : List *empty_sets_data = NIL;
4270 900 : List *empty_sets = NIL;
4271 : ListCell *lc;
4272 900 : ListCell *l_start = list_head(gd->rollups);
4273 900 : AggStrategy strat = AGG_HASHED;
4274 : double hashsize;
4275 900 : double exclude_groups = 0.0;
4276 :
4277 : Assert(can_hash);
4278 :
4279 : /*
4280 : * If the input is coincidentally sorted usefully (which can happen
4281 : * even if is_sorted is false, since that only means that our caller
4282 : * has set up the sorting for us), then save some hashtable space by
4283 : * making use of that. But we need to watch out for degenerate cases:
4284 : *
4285 : * 1) If there are any empty grouping sets, then group_pathkeys might
4286 : * be NIL if all non-empty grouping sets are unsortable. In this case,
4287 : * there will be a rollup containing only empty groups, and the
4288 : * pathkeys_contained_in test is vacuously true; this is ok.
4289 : *
4290 : * XXX: the above relies on the fact that group_pathkeys is generated
4291 : * from the first rollup. If we add the ability to consider multiple
4292 : * sort orders for grouping input, this assumption might fail.
4293 : *
4294 : * 2) If there are no empty sets and only unsortable sets, then the
4295 : * rollups list will be empty (and thus l_start == NULL), and
4296 : * group_pathkeys will be NIL; we must ensure that the vacuously-true
4297 : * pathkeys_contained_in test doesn't cause us to crash.
4298 : */
4299 1794 : if (l_start != NULL &&
4300 894 : pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4301 : {
4302 36 : unhashed_rollup = lfirst_node(RollupData, l_start);
4303 36 : exclude_groups = unhashed_rollup->numGroups;
4304 36 : l_start = lnext(gd->rollups, l_start);
4305 : }
4306 :
4307 900 : hashsize = estimate_hashagg_tablesize(root,
4308 : path,
4309 : agg_costs,
4310 : dNumGroups - exclude_groups);
4311 :
4312 : /*
4313 : * gd->rollups is empty if we have only unsortable columns to work
4314 : * with. Override hash_mem in that case; otherwise, we'll rely on the
4315 : * sorted-input case to generate usable mixed paths.
4316 : */
4317 900 : if (hashsize > hash_mem_limit && gd->rollups)
4318 18 : return; /* nope, won't fit */
4319 :
4320 : /*
4321 : * We need to burst the existing rollups list into individual grouping
4322 : * sets and recompute a groupClause for each set.
4323 : */
4324 882 : sets_data = list_copy(gd->unsortable_sets);
4325 :
4326 2228 : for_each_cell(lc, gd->rollups, l_start)
4327 : {
4328 1370 : RollupData *rollup = lfirst_node(RollupData, lc);
4329 :
4330 : /*
4331 : * If we find an unhashable rollup that's not been skipped by the
4332 : * "actually sorted" check above, we can't cope; we'd need sorted
4333 : * input (with a different sort order) but we can't get that here.
4334 : * So bail out; we'll get a valid path from the is_sorted case
4335 : * instead.
4336 : *
4337 : * The mere presence of empty grouping sets doesn't make a rollup
4338 : * unhashable (see preprocess_grouping_sets), we handle those
4339 : * specially below.
4340 : */
4341 1370 : if (!rollup->hashable)
4342 24 : return;
4343 :
4344 1346 : sets_data = list_concat(sets_data, rollup->gsets_data);
4345 : }
4346 3480 : foreach(lc, sets_data)
4347 : {
4348 2622 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4349 2622 : List *gset = gs->set;
4350 : RollupData *rollup;
4351 :
4352 2622 : if (gset == NIL)
4353 : {
4354 : /* Empty grouping sets can't be hashed. */
4355 530 : empty_sets_data = lappend(empty_sets_data, gs);
4356 530 : empty_sets = lappend(empty_sets, NIL);
4357 : }
4358 : else
4359 : {
4360 2092 : rollup = makeNode(RollupData);
4361 :
4362 2092 : rollup->groupClause = preprocess_groupclause(root, gset);
4363 2092 : rollup->gsets_data = list_make1(gs);
4364 2092 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4365 : rollup->gsets_data,
4366 : gd->tleref_to_colnum_map);
4367 2092 : rollup->numGroups = gs->numGroups;
4368 2092 : rollup->hashable = true;
4369 2092 : rollup->is_hashed = true;
4370 2092 : new_rollups = lappend(new_rollups, rollup);
4371 : }
4372 : }
4373 :
4374 : /*
4375 : * If we didn't find anything nonempty to hash, then bail. We'll
4376 : * generate a path from the is_sorted case.
4377 : */
4378 858 : if (new_rollups == NIL)
4379 0 : return;
4380 :
4381 : /*
4382 : * If there were empty grouping sets they should have been in the
4383 : * first rollup.
4384 : */
4385 : Assert(!unhashed_rollup || !empty_sets);
4386 :
4387 858 : if (unhashed_rollup)
4388 : {
4389 36 : new_rollups = lappend(new_rollups, unhashed_rollup);
4390 36 : strat = AGG_MIXED;
4391 : }
4392 822 : else if (empty_sets)
4393 : {
4394 482 : RollupData *rollup = makeNode(RollupData);
4395 :
4396 482 : rollup->groupClause = NIL;
4397 482 : rollup->gsets_data = empty_sets_data;
4398 482 : rollup->gsets = empty_sets;
4399 482 : rollup->numGroups = list_length(empty_sets);
4400 482 : rollup->hashable = false;
4401 482 : rollup->is_hashed = false;
4402 482 : new_rollups = lappend(new_rollups, rollup);
4403 482 : strat = AGG_MIXED;
4404 : }
4405 :
4406 858 : add_path(grouped_rel, (Path *)
4407 858 : create_groupingsets_path(root,
4408 : grouped_rel,
4409 : path,
4410 858 : (List *) parse->havingQual,
4411 : strat,
4412 : new_rollups,
4413 : agg_costs));
4414 858 : return;
4415 : }
4416 :
4417 : /*
4418 : * If we have sorted input but nothing we can do with it, bail.
4419 : */
4420 1062 : if (gd->rollups == NIL)
4421 0 : return;
4422 :
4423 : /*
4424 : * Given sorted input, we try and make two paths: one sorted and one mixed
4425 : * sort/hash. (We need to try both because hashagg might be disabled, or
4426 : * some columns might not be sortable.)
4427 : *
4428 : * can_hash is passed in as false if some obstacle elsewhere (such as
4429 : * ordered aggs) means that we shouldn't consider hashing at all.
4430 : */
4431 1062 : if (can_hash && gd->any_hashable)
4432 : {
4433 966 : List *rollups = NIL;
4434 966 : List *hash_sets = list_copy(gd->unsortable_sets);
4435 966 : double availspace = hash_mem_limit;
4436 : ListCell *lc;
4437 :
4438 : /*
4439 : * Account first for space needed for groups we can't sort at all.
4440 : */
4441 966 : availspace -= estimate_hashagg_tablesize(root,
4442 : path,
4443 : agg_costs,
4444 : gd->dNumHashGroups);
4445 :
4446 966 : if (availspace > 0 && list_length(gd->rollups) > 1)
4447 : {
4448 : double scale;
4449 492 : int num_rollups = list_length(gd->rollups);
4450 : int k_capacity;
4451 492 : int *k_weights = palloc(num_rollups * sizeof(int));
4452 492 : Bitmapset *hash_items = NULL;
4453 : int i;
4454 :
4455 : /*
4456 : * We treat this as a knapsack problem: the knapsack capacity
4457 : * represents hash_mem, the item weights are the estimated memory
4458 : * usage of the hashtables needed to implement a single rollup,
4459 : * and we really ought to use the cost saving as the item value;
4460 : * however, currently the costs assigned to sort nodes don't
4461 : * reflect the comparison costs well, and so we treat all items as
4462 : * of equal value (each rollup we hash instead saves us one sort).
4463 : *
4464 : * To use the discrete knapsack, we need to scale the values to a
4465 : * reasonably small bounded range. We choose to allow a 5% error
4466 : * margin; we have no more than 4096 rollups in the worst possible
4467 : * case, which with a 5% error margin will require a bit over 42MB
4468 : * of workspace. (Anyone wanting to plan queries that complex had
4469 : * better have the memory for it. In more reasonable cases, with
4470 : * no more than a couple of dozen rollups, the memory usage will
4471 : * be negligible.)
4472 : *
4473 : * k_capacity is naturally bounded, but we clamp the values for
4474 : * scale and weight (below) to avoid overflows or underflows (or
4475 : * uselessly trying to use a scale factor less than 1 byte).
4476 : */
4477 492 : scale = Max(availspace / (20.0 * num_rollups), 1.0);
4478 492 : k_capacity = (int) floor(availspace / scale);
4479 :
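                 :             /*
                 :              * As a rough illustration of the scaling (not taken from any
                 :              * particular plan): each rollup's weight is rounded down to a
                 :              * whole number of units of size availspace / (20 * num_rollups),
                 :              * so the total rounding error across all rollups is bounded by
                 :              * availspace / 20, which is the 5% margin mentioned above.  For
                 :              * example, with availspace = 4MB and num_rollups = 3, scale comes
                 :              * out near 70kB and k_capacity near 60.
                 :              */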
4480 : /*
4481 : * We leave the first rollup out of consideration since it's the
4482 : * one that matches the input sort order. We assign indexes "i"
4483 : * to only those entries considered for hashing; the second loop,
4484 : * below, must use the same condition.
4485 : */
4486 492 : i = 0;
4487 1248 : for_each_from(lc, gd->rollups, 1)
4488 : {
4489 756 : RollupData *rollup = lfirst_node(RollupData, lc);
4490 :
4491 756 : if (rollup->hashable)
4492 : {
4493 756 : double sz = estimate_hashagg_tablesize(root,
4494 : path,
4495 : agg_costs,
4496 : rollup->numGroups);
4497 :
4498 : /*
4499 : * If sz is enormous, but hash_mem (and hence scale) is
4500 : * small, avoid integer overflow here.
4501 : */
4502 756 : k_weights[i] = (int) Min(floor(sz / scale),
4503 : k_capacity + 1.0);
4504 756 : ++i;
4505 : }
4506 : }
4507 :
4508 : /*
4509 : * Apply knapsack algorithm; compute the set of items which
4510 : * maximizes the value stored (in this case the number of sorts
4511 : * saved) while keeping the total size (approximately) within
4512 : * capacity.
4513 : */
4514 492 : if (i > 0)
4515 492 : hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4516 :
4517 492 : if (!bms_is_empty(hash_items))
4518 : {
4519 492 : rollups = list_make1(linitial(gd->rollups));
4520 :
4521 492 : i = 0;
4522 1248 : for_each_from(lc, gd->rollups, 1)
4523 : {
4524 756 : RollupData *rollup = lfirst_node(RollupData, lc);
4525 :
4526 756 : if (rollup->hashable)
4527 : {
4528 756 : if (bms_is_member(i, hash_items))
4529 720 : hash_sets = list_concat(hash_sets,
4530 720 : rollup->gsets_data);
4531 : else
4532 36 : rollups = lappend(rollups, rollup);
4533 756 : ++i;
4534 : }
4535 : else
4536 0 : rollups = lappend(rollups, rollup);
4537 : }
4538 : }
4539 : }
4540 :
4541 966 : if (!rollups && hash_sets)
4542 24 : rollups = list_copy(gd->rollups);
4543 :
4544 1826 : foreach(lc, hash_sets)
4545 : {
4546 860 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4547 860 : RollupData *rollup = makeNode(RollupData);
4548 :
4549 : Assert(gs->set != NIL);
4550 :
4551 860 : rollup->groupClause = preprocess_groupclause(root, gs->set);
4552 860 : rollup->gsets_data = list_make1(gs);
4553 860 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4554 : rollup->gsets_data,
4555 : gd->tleref_to_colnum_map);
4556 860 : rollup->numGroups = gs->numGroups;
4557 860 : rollup->hashable = true;
4558 860 : rollup->is_hashed = true;
4559 860 : rollups = lcons(rollup, rollups);
4560 : }
4561 :
4562 966 : if (rollups)
4563 : {
4564 516 : add_path(grouped_rel, (Path *)
4565 516 : create_groupingsets_path(root,
4566 : grouped_rel,
4567 : path,
4568 516 : (List *) parse->havingQual,
4569 : AGG_MIXED,
4570 : rollups,
4571 : agg_costs));
4572 : }
4573 : }
4574 :
4575 : /*
4576 : * Now try the simple sorted case.
4577 : */
4578 1062 : if (!gd->unsortable_sets)
4579 1032 : add_path(grouped_rel, (Path *)
4580 1032 : create_groupingsets_path(root,
4581 : grouped_rel,
4582 : path,
4583 1032 : (List *) parse->havingQual,
4584 : AGG_SORTED,
4585 : gd->rollups,
4586 : agg_costs));
4587 : }
4588 :
4589 : /*
4590 : * create_window_paths
4591 : *
4592 : * Build a new upperrel containing Paths for window-function evaluation.
4593 : *
4594 : * input_rel: contains the source-data Paths
4595 : * input_target: result of make_window_input_target
4596 : * output_target: what the topmost WindowAggPath should return
4597 : * wflists: result of find_window_functions
4598 : * activeWindows: result of select_active_windows
4599 : *
4600 : * Note: all Paths in input_rel are expected to return input_target.
4601 : */
4602 : static RelOptInfo *
4603 2576 : create_window_paths(PlannerInfo *root,
4604 : RelOptInfo *input_rel,
4605 : PathTarget *input_target,
4606 : PathTarget *output_target,
4607 : bool output_target_parallel_safe,
4608 : WindowFuncLists *wflists,
4609 : List *activeWindows)
4610 : {
4611 : RelOptInfo *window_rel;
4612 : ListCell *lc;
4613 :
4614 : /* For now, do all work in the (WINDOW, NULL) upperrel */
4615 2576 : window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4616 :
4617 : /*
4618 : * If the input relation is not parallel-safe, then the window relation
4619 : * can't be parallel-safe, either. Otherwise, we need to examine the
4620 : * target list and active windows for non-parallel-safe constructs.
4621 : */
4622 2576 : if (input_rel->consider_parallel && output_target_parallel_safe &&
4623 0 : is_parallel_safe(root, (Node *) activeWindows))
4624 0 : window_rel->consider_parallel = true;
4625 :
4626 : /*
4627 : * If the input rel belongs to a single FDW, so does the window rel.
4628 : */
4629 2576 : window_rel->serverid = input_rel->serverid;
4630 2576 : window_rel->userid = input_rel->userid;
4631 2576 : window_rel->useridiscurrent = input_rel->useridiscurrent;
4632 2576 : window_rel->fdwroutine = input_rel->fdwroutine;
4633 :
4634 : /*
4635 : * Consider computing window functions starting from the existing
4636 : * cheapest-total path (which will likely require a sort) as well as any
4637 : * existing paths that satisfy or partially satisfy root->window_pathkeys.
4638 : */
4639 5492 : foreach(lc, input_rel->pathlist)
4640 : {
4641 2916 : Path *path = (Path *) lfirst(lc);
4642 : int presorted_keys;
4643 :
4644 3256 : if (path == input_rel->cheapest_total_path ||
4645 340 : pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4646 152 : &presorted_keys) ||
4647 152 : presorted_keys > 0)
4648 2790 : create_one_window_path(root,
4649 : window_rel,
4650 : path,
4651 : input_target,
4652 : output_target,
4653 : wflists,
4654 : activeWindows);
4655 : }
4656 :
4657 : /*
4658 : * If there is an FDW that's responsible for all baserels of the query,
4659 : * let it consider adding ForeignPaths.
4660 : */
4661 2576 : if (window_rel->fdwroutine &&
4662 12 : window_rel->fdwroutine->GetForeignUpperPaths)
4663 12 : window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4664 : input_rel, window_rel,
4665 : NULL);
4666 :
4667 : /* Let extensions possibly add some more paths */
4668 2576 : if (create_upper_paths_hook)
4669 0 : (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4670 : input_rel, window_rel, NULL);
4671 :
4672 : /* Now choose the best path(s) */
4673 2576 : set_cheapest(window_rel);
4674 :
4675 2576 : return window_rel;
4676 : }
4677 :
4678 : /*
4679 : * Stack window-function implementation steps atop the given Path, and
4680 : * add the result to window_rel.
4681 : *
4682 : * window_rel: upperrel to contain result
4683 : * path: input Path to use (must return input_target)
4684 : * input_target: result of make_window_input_target
4685 : * output_target: what the topmost WindowAggPath should return
4686 : * wflists: result of find_window_functions
4687 : * activeWindows: result of select_active_windows
4688 : */
4689 : static void
4690 2790 : create_one_window_path(PlannerInfo *root,
4691 : RelOptInfo *window_rel,
4692 : Path *path,
4693 : PathTarget *input_target,
4694 : PathTarget *output_target,
4695 : WindowFuncLists *wflists,
4696 : List *activeWindows)
4697 : {
4698 : PathTarget *window_target;
4699 : ListCell *l;
4700 2790 : List *topqual = NIL;
4701 :
4702 : /*
4703 : * Since each window clause could require a different sort order, we stack
4704 : * up a WindowAgg node for each clause, with sort steps between them as
4705 : * needed. (We assume that select_active_windows chose a good order for
4706 : * executing the clauses in.)
4707 : *
4708 : * input_target should contain all Vars and Aggs needed for the result.
4709 : * (In some cases we wouldn't need to propagate all of these all the way
4710 : * to the top, since they might only be needed as inputs to WindowFuncs.
4711 : * It's probably not worth trying to optimize that though.) It must also
4712 : * contain all window partitioning and sorting expressions, to ensure
4713 : * they're computed only once at the bottom of the stack (that's critical
4714 : * for volatile functions). As we climb up the stack, we'll add outputs
4715 : * for the WindowFuncs computed at each level.
4716 : */
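                 :     /*
                 :      * As a hypothetical illustration of the stacking: given
                 :      *
                 :      *     SELECT sum(x) OVER (PARTITION BY a),
                 :      *            rank()  OVER (ORDER BY b)
                 :      *     FROM tab;
                 :      *
                 :      * one possible result, reading bottom-up, is Sort(a) -> WindowAgg(sum)
                 :      * -> Sort(b) -> WindowAgg(rank), with input_target already computed by
                 :      * the path below the bottom sort.
                 :      */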
4717 2790 : window_target = input_target;
4718 :
4719 5766 : foreach(l, activeWindows)
4720 : {
4721 2976 : WindowClause *wc = lfirst_node(WindowClause, l);
4722 : List *window_pathkeys;
4723 2976 : List *runcondition = NIL;
4724 : int presorted_keys;
4725 : bool is_sorted;
4726 : bool topwindow;
4727 : ListCell *lc2;
4728 :
4729 2976 : window_pathkeys = make_pathkeys_for_window(root,
4730 : wc,
4731 : root->processed_tlist);
4732 :
4733 2976 : is_sorted = pathkeys_count_contained_in(window_pathkeys,
4734 : path->pathkeys,
4735 : &presorted_keys);
4736 :
4737 : /* Sort if necessary */
4738 2976 : if (!is_sorted)
4739 : {
4740 : /*
4741 : * No presorted keys or incremental sort disabled, just perform a
4742 : * complete sort.
4743 : */
4744 2184 : if (presorted_keys == 0 || !enable_incremental_sort)
4745 2122 : path = (Path *) create_sort_path(root, window_rel,
4746 : path,
4747 : window_pathkeys,
4748 : -1.0);
4749 : else
4750 : {
4751 : /*
4752 : * Since we have presorted keys and incremental sort is
4753 : * enabled, just use incremental sort.
4754 : */
4755 62 : path = (Path *) create_incremental_sort_path(root,
4756 : window_rel,
4757 : path,
4758 : window_pathkeys,
4759 : presorted_keys,
4760 : -1.0);
4761 : }
4762 : }
4763 :
4764 2976 : if (lnext(activeWindows, l))
4765 : {
4766 : /*
4767 : * Add the current WindowFuncs to the output target for this
4768 : * intermediate WindowAggPath. We must copy window_target to
4769 : * avoid changing the previous path's target.
4770 : *
4771 : * Note: a WindowFunc adds nothing to the target's eval costs; but
4772 : * we do need to account for the increase in tlist width.
4773 : */
4774 186 : int64 tuple_width = window_target->width;
4775 :
4776 186 : window_target = copy_pathtarget(window_target);
4777 444 : foreach(lc2, wflists->windowFuncs[wc->winref])
4778 : {
4779 258 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4780 :
4781 258 : add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4782 258 : tuple_width += get_typavgwidth(wfunc->wintype, -1);
4783 : }
4784 186 : window_target->width = clamp_width_est(tuple_width);
4785 : }
4786 : else
4787 : {
4788 : /* Install the goal target in the topmost WindowAgg */
4789 2790 : window_target = output_target;
4790 : }
4791 :
4792 : /* mark the final item in the list as the top-level window */
4793 2976 : topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4794 :
4795 : /*
4796 : * Collect the WindowFuncRunConditions from each WindowFunc and
4797 : * convert them into OpExprs
4798 : */
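                 :         /*
                 :          * As a hypothetical example: for a pushed-down qual such as
                 :          * "row_number() OVER w <= 10", the WindowFuncRunCondition records
                 :          * the "<=" operator, the constant 10, and the fact that the
                 :          * WindowFunc is the left-hand argument; the loop below rebuilds it
                 :          * as the OpExpr "row_number() OVER w <= 10" for use as the
                 :          * WindowAgg's run condition.
                 :          */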
4799 6834 : foreach(lc2, wflists->windowFuncs[wc->winref])
4800 : {
4801 : ListCell *lc3;
4802 3858 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4803 :
4804 4038 : foreach(lc3, wfunc->runCondition)
4805 : {
4806 180 : WindowFuncRunCondition *wfuncrc =
4807 : lfirst_node(WindowFuncRunCondition, lc3);
4808 : Expr *opexpr;
4809 : Expr *leftop;
4810 : Expr *rightop;
4811 :
4812 180 : if (wfuncrc->wfunc_left)
4813 : {
4814 162 : leftop = (Expr *) copyObject(wfunc);
4815 162 : rightop = copyObject(wfuncrc->arg);
4816 : }
4817 : else
4818 : {
4819 18 : leftop = copyObject(wfuncrc->arg);
4820 18 : rightop = (Expr *) copyObject(wfunc);
4821 : }
4822 :
4823 180 : opexpr = make_opclause(wfuncrc->opno,
4824 : BOOLOID,
4825 : false,
4826 : leftop,
4827 : rightop,
4828 : InvalidOid,
4829 : wfuncrc->inputcollid);
4830 :
4831 180 : runcondition = lappend(runcondition, opexpr);
4832 :
4833 180 : if (!topwindow)
4834 24 : topqual = lappend(topqual, opexpr);
4835 : }
4836 : }
4837 :
4838 : path = (Path *)
4839 2976 : create_windowagg_path(root, window_rel, path, window_target,
4840 2976 : wflists->windowFuncs[wc->winref],
4841 : runcondition, wc,
4842 : topwindow ? topqual : NIL, topwindow);
4843 : }
4844 :
4845 2790 : add_path(window_rel, path);
4846 2790 : }
4847 :
4848 : /*
4849 : * create_distinct_paths
4850 : *
4851 : * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4852 : *
4853 : * input_rel: contains the source-data Paths
4854 : * target: the pathtarget for the result Paths to compute
4855 : *
4856 : * Note: input paths should already compute the desired pathtarget, since
4857 : * Sort/Unique won't project anything.
4858 : */
4859 : static RelOptInfo *
4860 3014 : create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4861 : PathTarget *target)
4862 : {
4863 : RelOptInfo *distinct_rel;
4864 :
4865 : /* For now, do all work in the (DISTINCT, NULL) upperrel */
4866 3014 : distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4867 :
4868 : /*
4869 : * We don't compute anything at this level, so distinct_rel will be
4870 : * parallel-safe if the input rel is parallel-safe. In particular, if
4871 : * there is a DISTINCT ON (...) clause, any path for the input_rel will
4872 : * output those expressions, and will not be parallel-safe unless those
4873 : * expressions are parallel-safe.
4874 : */
4875 3014 : distinct_rel->consider_parallel = input_rel->consider_parallel;
4876 :
4877 : /*
4878 : * If the input rel belongs to a single FDW, so does the distinct_rel.
4879 : */
4880 3014 : distinct_rel->serverid = input_rel->serverid;
4881 3014 : distinct_rel->userid = input_rel->userid;
4882 3014 : distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4883 3014 : distinct_rel->fdwroutine = input_rel->fdwroutine;
4884 :
4885 : /* build distinct paths based on input_rel's pathlist */
4886 3014 : create_final_distinct_paths(root, input_rel, distinct_rel);
4887 :
4888 : /* now build distinct paths based on input_rel's partial_pathlist */
4889 3014 : create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4890 :
4891 : /* Give a helpful error if we failed to create any paths */
4892 3014 : if (distinct_rel->pathlist == NIL)
4893 0 : ereport(ERROR,
4894 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4895 : errmsg("could not implement DISTINCT"),
4896 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4897 :
4898 : /*
4899 : * If there is an FDW that's responsible for all baserels of the query,
4900 : * let it consider adding ForeignPaths.
4901 : */
4902 3014 : if (distinct_rel->fdwroutine &&
4903 16 : distinct_rel->fdwroutine->GetForeignUpperPaths)
4904 16 : distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4905 : UPPERREL_DISTINCT,
4906 : input_rel,
4907 : distinct_rel,
4908 : NULL);
4909 :
4910 : /* Let extensions possibly add some more paths */
4911 3014 : if (create_upper_paths_hook)
4912 0 : (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4913 : distinct_rel, NULL);
4914 :
4915 : /* Now choose the best path(s) */
4916 3014 : set_cheapest(distinct_rel);
4917 :
4918 3014 : return distinct_rel;
4919 : }
4920 :
4921 : /*
4922 : * create_partial_distinct_paths
4923 : *
4924 : * Process 'input_rel' partial paths and add unique/aggregate paths to the
4925 : * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4926 : * paths on top and add a final unique/aggregate path to remove any duplicate
4927 : * produced from combining rows from parallel workers.
4928 : */
4929 : static void
4930 3014 : create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4931 : RelOptInfo *final_distinct_rel,
4932 : PathTarget *target)
4933 : {
4934 : RelOptInfo *partial_distinct_rel;
4935 : Query *parse;
4936 : List *distinctExprs;
4937 : double numDistinctRows;
4938 : Path *cheapest_partial_path;
4939 : ListCell *lc;
4940 :
4941 : /* nothing to do when there are no partial paths in the input rel */
4942 3014 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4943 2906 : return;
4944 :
4945 108 : parse = root->parse;
4946 :
4947 : /* can't do parallel DISTINCT ON */
4948 108 : if (parse->hasDistinctOn)
4949 0 : return;
4950 :
4951 108 : partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4952 : NULL);
4953 108 : partial_distinct_rel->reltarget = target;
4954 108 : partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4955 :
4956 : /*
4957 : * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4958 : */
4959 108 : partial_distinct_rel->serverid = input_rel->serverid;
4960 108 : partial_distinct_rel->userid = input_rel->userid;
4961 108 : partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4962 108 : partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4963 :
4964 108 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
4965 :
4966 108 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4967 : parse->targetList);
4968 :
4969 : /* estimate how many distinct rows we'll get from each worker */
4970 108 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4971 : cheapest_partial_path->rows,
4972 : NULL, NULL);
4973 :
4974 : /*
4975 : * Try sorting the cheapest path and incrementally sorting any paths with
4976             :      * presorted keys and put unique paths atop those. We'll also
4977 : * attempt to reorder the required pathkeys to match the input path's
4978 : * pathkeys as much as possible, in hopes of avoiding a possible need to
4979 : * re-sort.
4980 : */
4981 108 : if (grouping_is_sortable(root->processed_distinctClause))
4982 : {
4983 234 : foreach(lc, input_rel->partial_pathlist)
4984 : {
4985 126 : Path *input_path = (Path *) lfirst(lc);
4986 : Path *sorted_path;
4987 126 : List *useful_pathkeys_list = NIL;
4988 :
4989 : useful_pathkeys_list =
4990 126 : get_useful_pathkeys_for_distinct(root,
4991 : root->distinct_pathkeys,
4992 : input_path->pathkeys);
4993 : Assert(list_length(useful_pathkeys_list) > 0);
4994 :
4995 390 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4996 : {
4997 138 : sorted_path = make_ordered_path(root,
4998 : partial_distinct_rel,
4999 : input_path,
5000 : cheapest_partial_path,
5001 : useful_pathkeys,
5002 : -1.0);
5003 :
5004 138 : if (sorted_path == NULL)
5005 12 : continue;
5006 :
5007 : /*
5008 : * An empty distinct_pathkeys means all tuples have the same
5009 : * value for the DISTINCT clause. See
5010 : * create_final_distinct_paths()
5011 : */
5012 126 : if (root->distinct_pathkeys == NIL)
5013 : {
5014 : Node *limitCount;
5015 :
5016 6 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5017 : sizeof(int64),
5018 : Int64GetDatum(1), false,
5019 : true);
5020 :
5021 : /*
5022 : * Apply a LimitPath onto the partial path to restrict the
5023 : * tuples from each worker to 1.
5024 : * create_final_distinct_paths will need to apply an
5025 : * additional LimitPath to restrict this to a single row
5026 : * after the Gather node. If the query already has a
5027 : * LIMIT clause, then we could end up with three Limit
5028 : * nodes in the final plan. Consolidating the top two of
5029 : * these could be done, but does not seem worth troubling
5030 : * over.
5031 : */
5032 6 : add_partial_path(partial_distinct_rel, (Path *)
5033 6 : create_limit_path(root, partial_distinct_rel,
5034 : sorted_path,
5035 : NULL,
5036 : limitCount,
5037 : LIMIT_OPTION_COUNT,
5038 : 0, 1));
5039 : }
5040 : else
5041 : {
5042 120 : add_partial_path(partial_distinct_rel, (Path *)
5043 120 : create_unique_path(root, partial_distinct_rel,
5044 : sorted_path,
5045 120 : list_length(root->distinct_pathkeys),
5046 : numDistinctRows));
5047 : }
5048 : }
5049 : }
5050 : }
5051 :
5052 : /*
5053 : * Now try hash aggregate paths, if enabled and hashing is possible. Since
5054 : * we're not on the hook to ensure we do our best to create at least one
5055 : * path here, we treat enable_hashagg as a hard off-switch rather than the
5056 : * slightly softer variant in create_final_distinct_paths.
5057 : */
5058 108 : if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5059 : {
5060 78 : add_partial_path(partial_distinct_rel, (Path *)
5061 78 : create_agg_path(root,
5062 : partial_distinct_rel,
5063 : cheapest_partial_path,
5064 : cheapest_partial_path->pathtarget,
5065 : AGG_HASHED,
5066 : AGGSPLIT_SIMPLE,
5067 : root->processed_distinctClause,
5068 : NIL,
5069 : NULL,
5070 : numDistinctRows));
5071 : }
5072 :
5073 : /*
5074 : * If there is an FDW that's responsible for all baserels of the query,
5075 : * let it consider adding ForeignPaths.
5076 : */
5077 108 : if (partial_distinct_rel->fdwroutine &&
5078 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5079 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5080 : UPPERREL_PARTIAL_DISTINCT,
5081 : input_rel,
5082 : partial_distinct_rel,
5083 : NULL);
5084 :
5085 : /* Let extensions possibly add some more partial paths */
5086 108 : if (create_upper_paths_hook)
5087 0 : (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5088 : input_rel, partial_distinct_rel, NULL);
5089 :
5090 108 : if (partial_distinct_rel->partial_pathlist != NIL)
5091 : {
5092 108 : generate_useful_gather_paths(root, partial_distinct_rel, true);
5093 108 : set_cheapest(partial_distinct_rel);
5094 :
5095 : /*
5096 : * Finally, create paths to distinctify the final result. This step
5097 : * is needed to remove any duplicates due to combining rows from
5098 : * parallel workers.
5099 : */
5100 108 : create_final_distinct_paths(root, partial_distinct_rel,
5101 : final_distinct_rel);
5102 : }
5103 : }
5104 :
5105 : /*
5106 : * create_final_distinct_paths
5107 : * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
5108 : *
5109 : * input_rel: contains the source-data paths
5110 : * distinct_rel: destination relation for storing created paths
5111 : */
5112 : static RelOptInfo *
5113 3122 : create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
5114 : RelOptInfo *distinct_rel)
5115 : {
5116 3122 : Query *parse = root->parse;
5117 3122 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5118 : double numDistinctRows;
5119 : bool allow_hash;
5120 :
5121 : /* Estimate number of distinct rows there will be */
5122 3122 : if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5123 3048 : root->hasHavingQual)
5124 : {
5125 : /*
5126 : * If there was grouping or aggregation, use the number of input rows
5127 : * as the estimated number of DISTINCT rows (ie, assume the input is
5128 : * already mostly unique).
5129 : */
5130 74 : numDistinctRows = cheapest_input_path->rows;
5131 : }
5132 : else
5133 : {
5134 : /*
5135 : * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5136 : */
5137 : List *distinctExprs;
5138 :
5139 3048 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5140 : parse->targetList);
5141 3048 : numDistinctRows = estimate_num_groups(root, distinctExprs,
5142 : cheapest_input_path->rows,
5143 : NULL, NULL);
5144 : }
5145 :
5146 : /*
5147 : * Consider sort-based implementations of DISTINCT, if possible.
5148 : */
5149 3122 : if (grouping_is_sortable(root->processed_distinctClause))
5150 : {
5151 : /*
5152 : * Firstly, if we have any adequately-presorted paths, just stick a
5153             :          * Unique node on those. We also consider doing an explicit sort of
5154 : * the cheapest input path and Unique'ing that. If any paths have
5155 : * presorted keys then we'll create an incremental sort atop of those
5156 : * before adding a unique node on the top. We'll also attempt to
5157 : * reorder the required pathkeys to match the input path's pathkeys as
5158 : * much as possible, in hopes of avoiding a possible need to re-sort.
5159 : *
5160 : * When we have DISTINCT ON, we must sort by the more rigorous of
5161 : * DISTINCT and ORDER BY, else it won't have the desired behavior.
5162 : * Also, if we do have to do an explicit sort, we might as well use
5163 : * the more rigorous ordering to avoid a second sort later. (Note
5164 : * that the parser will have ensured that one clause is a prefix of
5165 : * the other.)
5166 : */
5167 : List *needed_pathkeys;
5168 : ListCell *lc;
5169 3116 : double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5170 :
5171 3364 : if (parse->hasDistinctOn &&
5172 248 : list_length(root->distinct_pathkeys) <
5173 248 : list_length(root->sort_pathkeys))
5174 54 : needed_pathkeys = root->sort_pathkeys;
5175 : else
5176 3062 : needed_pathkeys = root->distinct_pathkeys;
5177 :
5178 8410 : foreach(lc, input_rel->pathlist)
5179 : {
5180 5294 : Path *input_path = (Path *) lfirst(lc);
5181 : Path *sorted_path;
5182 5294 : List *useful_pathkeys_list = NIL;
5183 :
5184 : useful_pathkeys_list =
5185 5294 : get_useful_pathkeys_for_distinct(root,
5186 : needed_pathkeys,
5187 : input_path->pathkeys);
5188 : Assert(list_length(useful_pathkeys_list) > 0);
5189 :
5190 16702 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5191 : {
5192 6114 : sorted_path = make_ordered_path(root,
5193 : distinct_rel,
5194 : input_path,
5195 : cheapest_input_path,
5196 : useful_pathkeys,
5197 : limittuples);
5198 :
5199 6114 : if (sorted_path == NULL)
5200 878 : continue;
5201 :
5202 : /*
5203 : * distinct_pathkeys may have become empty if all of the
5204 : * pathkeys were determined to be redundant. If all of the
5205 : * pathkeys are redundant then each DISTINCT target must only
5206 : * allow a single value, therefore all resulting tuples must
5207 : * be identical (or at least indistinguishable by an equality
5208 : * check). We can uniquify these tuples simply by just taking
5209             :                  * check). We can uniquify these tuples simply by taking
5210 : * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5211 : * still have a non-NIL sort_pathkeys list, so we must still
5212 : * only do this with paths which are correctly sorted by
5213 : * sort_pathkeys.
5214 : */
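                 :                 /*
                 :                  * A hypothetical example of this case:
                 :                  *     SELECT DISTINCT a FROM tab WHERE a = 1;
                 :                  * Here the pathkey on "a" is redundant because "a" can hold
                 :                  * only one value, so distinct_pathkeys is NIL and a LIMIT 1
                 :                  * on the sorted path suffices.
                 :                  */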
5215 5236 : if (root->distinct_pathkeys == NIL)
5216 : {
5217 : Node *limitCount;
5218 :
5219 138 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5220 : sizeof(int64),
5221 : Int64GetDatum(1), false,
5222 : true);
5223 :
5224 : /*
5225 : * If the query already has a LIMIT clause, then we could
5226 : * end up with a duplicate LimitPath in the final plan.
5227 : * That does not seem worth troubling over too much.
5228 : */
5229 138 : add_path(distinct_rel, (Path *)
5230 138 : create_limit_path(root, distinct_rel, sorted_path,
5231 : NULL, limitCount,
5232 : LIMIT_OPTION_COUNT, 0, 1));
5233 : }
5234 : else
5235 : {
5236 5098 : add_path(distinct_rel, (Path *)
5237 5098 : create_unique_path(root, distinct_rel,
5238 : sorted_path,
5239 5098 : list_length(root->distinct_pathkeys),
5240 : numDistinctRows));
5241 : }
5242 : }
5243 : }
5244 : }
5245 :
5246 : /*
5247 : * Consider hash-based implementations of DISTINCT, if possible.
5248 : *
5249 : * If we were not able to make any other types of path, we *must* hash or
5250 : * die trying. If we do have other choices, there are two things that
5251 : * should prevent selection of hashing: if the query uses DISTINCT ON
5252 : * (because it won't really have the expected behavior if we hash), or if
5253 : * enable_hashagg is off.
5254 : *
5255 : * Note: grouping_is_hashable() is much more expensive to check than the
5256 : * other gating conditions, so we want to do it last.
5257 : */
5258 3122 : if (distinct_rel->pathlist == NIL)
5259 6 : allow_hash = true; /* we have no alternatives */
5260 3116 : else if (parse->hasDistinctOn || !enable_hashagg)
5261 398 : allow_hash = false; /* policy-based decision not to hash */
5262 : else
5263 2718 : allow_hash = true; /* default */
5264 :
5265 3122 : if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5266 : {
5267 : /* Generate hashed aggregate path --- no sort needed */
5268 2724 : add_path(distinct_rel, (Path *)
5269 2724 : create_agg_path(root,
5270 : distinct_rel,
5271 : cheapest_input_path,
5272 : cheapest_input_path->pathtarget,
5273 : AGG_HASHED,
5274 : AGGSPLIT_SIMPLE,
5275 : root->processed_distinctClause,
5276 : NIL,
5277 : NULL,
5278 : numDistinctRows));
5279 : }
5280 :
5281 3122 : return distinct_rel;
5282 : }
5283 :
5284 : /*
5285 : * get_useful_pathkeys_for_distinct
5286 : * Get useful orderings of pathkeys for distinctClause by reordering
5287 : * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5288 : *
5289 : * This returns a list of pathkeys that can be useful for DISTINCT or DISTINCT
5290 : * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5291 : */
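                 : /*
                 :  * For example (a hypothetical case): given SELECT DISTINCT a, b with
                 :  * needed_pathkeys (a, b) and an input path already sorted on (b), this
                 :  * returns both (a, b) and the reordered (b, a); the latter lets an
                 :  * incremental sort reuse the presorted leading column.
                 :  */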
5292 : static List *
5293 5420 : get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys,
5294 : List *path_pathkeys)
5295 : {
5296 5420 : List *useful_pathkeys_list = NIL;
5297 5420 : List *useful_pathkeys = NIL;
5298 :
5299 : /* always include the given 'needed_pathkeys' */
5300 5420 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5301 : needed_pathkeys);
5302 :
5303 5420 : if (!enable_distinct_reordering)
5304 0 : return useful_pathkeys_list;
5305 :
5306 : /*
5307 : * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5308 : * that match 'needed_pathkeys', but only up to the longest matching
5309 : * prefix.
5310 : *
5311 : * When we have DISTINCT ON, we must ensure that the resulting pathkey
5312 : * list matches initial distinctClause pathkeys; otherwise, it won't have
5313 : * the desired behavior.
5314 : */
5315 13422 : foreach_node(PathKey, pathkey, path_pathkeys)
5316 : {
5317 : /*
5318 : * The PathKey nodes are canonical, so they can be checked for
5319 : * equality by simple pointer comparison.
5320 : */
5321 2610 : if (!list_member_ptr(needed_pathkeys, pathkey))
5322 10 : break;
5323 2600 : if (root->parse->hasDistinctOn &&
5324 200 : !list_member_ptr(root->distinct_pathkeys, pathkey))
5325 18 : break;
5326 :
5327 2582 : useful_pathkeys = lappend(useful_pathkeys, pathkey);
5328 : }
5329 :
5330 : /* If no match at all, no point in reordering needed_pathkeys */
5331 5420 : if (useful_pathkeys == NIL)
5332 3102 : return useful_pathkeys_list;
5333 :
5334 : /*
5335 : * If not full match, the resulting pathkey list is not useful without
5336 : * incremental sort.
5337 : */
5338 2318 : if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5339 1560 : !enable_incremental_sort)
5340 60 : return useful_pathkeys_list;
5341 :
5342 : /* Append the remaining PathKey nodes in needed_pathkeys */
5343 2258 : useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5344 : needed_pathkeys);
5345 :
5346 : /*
5347 : * If the resulting pathkey list is the same as the 'needed_pathkeys',
5348 : * just drop it.
5349 : */
5350 2258 : if (compare_pathkeys(needed_pathkeys,
5351 : useful_pathkeys) == PATHKEYS_EQUAL)
5352 1426 : return useful_pathkeys_list;
5353 :
5354 832 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5355 : useful_pathkeys);
5356 :
5357 832 : return useful_pathkeys_list;
5358 : }
5359 :
5360 : /*
5361 : * create_ordered_paths
5362 : *
5363 : * Build a new upperrel containing Paths for ORDER BY evaluation.
5364 : *
5365 : * All paths in the result must satisfy the ORDER BY ordering.
5366 : * The only new paths we need consider are an explicit full sort
5367 : * and incremental sort on the cheapest-total existing path.
5368 : *
5369 : * input_rel: contains the source-data Paths
5370 : * target: the output tlist the result Paths must emit
5371 : * limit_tuples: estimated bound on the number of output tuples,
5372 : * or -1 if no LIMIT or couldn't estimate
5373 : *
5374 : * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5375 : * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5376 : */
5377 : static RelOptInfo *
5378 76020 : create_ordered_paths(PlannerInfo *root,
5379 : RelOptInfo *input_rel,
5380 : PathTarget *target,
5381 : bool target_parallel_safe,
5382 : double limit_tuples)
5383 : {
5384 76020 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5385 : RelOptInfo *ordered_rel;
5386 : ListCell *lc;
5387 :
5388 : /* For now, do all work in the (ORDERED, NULL) upperrel */
5389 76020 : ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5390 :
5391 : /*
5392 : * If the input relation is not parallel-safe, then the ordered relation
5393 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5394 : * target list is parallel-safe.
5395 : */
5396 76020 : if (input_rel->consider_parallel && target_parallel_safe)
5397 53078 : ordered_rel->consider_parallel = true;
5398 :
5399 : /* Assume that the same path generation strategies are allowed. */
5400 76020 : ordered_rel->pgs_mask = input_rel->pgs_mask;
5401 :
5402 : /*
5403 : * If the input rel belongs to a single FDW, so does the ordered_rel.
5404 : */
5405 76020 : ordered_rel->serverid = input_rel->serverid;
5406 76020 : ordered_rel->userid = input_rel->userid;
5407 76020 : ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5408 76020 : ordered_rel->fdwroutine = input_rel->fdwroutine;
5409 :
5410 192080 : foreach(lc, input_rel->pathlist)
5411 : {
5412 116060 : Path *input_path = (Path *) lfirst(lc);
5413 : Path *sorted_path;
5414 : bool is_sorted;
5415 : int presorted_keys;
5416 :
5417 116060 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5418 : input_path->pathkeys, &presorted_keys);
5419 :
5420 116060 : if (is_sorted)
5421 43570 : sorted_path = input_path;
5422 : else
5423 : {
5424 : /*
5425 : * Try at least sorting the cheapest path and also try
5426 : * incrementally sorting any path which is partially sorted
5427 : * already (no need to deal with paths which have presorted keys
5428 : * when incremental sort is disabled unless it's the cheapest
5429 : * input path).
5430 : */
5431 72490 : if (input_path != cheapest_input_path &&
5432 6322 : (presorted_keys == 0 || !enable_incremental_sort))
5433 2214 : continue;
5434 :
5435 : /*
5436 : * We've no need to consider both a sort and incremental sort.
5437 : * We'll just do a sort if there are no presorted keys and an
5438 : * incremental sort when there are presorted keys.
5439 : */
5440 70276 : if (presorted_keys == 0 || !enable_incremental_sort)
5441 65538 : sorted_path = (Path *) create_sort_path(root,
5442 : ordered_rel,
5443 : input_path,
5444 : root->sort_pathkeys,
5445 : limit_tuples);
5446 : else
5447 4738 : sorted_path = (Path *) create_incremental_sort_path(root,
5448 : ordered_rel,
5449 : input_path,
5450 : root->sort_pathkeys,
5451 : presorted_keys,
5452 : limit_tuples);
5453 : }
5454 :
5455 : /*
5456 : * If the pathtarget of the result path has different expressions from
5457 : * the target to be applied, a projection step is needed.
5458 : */
5459 113846 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5460 294 : sorted_path = apply_projection_to_path(root, ordered_rel,
5461 : sorted_path, target);
5462 :
5463 113846 : add_path(ordered_rel, sorted_path);
5464 : }
5465 :
5466 : /*
5467 : * generate_gather_paths() will have already generated a simple Gather
5468 : * path for the best parallel path, if any, and the loop above will have
5469 : * considered sorting it. Similarly, generate_gather_paths() will also
5470 : * have generated order-preserving Gather Merge plans which can be used
5471 : * without sorting if they happen to match the sort_pathkeys, and the loop
5472 : * above will have handled those as well. However, there's one more
5473 : * possibility: it may make sense to sort the cheapest partial path or
5474 : * incrementally sort any partial path that is partially sorted according
5475 : * to the required output order and then use Gather Merge.
5476 : */
5477 76020 : if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5478 52874 : input_rel->partial_pathlist != NIL)
5479 : {
5480 : Path *cheapest_partial_path;
5481 :
5482 2868 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
5483 :
5484 6530 : foreach(lc, input_rel->partial_pathlist)
5485 : {
5486 3662 : Path *input_path = (Path *) lfirst(lc);
5487 : Path *sorted_path;
5488 : bool is_sorted;
5489 : int presorted_keys;
5490 : double total_groups;
5491 :
5492 3662 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5493 : input_path->pathkeys,
5494 : &presorted_keys);
5495 :
5496 3662 : if (is_sorted)
5497 674 : continue;
5498 :
5499 : /*
5500 : * Try at least sorting the cheapest path and also try
5501 : * incrementally sorting any path which is partially sorted
5502 : * already (no need to deal with paths which have presorted keys
5503 : * when incremental sort is disabled unless it's the cheapest
5504 : * partial path).
5505 : */
5506 2988 : if (input_path != cheapest_partial_path &&
5507 150 : (presorted_keys == 0 || !enable_incremental_sort))
5508 0 : continue;
5509 :
5510 : /*
5511 : * We've no need to consider both a sort and incremental sort.
5512 : * We'll just do a sort if there are no presorted keys and an
5513 : * incremental sort when there are presorted keys.
5514 : */
5515 2988 : if (presorted_keys == 0 || !enable_incremental_sort)
5516 2820 : sorted_path = (Path *) create_sort_path(root,
5517 : ordered_rel,
5518 : input_path,
5519 : root->sort_pathkeys,
5520 : limit_tuples);
5521 : else
5522 168 : sorted_path = (Path *) create_incremental_sort_path(root,
5523 : ordered_rel,
5524 : input_path,
5525 : root->sort_pathkeys,
5526 : presorted_keys,
5527 : limit_tuples);
5528 2988 : total_groups = compute_gather_rows(sorted_path);
5529 : sorted_path = (Path *)
5530 2988 : create_gather_merge_path(root, ordered_rel,
5531 : sorted_path,
5532 : sorted_path->pathtarget,
5533 : root->sort_pathkeys, NULL,
5534 : &total_groups);
5535 :
5536 : /*
5537 : * If the pathtarget of the result path has different expressions
5538 : * from the target to be applied, a projection step is needed.
5539 : */
5540 2988 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5541 6 : sorted_path = apply_projection_to_path(root, ordered_rel,
5542 : sorted_path, target);
5543 :
5544 2988 : add_path(ordered_rel, sorted_path);
5545 : }
5546 : }
5547 :
5548 : /*
5549 : * If there is an FDW that's responsible for all baserels of the query,
5550 : * let it consider adding ForeignPaths.
5551 : */
5552 76020 : if (ordered_rel->fdwroutine &&
5553 386 : ordered_rel->fdwroutine->GetForeignUpperPaths)
5554 370 : ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5555 : input_rel, ordered_rel,
5556 : NULL);
5557 :
5558 : /* Let extensions possibly add some more paths */
5559 76020 : if (create_upper_paths_hook)
5560 0 : (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5561 : input_rel, ordered_rel, NULL);
5562 :
5563 : /*
5564 : * No need to bother with set_cheapest here; grouping_planner does not
5565 : * need us to do it.
5566 : */
5567 : Assert(ordered_rel->pathlist != NIL);
5568 :
5569 76020 : return ordered_rel;
5570 : }
5571 :
5572 :
5573 : /*
5574 : * make_group_input_target
5575 : * Generate appropriate PathTarget for initial input to grouping nodes.
5576 : *
5577 : * If there is grouping or aggregation, the scan/join subplan cannot emit
5578 : * the query's final targetlist; for example, it certainly can't emit any
5579 : * aggregate function calls. This routine generates the correct target
5580 : * for the scan/join subplan.
5581 : *
5582 : * The query target list passed from the parser already contains entries
5583 : * for all ORDER BY and GROUP BY expressions, but it will not have entries
5584 : * for variables used only in HAVING clauses; so we need to add those
5585 : * variables to the subplan target list. Also, we flatten all expressions
5586 : * except GROUP BY items into their component variables; other expressions
5587 : * will be computed by the upper plan nodes rather than by the subplan.
5588 : * For example, given a query like
5589 : * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5590 : * we want to pass this targetlist to the subplan:
5591 : * a+b,c,d
5592 : * where the a+b target will be used by the Sort/Group steps, and the
5593 : * other targets will be used for computing the final results.
5594 : *
5595 : * 'final_target' is the query's final target list (in PathTarget form)
5596 : *
5597 : * The result is the PathTarget to be computed by the Paths returned from
5598 : * query_planner().
5599 : */
5600 : static PathTarget *
5601 45978 : make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5602 : {
5603 45978 : Query *parse = root->parse;
5604 : PathTarget *input_target;
5605 : List *non_group_cols;
5606 : List *non_group_vars;
5607 : int i;
5608 : ListCell *lc;
5609 :
5610 : /*
5611 : * We must build a target containing all grouping columns, plus any other
5612 : * Vars mentioned in the query's targetlist and HAVING qual.
5613 : */
5614 45978 : input_target = create_empty_pathtarget();
5615 45978 : non_group_cols = NIL;
5616 :
5617 45978 : i = 0;
5618 114026 : foreach(lc, final_target->exprs)
5619 : {
5620 68048 : Expr *expr = (Expr *) lfirst(lc);
5621 68048 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
5622 :
5623 77726 : if (sgref && root->processed_groupClause &&
5624 9678 : get_sortgroupref_clause_noerr(sgref,
5625 : root->processed_groupClause) != NULL)
5626 : {
5627 : /*
5628 : * It's a grouping column, so add it to the input target as-is.
5629 : *
5630 : * Note that the target is logically below the grouping step. So
5631 : * with grouping sets we need to remove the RT index of the
5632 : * grouping step if there is any from the target expression.
5633 : */
5634 7808 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5635 : {
5636 : Assert(root->group_rtindex > 0);
5637 : expr = (Expr *)
5638 2026 : remove_nulling_relids((Node *) expr,
5639 2026 : bms_make_singleton(root->group_rtindex),
5640 : NULL);
5641 : }
5642 7808 : add_column_to_pathtarget(input_target, expr, sgref);
5643 : }
5644 : else
5645 : {
5646 : /*
5647 : * Non-grouping column, so just remember the expression for later
5648 : * call to pull_var_clause.
5649 : */
5650 60240 : non_group_cols = lappend(non_group_cols, expr);
5651 : }
5652 :
5653 68048 : i++;
5654 : }
5655 :
5656 : /*
5657 : * If there's a HAVING clause, we'll need the Vars it uses, too.
5658 : */
5659 45978 : if (parse->havingQual)
5660 962 : non_group_cols = lappend(non_group_cols, parse->havingQual);
5661 :
5662 : /*
5663 : * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5664 : * add them to the input target if not already present. (A Var used
5665 : * directly as a GROUP BY item will be present already.) Note this
5666 : * includes Vars used in resjunk items, so we are covering the needs of
5667 : * ORDER BY and window specifications. Vars used within Aggrefs and
5668 : * WindowFuncs will be pulled out here, too.
5669 : *
5670 : * Note that the target is logically below the grouping step. So with
5671 : * grouping sets we need to remove the RT index of the grouping step if
5672 : * there is any from the non-group Vars.
5673 : */
5674 45978 : non_group_vars = pull_var_clause((Node *) non_group_cols,
5675 : PVC_RECURSE_AGGREGATES |
5676 : PVC_RECURSE_WINDOWFUNCS |
5677 : PVC_INCLUDE_PLACEHOLDERS);
5678 45978 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5679 : {
5680 : Assert(root->group_rtindex > 0);
5681 : non_group_vars = (List *)
5682 936 : remove_nulling_relids((Node *) non_group_vars,
5683 936 : bms_make_singleton(root->group_rtindex),
5684 : NULL);
5685 : }
5686 45978 : add_new_columns_to_pathtarget(input_target, non_group_vars);
5687 :
5688 : /* clean up cruft */
5689 45978 : list_free(non_group_vars);
5690 45978 : list_free(non_group_cols);
5691 :
5692 : /* XXX this causes some redundant cost calculation ... */
5693 45978 : return set_pathtarget_cost_width(root, input_target);
5694 : }
5695 :
5696 : /*
5697 : * make_partial_grouping_target
5698 : * Generate appropriate PathTarget for output of partial aggregate
5699 : * (or partial grouping, if there are no aggregates) nodes.
5700 : *
5701 : * A partial aggregation node needs to emit all the same aggregates that
5702 : * a regular aggregation node would, plus any aggregates used in HAVING;
5703 : * except that the Aggref nodes should be marked as partial aggregates.
5704 : *
5705 : * In addition, we'd better emit any Vars and PlaceHolderVars that are
5706 : * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5707 : * these would be Vars that are grouped by or used in grouping expressions.)
5708 : *
5709 : * grouping_target is the tlist to be emitted by the topmost aggregation step.
5710 : * havingQual represents the HAVING clause.
5711 : */
5712 : static PathTarget *
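                 : /*
                 :  * For instance (hypothetical query): for
                 :  *     SELECT a, sum(c) FROM tab GROUP BY a HAVING max(d) > 0;
                 :  * the partial target contains "a" plus partial-mode sum(c) and max(d),
                 :  * so the final aggregation step can combine the per-worker transition
                 :  * states and only then evaluate the HAVING clause.
                 :  */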
5713 3856 : make_partial_grouping_target(PlannerInfo *root,
5714 : PathTarget *grouping_target,
5715 : Node *havingQual)
5716 : {
5717 : PathTarget *partial_target;
5718 : List *non_group_cols;
5719 : List *non_group_exprs;
5720 : int i;
5721 : ListCell *lc;
5722 :
5723 3856 : partial_target = create_empty_pathtarget();
5724 3856 : non_group_cols = NIL;
5725 :
5726 3856 : i = 0;
5727 12754 : foreach(lc, grouping_target->exprs)
5728 : {
5729 8898 : Expr *expr = (Expr *) lfirst(lc);
5730 8898 : Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5731 :
5732 13674 : if (sgref && root->processed_groupClause &&
5733 4776 : get_sortgroupref_clause_noerr(sgref,
5734 : root->processed_groupClause) != NULL)
5735 : {
5736 : /*
5737 : * It's a grouping column, so add it to the partial_target as-is.
5738 : * (This allows the upper agg step to repeat the grouping calcs.)
5739 : */
5740 2828 : add_column_to_pathtarget(partial_target, expr, sgref);
5741 : }
5742 : else
5743 : {
5744 : /*
5745 : * Non-grouping column, so just remember the expression for later
5746 : * call to pull_var_clause.
5747 : */
5748 6070 : non_group_cols = lappend(non_group_cols, expr);
5749 : }
5750 :
5751 8898 : i++;
5752 : }
5753 :
5754 : /*
5755 : * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5756 : */
5757 3856 : if (havingQual)
5758 878 : non_group_cols = lappend(non_group_cols, havingQual);
5759 :
5760 : /*
5761 : * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5762 : * non-group cols (plus HAVING), and add them to the partial_target if not
5763 : * already present. (An expression used directly as a GROUP BY item will
5764 : * be present already.) Note this includes Vars used in resjunk items, so
5765 : * we are covering the needs of ORDER BY and window specifications.
5766 : */
5767 3856 : non_group_exprs = pull_var_clause((Node *) non_group_cols,
5768 : PVC_INCLUDE_AGGREGATES |
5769 : PVC_RECURSE_WINDOWFUNCS |
5770 : PVC_INCLUDE_PLACEHOLDERS);
5771 :
5772 3856 : add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5773 :
5774 : /*
5775 : * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5776 : * are at the top level of the target list, so we can just scan the list
5777 : * rather than recursing through the expression trees.
5778 : */
5779 13350 : foreach(lc, partial_target->exprs)
5780 : {
5781 9494 : Aggref *aggref = (Aggref *) lfirst(lc);
5782 :
5783 9494 : if (IsA(aggref, Aggref))
5784 : {
5785 : Aggref *newaggref;
5786 :
5787 : /*
5788 : * We shouldn't need to copy the substructure of the Aggref node,
5789 : * but flat-copy the node itself to avoid damaging other trees.
5790 : */
5791 6636 : newaggref = makeNode(Aggref);
5792 6636 : memcpy(newaggref, aggref, sizeof(Aggref));
5793 :
5794 : /* For now, assume serialization is required */
5795 6636 : mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5796 :
5797 6636 : lfirst(lc) = newaggref;
5798 : }
5799 : }
5800 :
5801 : /* clean up cruft */
5802 3856 : list_free(non_group_exprs);
5803 3856 : list_free(non_group_cols);
5804 :
5805 : /* XXX this causes some redundant cost calculation ... */
5806 3856 : return set_pathtarget_cost_width(root, partial_target);
5807 : }
5808 :
5809 : /*
5810 : * mark_partial_aggref
5811 : * Adjust an Aggref to make it represent a partial-aggregation step.
5812 : *
5813 : * The Aggref node is modified in-place; caller must do any copying required.
5814 : */
5815 : void
5816 18580 : mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5817 : {
5818 : /* aggtranstype should be computed by this point */
5819 : Assert(OidIsValid(agg->aggtranstype));
5820 : /* ... but aggsplit should still be as the parser left it */
5821 : Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5822 :
5823 : /* Mark the Aggref with the intended partial-aggregation mode */
5824 18580 : agg->aggsplit = aggsplit;
5825 :
5826 : /*
5827 : * Adjust result type if needed. Normally, a partial aggregate returns
5828 : * the aggregate's transition type; but if that's INTERNAL and we're
5829 : * serializing, it returns BYTEA instead.
5830 : */
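                 :     /*
                 :      * For example, a partially aggregated avg(numeric) keeps its working
                 :      * state in an INTERNAL-typed transition value; when that state must
                 :      * be serialized (e.g. to pass it up from parallel workers), the
                 :      * partial Aggref is relabeled here to return BYTEA instead.
                 :      */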
5831 18580 : if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5832 : {
5833 16274 : if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5834 314 : agg->aggtype = BYTEAOID;
5835 : else
5836 15960 : agg->aggtype = agg->aggtranstype;
5837 : }
5838 18580 : }
5839 :
5840 : /*
5841 : * postprocess_setop_tlist
5842 : * Fix up targetlist returned by plan_set_operations().
5843 : *
5844 : * We need to transpose sort key info from the orig_tlist into new_tlist.
5845 : * NOTE: this would not be good enough if we supported resjunk sort keys
5846 : * for results of set operations --- then, we'd need to project a whole
5847 : * new tlist to evaluate the resjunk columns. For now, just ereport if we
5848 : * find any resjunk columns in orig_tlist.
5849 : */
5850 : static List *
5851 6216 : postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5852 : {
5853 : ListCell *l;
5854 6216 : ListCell *orig_tlist_item = list_head(orig_tlist);
5855 :
5856 23854 : foreach(l, new_tlist)
5857 : {
5858 17638 : TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5859 : TargetEntry *orig_tle;
5860 :
5861 : /* ignore resjunk columns in setop result */
5862 17638 : if (new_tle->resjunk)
5863 0 : continue;
5864 :
5865 : Assert(orig_tlist_item != NULL);
5866 17638 : orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5867 17638 : orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5868 17638 : if (orig_tle->resjunk) /* should not happen */
5869 0 : elog(ERROR, "resjunk output columns are not implemented");
5870 : Assert(new_tle->resno == orig_tle->resno);
5871 17638 : new_tle->ressortgroupref = orig_tle->ressortgroupref;
5872 : }
5873 6216 : if (orig_tlist_item != NULL)
5874 0 : elog(ERROR, "resjunk output columns are not implemented");
5875 6216 : return new_tlist;
5876 : }
5877 :
5878 : /*
5879 : * optimize_window_clauses
5880 : * Call each WindowFunc's prosupport function to see if we're able to
5881             :  *      make any adjustments to any of the WindowClauses so that the executor
5882 : * can execute the window functions in a more optimal way.
5883 : *
5884 : * Currently we only allow adjustments to the WindowClause's frameOptions. We
5885 : * may allow more things to be done here in the future.
5886 : */
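                 : /*
                 :  * For example, the support function for row_number() can report that the
                 :  * function does not depend on the window frame, letting the frame options
                 :  * be simplified to ROWS UNBOUNDED PRECEDING .. CURRENT ROW; that is one
                 :  * illustration of the kind of adjustment made here.
                 :  */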
5887 : static void
5888 2576 : optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5889 : {
5890 2576 : List *windowClause = root->parse->windowClause;
5891 : ListCell *lc;
5892 :
5893 5398 : foreach(lc, windowClause)
5894 : {
5895 2822 : WindowClause *wc = lfirst_node(WindowClause, lc);
5896 : ListCell *lc2;
5897 2822 : int optimizedFrameOptions = 0;
5898 :
5899 : Assert(wc->winref <= wflists->maxWinRef);
5900 :
5901 : /* skip any WindowClauses that have no WindowFuncs */
5902 2822 : if (wflists->windowFuncs[wc->winref] == NIL)
5903 24 : continue;
5904 :
5905 3452 : foreach(lc2, wflists->windowFuncs[wc->winref])
5906 : {
5907 : SupportRequestOptimizeWindowClause req;
5908 : SupportRequestOptimizeWindowClause *res;
5909 2840 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5910 : Oid prosupport;
5911 :
5912 2840 : prosupport = get_func_support(wfunc->winfnoid);
5913 :
5914 : /* Check if there's a support function for 'wfunc' */
5915 2840 : if (!OidIsValid(prosupport))
5916 2186 : break; /* can't optimize this WindowClause */
5917 :
5918 880 : req.type = T_SupportRequestOptimizeWindowClause;
5919 880 : req.window_clause = wc;
5920 880 : req.window_func = wfunc;
5921 880 : req.frameOptions = wc->frameOptions;
5922 :
5923 : /* call the support function */
5924 : res = (SupportRequestOptimizeWindowClause *)
5925 880 : DatumGetPointer(OidFunctionCall1(prosupport,
5926 : PointerGetDatum(&req)));
5927 :
5928 : /*
5929 : * Skip to next WindowClause if the support function does not
5930 : * support this request type.
5931 : */
5932 880 : if (res == NULL)
5933 226 : break;
5934 :
5935 : /*
5936 : * Save these frameOptions for the first WindowFunc for this
5937 : * WindowClause.
5938 : */
5939 654 : if (foreach_current_index(lc2) == 0)
5940 630 : optimizedFrameOptions = res->frameOptions;
5941 :
5942 : /*
5943 : * On subsequent WindowFuncs, if the frameOptions are not the same
5944 : * then we're unable to optimize the frameOptions for this
5945 : * WindowClause.
5946 : */
5947 24 : else if (optimizedFrameOptions != res->frameOptions)
5948 0 : break; /* skip to the next WindowClause, if any */
5949 : }
5950 :
5951 : /* adjust the frameOptions if all WindowFunc's agree that it's ok */
5952 2798 : if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5953 : {
5954 : ListCell *lc3;
5955 :
5956 : /* apply the new frame options */
5957 612 : wc->frameOptions = optimizedFrameOptions;
5958 :
5959 : /*
5960 : * We now check to see if changing the frameOptions has caused
5961 : * this WindowClause to be a duplicate of some other WindowClause.
5962 : * This can only happen if we have multiple WindowClauses, so
5963 : * don't bother if there's only 1.
5964 : */
5965 612 : if (list_length(windowClause) == 1)
5966 522 : continue;
5967 :
5968 : /*
5969 : * Do the duplicate check and reuse the existing WindowClause if
5970 : * we find a duplicate.
5971 : */
5972 228 : foreach(lc3, windowClause)
5973 : {
5974 174 : WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5975 :
5976 : /* skip over the WindowClause we're currently editing */
5977 174 : if (existing_wc == wc)
5978 54 : continue;
5979 :
5980 : /*
5981 : * Perform the same duplicate check that is done in
5982 : * transformWindowFuncCall.
5983 : */
5984 240 : if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5985 120 : equal(wc->orderClause, existing_wc->orderClause) &&
5986 120 : wc->frameOptions == existing_wc->frameOptions &&
5987 72 : equal(wc->startOffset, existing_wc->startOffset) &&
5988 36 : equal(wc->endOffset, existing_wc->endOffset))
5989 : {
5990 : ListCell *lc4;
5991 :
5992 : /*
5993 : * Now move each WindowFunc in 'wc' into 'existing_wc'.
5994             :                      * This requires adjusting each WindowFunc's winref and
5995 : * moving the WindowFuncs in 'wc' to the list of
5996 : * WindowFuncs in 'existing_wc'.
5997 : */
5998 78 : foreach(lc4, wflists->windowFuncs[wc->winref])
5999 : {
6000 42 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
6001 :
6002 42 : wfunc->winref = existing_wc->winref;
6003 : }
6004 :
6005 : /* move list items */
6006 72 : wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
6007 36 : wflists->windowFuncs[wc->winref]);
6008 36 : wflists->windowFuncs[wc->winref] = NIL;
6009 :
6010 : /*
6011 : * transformWindowFuncCall() should have made sure there
6012 : * are no other duplicates, so we needn't bother looking
6013 : * any further.
6014 : */
6015 36 : break;
6016 : }
6017 : }
6018 : }
6019 : }
6020 2576 : }
6021 :
6022 : /*
6023 : * select_active_windows
6024 : * Create a list of the "active" window clauses (ie, those referenced
6025 : * by non-deleted WindowFuncs) in the order they are to be executed.
6026 : */
6027 : static List *
6028 2576 : select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
6029 : {
6030 2576 : List *windowClause = root->parse->windowClause;
6031 2576 : List *result = NIL;
6032 : ListCell *lc;
6033 2576 : int nActive = 0;
6034 2576 : WindowClauseSortData *actives = palloc_array(WindowClauseSortData,
6035 : list_length(windowClause));
6036 :
6037 : /* First, construct an array of the active windows */
6038 5398 : foreach(lc, windowClause)
6039 : {
6040 2822 : WindowClause *wc = lfirst_node(WindowClause, lc);
6041 :
6042 : /* It's only active if wflists shows some related WindowFuncs */
6043 : Assert(wc->winref <= wflists->maxWinRef);
6044 2822 : if (wflists->windowFuncs[wc->winref] == NIL)
6045 60 : continue;
6046 :
6047 2762 : actives[nActive].wc = wc; /* original clause */
6048 :
6049 : /*
6050 : * For sorting, we want the list of partition keys followed by the
6051 : * list of sort keys. But pathkeys construction will remove duplicates
6052 : * between the two, so we can as well (even though we can't detect all
6053 : * of the duplicates, since some may come from ECs - that might mean
6054 : * we miss optimization chances here). We must, however, ensure that
6055 : * the order of entries is preserved with respect to the ones we do
6056 : * keep.
6057 : *
6058 : * partitionClause and orderClause had their own duplicates removed in
6059 : * parse analysis, so we're only concerned here with removing
6060 : * orderClause entries that also appear in partitionClause.
6061 : */
6062 5524 : actives[nActive].uniqueOrder =
6063 2762 : list_concat_unique(list_copy(wc->partitionClause),
6064 2762 : wc->orderClause);
6065 2762 : nActive++;
6066 : }
6067 :
6068 : /*
6069 : * Sort active windows by their partitioning/ordering clauses, ignoring
6070 : * any framing clauses, so that the windows that need the same sorting are
6071 : * adjacent in the list. When we come to generate paths, this will avoid
6072 : * inserting additional Sort nodes.
6073 : *
6074 : * This is how we implement a specific requirement from the SQL standard,
6075 : * which says that when two or more windows are order-equivalent (i.e.
6076 : * have matching partition and order clauses, even if their names or
6077 : * framing clauses differ), then all peer rows must be presented in the
6078 : * same order in all of them. If we allowed multiple sort nodes for such
6079 : * cases, we'd risk having the peer rows end up in different orders in
6080 : * equivalent windows due to sort instability. (See General Rule 4 of
6081 : * <window clause> in SQL2008 - SQL2016.)
6082 : *
6083 : * Additionally, if the entire list of clauses of one window is a prefix
6084 : * of another, put the window with the stronger sorting requirements first.
6085 : * That way we sort for the stronger window first, and won't have to sort
6086 : * again for the weaker one.
6087 : */
6088 2576 : qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
6089 :
6090 : /* build ordered list of the original WindowClause nodes */
6091 5338 : for (int i = 0; i < nActive; i++)
6092 2762 : result = lappend(result, actives[i].wc);
6093 :
6094 2576 : pfree(actives);
6095 :
6096 2576 : return result;
6097 : }
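
For illustration only: the uniqueOrder construction above (partition keys first, then any order keys not already present, preserving relative order) behaves like the stand-alone sketch below. The names are invented for the example and the sort-group refs are modeled as plain ints; this is not planner code.

#include <stdbool.h>
#include <stdio.h>

/* Append 'order' refs onto 'part' refs, skipping refs already present. */
static int
concat_unique_refs(const int *part, int npart,
                   const int *order, int norder, int *out)
{
    int         n = 0;

    for (int i = 0; i < npart; i++)
        out[n++] = part[i];
    for (int i = 0; i < norder; i++)
    {
        bool        seen = false;

        for (int j = 0; j < n; j++)
            if (out[j] == order[i])
                seen = true;
        if (!seen)
            out[n++] = order[i];
    }
    return n;
}

int
main(void)
{
    int         part[] = {3, 1};    /* PARTITION BY refs */
    int         order[] = {1, 2};   /* ORDER BY refs; 1 duplicates a partition key */
    int         out[4];
    int         n = concat_unique_refs(part, 2, order, 2, out);

    for (int i = 0; i < n; i++)
        printf("%d ", out[i]);      /* prints "3 1 2" */
    printf("\n");
    return 0;
}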
6098 :
6099 : /*
6100 : * name_active_windows
6101 : * Ensure all active windows have unique names.
6102 : *
6103 : * The parser will have checked that user-assigned window names are unique
6104 : * within the Query. Here we assign made-up names to any unnamed
6105 : * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
6106 : * at parse time, because it'd mess up decompilation of views.)
6107 : *
6108 : * activeWindows: result of select_active_windows
6109 : */
6110 : static void
6111 2576 : name_active_windows(List *activeWindows)
6112 : {
6113 2576 : int next_n = 1;
6114 : char newname[16];
6115 : ListCell *lc;
6116 :
6117 5338 : foreach(lc, activeWindows)
6118 : {
6119 2762 : WindowClause *wc = lfirst_node(WindowClause, lc);
6120 :
6121 : /* Nothing to do if it has a name already. */
6122 2762 : if (wc->name)
6123 576 : continue;
6124 :
6125 : /* Select a name not currently present in the list. */
6126 : for (;;)
6127 6 : {
6128 : ListCell *lc2;
6129 :
6130 2192 : snprintf(newname, sizeof(newname), "w%d", next_n++);
6131 4732 : foreach(lc2, activeWindows)
6132 : {
6133 2546 : WindowClause *wc2 = lfirst_node(WindowClause, lc2);
6134 :
6135 2546 : if (wc2->name && strcmp(wc2->name, newname) == 0)
6136 6 : break; /* matched */
6137 : }
6138 2192 : if (lc2 == NULL)
6139 2186 : break; /* reached the end with no match */
6140 : }
6141 2186 : wc->name = pstrdup(newname);
6142 : }
6143 2576 : }
6144 :
6145 : /*
6146 : * common_prefix_cmp
6147 : * QSort comparison function for WindowClauseSortData
6148 : *
6149 : * Sort the windows by the required sorting clauses. First, compare the sort
6150 : * clauses themselves. Second, if one window's clauses are a prefix of another
6151 : * one's clauses, put the window with more sort clauses first.
6152 : *
6153 : * We purposefully sort by the highest tleSortGroupRef first.  Since
6154 : * tleSortGroupRefs are assigned to the query's DISTINCT and ORDER BY
6155 : * clauses first, and since we sort the lowest tleSortGroupRefs last, a
6156 : * WindowClause that shares a tleSortGroupRef with the query's DISTINCT or
6157 : * ORDER BY clause is more likely to be handled by the final WindowAgg.
6158 : * That final WindowAgg can then provide presorted input for the DISTINCT
6159 : * or ORDER BY step, reducing the total number of sorts required for the query.
6160 : */
6161 : static int
6162 204 : common_prefix_cmp(const void *a, const void *b)
6163 : {
6164 204 : const WindowClauseSortData *wcsa = a;
6165 204 : const WindowClauseSortData *wcsb = b;
6166 : ListCell *item_a;
6167 : ListCell *item_b;
6168 :
6169 366 : forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6170 : {
6171 264 : SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6172 264 : SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6173 :
6174 264 : if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6175 102 : return -1;
6176 252 : else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6177 66 : return 1;
6178 186 : else if (sca->sortop > scb->sortop)
6179 0 : return -1;
6180 186 : else if (sca->sortop < scb->sortop)
6181 24 : return 1;
6182 162 : else if (sca->nulls_first && !scb->nulls_first)
6183 0 : return -1;
6184 162 : else if (!sca->nulls_first && scb->nulls_first)
6185 0 : return 1;
6186 : /* no need to compare eqop, since it is fully determined by sortop */
6187 : }
6188 :
6189 102 : if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6190 6 : return -1;
6191 96 : else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6192 30 : return 1;
6193 :
6194 66 : return 0;
6195 : }
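
For illustration only, here is a minimal stand-alone program (all names invented; not planner code) that mimics the two rules above: compare refs element by element with the higher tleSortGroupRef sorting first, and when one list is a prefix of the other, put the longer (stronger) sort requirement first. The real comparator additionally breaks ties on sortop and nulls_first.

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    const char *name;
    int         refs[4];        /* simplified stand-in for uniqueOrder */
    int         nrefs;
} DemoWindow;

static int
demo_prefix_cmp(const void *a, const void *b)
{
    const DemoWindow *wa = a;
    const DemoWindow *wb = b;
    int         n = (wa->nrefs < wb->nrefs) ? wa->nrefs : wb->nrefs;

    for (int i = 0; i < n; i++)
    {
        if (wa->refs[i] > wb->refs[i])
            return -1;          /* higher ref sorts first */
        if (wa->refs[i] < wb->refs[i])
            return 1;
    }
    /* one list is a prefix of the other: longer list (stronger sort) first */
    if (wa->nrefs > wb->nrefs)
        return -1;
    if (wa->nrefs < wb->nrefs)
        return 1;
    return 0;
}

int
main(void)
{
    DemoWindow  wins[] = {
        {"w_prefix", {3}, 1},       /* e.g. OVER (ORDER BY a) */
        {"w_longer", {3, 2}, 2},    /* e.g. OVER (ORDER BY a, b) */
    };

    qsort(wins, 2, sizeof(DemoWindow), demo_prefix_cmp);
    printf("%s sorts before %s\n", wins[0].name, wins[1].name);
    return 0;
}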
6196 :
6197 : /*
6198 : * make_window_input_target
6199 : * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6200 : *
6201 : * When the query has window functions, this function computes the desired
6202 : * target to be computed by the node just below the first WindowAgg.
6203 : * This tlist must contain all values needed to evaluate the window functions,
6204 : * compute the final target list, and perform any required final sort step.
6205 : * If multiple WindowAggs are needed, each intermediate one adds its window
6206 : * function results onto this base tlist; only the topmost WindowAgg computes
6207 : * the actual desired target list.
6208 : *
6209 : * This function is much like make_group_input_target, though not quite enough
6210 : * like it to share code. As in that function, we flatten most expressions
6211 : * into their component variables. But we do not want to flatten window
6212 : * PARTITION BY/ORDER BY clauses, since that might result in multiple
6213 : * evaluations of them, which would be bad (possibly even resulting in
6214 : * inconsistent answers, if they contain volatile functions).
6215 : * Also, we must not flatten GROUP BY clauses that were left unflattened by
6216 : * make_group_input_target, because we may no longer have access to the
6217 : * individual Vars in them.
6218 : *
6219 : * Another key difference from make_group_input_target is that we don't
6220 : * flatten Aggref expressions, since those are to be computed below the
6221 : * window functions and just referenced like Vars above that.
6222 : *
6223 : * 'final_target' is the query's final target list (in PathTarget form)
6224 : * 'activeWindows' is the list of active windows previously identified by
6225 : * select_active_windows.
6226 : *
6227 : * The result is the PathTarget to be computed by the plan node immediately
6228 : * below the first WindowAgg node.
6229 : */
6230 : static PathTarget *
6231 2576 : make_window_input_target(PlannerInfo *root,
6232 : PathTarget *final_target,
6233 : List *activeWindows)
6234 : {
6235 : PathTarget *input_target;
6236 : Bitmapset *sgrefs;
6237 : List *flattenable_cols;
6238 : List *flattenable_vars;
6239 : int i;
6240 : ListCell *lc;
6241 :
6242 : Assert(root->parse->hasWindowFuncs);
6243 :
6244 : /*
6245 : * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6246 : * into a bitmapset for convenient reference below.
6247 : */
6248 2576 : sgrefs = NULL;
6249 5338 : foreach(lc, activeWindows)
6250 : {
6251 2762 : WindowClause *wc = lfirst_node(WindowClause, lc);
6252 : ListCell *lc2;
6253 :
6254 3518 : foreach(lc2, wc->partitionClause)
6255 : {
6256 756 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6257 :
6258 756 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6259 : }
6260 5034 : foreach(lc2, wc->orderClause)
6261 : {
6262 2272 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6263 :
6264 2272 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6265 : }
6266 : }
6267 :
6268 : /* Add in sortgroupref numbers of GROUP BY clauses, too */
6269 2768 : foreach(lc, root->processed_groupClause)
6270 : {
6271 192 : SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6272 :
6273 192 : sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6274 : }
6275 :
6276 : /*
6277 : * Construct a target containing all the non-flattenable targetlist items,
6278 : * and save aside the others for a moment.
6279 : */
6280 2576 : input_target = create_empty_pathtarget();
6281 2576 : flattenable_cols = NIL;
6282 :
6283 2576 : i = 0;
6284 10928 : foreach(lc, final_target->exprs)
6285 : {
6286 8352 : Expr *expr = (Expr *) lfirst(lc);
6287 8352 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
6288 :
6289 : /*
6290 : * Don't want to deconstruct window clauses or GROUP BY items. (Note
6291 : * that such items can't contain window functions, so it's okay to
6292 : * compute them below the WindowAgg nodes.)
6293 : */
6294 8352 : if (sgref != 0 && bms_is_member(sgref, sgrefs))
6295 : {
6296 : /*
6297 : * Don't want to deconstruct this value, so add it to the input
6298 : * target as-is.
6299 : */
6300 2864 : add_column_to_pathtarget(input_target, expr, sgref);
6301 : }
6302 : else
6303 : {
6304 : /*
6305 : * Column is to be flattened, so just remember the expression for
6306 : * later call to pull_var_clause.
6307 : */
6308 5488 : flattenable_cols = lappend(flattenable_cols, expr);
6309 : }
6310 :
6311 8352 : i++;
6312 : }
6313 :
6314 : /*
6315 : * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6316 : * add them to the input target if not already present. (Some might be
6317 : * there already because they're used directly as window/group clauses.)
6318 : *
6319 : * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6320 : * Aggrefs are placed in the Agg node's tlist and not left to be computed
6321 : * at higher levels. On the other hand, we should recurse into
6322 : * WindowFuncs to make sure their input expressions are available.
6323 : */
6324 2576 : flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6325 : PVC_INCLUDE_AGGREGATES |
6326 : PVC_RECURSE_WINDOWFUNCS |
6327 : PVC_INCLUDE_PLACEHOLDERS);
6328 2576 : add_new_columns_to_pathtarget(input_target, flattenable_vars);
6329 :
6330 : /* clean up cruft */
6331 2576 : list_free(flattenable_vars);
6332 2576 : list_free(flattenable_cols);
6333 :
6334 : /* XXX this causes some redundant cost calculation ... */
6335 2576 : return set_pathtarget_cost_width(root, input_target);
6336 : }
6337 :
6338 : /*
6339 : * make_pathkeys_for_window
6340 : * Create a pathkeys list describing the required input ordering
6341 : * for the given WindowClause.
6342 : *
6343 : * Modifies wc's partitionClause to remove any clauses which are deemed
6344 : * redundant by the pathkey logic.
6345 : *
6346 : * The required ordering is first the PARTITION keys, then the ORDER keys.
6347 : * In the future we might try to implement windowing using hashing, in which
6348 : * case the ordering could be relaxed, but for now we always sort.
6349 : */
6350 : static List *
6351 5552 : make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6352 : List *tlist)
6353 : {
6354 5552 : List *window_pathkeys = NIL;
6355 :
6356 : /* Throw error if can't sort */
6357 5552 : if (!grouping_is_sortable(wc->partitionClause))
6358 0 : ereport(ERROR,
6359 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6360 : errmsg("could not implement window PARTITION BY"),
6361 : errdetail("Window partitioning columns must be of sortable datatypes.")));
6362 5552 : if (!grouping_is_sortable(wc->orderClause))
6363 0 : ereport(ERROR,
6364 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6365 : errmsg("could not implement window ORDER BY"),
6366 : errdetail("Window ordering columns must be of sortable datatypes.")));
6367 :
6368 : /*
6369 : * First fetch the pathkeys for the PARTITION BY clause. We can safely
6370 : * remove any clauses from the wc->partitionClause for redundant pathkeys.
6371 : */
6372 5552 : if (wc->partitionClause != NIL)
6373 : {
6374 : bool sortable;
6375 :
6376 1320 : window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6377 : &wc->partitionClause,
6378 : tlist,
6379 : true,
6380 : false,
6381 : &sortable,
6382 : false);
6383 :
6384 : Assert(sortable);
6385 : }
6386 :
6387 : /*
6388 : * In principle, we could also consider removing redundant ORDER BY items
6389 : * In principle, we could also consider removing redundant ORDER BY items,
6390 : * since doing so does not alter the result of peer row checks done by
6391 : * RANGE OFFSET cases, as the executor needs that for in_range tests even
6392 : * if it's known to be equal to some partitioning column.
6393 : */
6394 5552 : if (wc->orderClause != NIL)
6395 : {
6396 : List *orderby_pathkeys;
6397 :
6398 4450 : orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6399 : wc->orderClause,
6400 : tlist);
6401 :
6402 : /* Okay, make the combined pathkeys */
6403 4450 : if (window_pathkeys != NIL)
6404 946 : window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6405 : else
6406 3504 : window_pathkeys = orderby_pathkeys;
6407 : }
6408 :
6409 5552 : return window_pathkeys;
6410 : }
6411 :
6412 : /*
6413 : * make_sort_input_target
6414 : * Generate appropriate PathTarget for initial input to Sort step.
6415 : *
6416 : * If the query has ORDER BY, this function chooses the target to be computed
6417 : * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6418 : * project) steps. This might or might not be identical to the query's final
6419 : * output target.
6420 : *
6421 : * The main argument for keeping the sort-input tlist the same as the final
6422 : * target is that we avoid a separate projection node (which will be needed if
6423 : * they're different, because Sort can't project). However, there are also
6424 : * advantages to postponing tlist evaluation till after the Sort: it ensures
6425 : * a consistent order of evaluation for any volatile functions in the tlist,
6426 : * and if there's also a LIMIT, we can stop the query without ever computing
6427 : * tlist functions for later rows, which is beneficial for both volatile and
6428 : * expensive functions.
6429 : *
6430 : * Our current policy is to postpone volatile expressions till after the sort
6431 : * unconditionally (assuming that that's possible, ie they are in plain tlist
6432 : * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6433 : * postpone set-returning expressions, because running them beforehand would
6434 : * bloat the sort dataset, and because it might cause unexpected output order
6435 : * if the sort isn't stable. However there's a constraint on that: all SRFs
6436 : * in the tlist should be evaluated at the same plan step, so that they can
6437 : * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6438 : * mustn't postpone any SRFs. (Note that in principle that policy should
6439 : * probably get applied to the group/window input targetlists too, but we
6440 : * have not done that historically.) Lastly, expensive expressions are
6441 : * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6442 : * partial evaluation of the query is possible (if neither is true, we expect
6443 : * to have to evaluate the expressions for every row anyway), or if there are
6444 : * any volatile or set-returning expressions (since once we've put in a
6445 : * projection at all, it won't cost any more to postpone more stuff).
6446 : *
6447 : * Another issue that could potentially be considered here is that
6448 : * evaluating tlist expressions could result in data that's either wider
6449 : * or narrower than the input Vars, thus changing the volume of data that
6450 : * has to go through the Sort. However, we usually have only a very bad
6451 : * idea of the output width of any expression more complex than a Var,
6452 : * so for now it seems too risky to try to optimize on that basis.
6453 : *
6454 : * Note that if we do produce a modified sort-input target, and then the
6455 : * query ends up not using an explicit Sort, no particular harm is done:
6456 : * we'll initially use the modified target for the preceding path nodes,
6457 : * but then change them to the final target with apply_projection_to_path.
6458 : * Moreover, in such a case the guarantees about evaluation order of
6459 : * volatile functions still hold, since the rows are sorted already.
6460 : *
6461 : * This function has some things in common with make_group_input_target and
6462 : * make_window_input_target, though the detailed rules for what to do are
6463 : * different. We never flatten/postpone any grouping or ordering columns;
6464 : * those are needed before the sort. If we do flatten a particular
6465 : * expression, we leave Aggref and WindowFunc nodes alone, since those were
6466 : * computed earlier.
6467 : *
6468 : * 'final_target' is the query's final target list (in PathTarget form)
6469 : * 'have_postponed_srfs' is an output argument, see below
6470 : *
6471 : * The result is the PathTarget to be computed by the plan node immediately
6472 : * below the Sort step (and the Distinct step, if any). This will be
6473 : * exactly final_target if we decide a projection step wouldn't be helpful.
6474 : *
6475 : * In addition, *have_postponed_srfs is set to true if we choose to postpone
6476 : * any set-returning functions to after the Sort.
6477 : */
6478 : static PathTarget *
6479 72048 : make_sort_input_target(PlannerInfo *root,
6480 : PathTarget *final_target,
6481 : bool *have_postponed_srfs)
6482 : {
6483 72048 : Query *parse = root->parse;
6484 : PathTarget *input_target;
6485 : int ncols;
6486 : bool *col_is_srf;
6487 : bool *postpone_col;
6488 : bool have_srf;
6489 : bool have_volatile;
6490 : bool have_expensive;
6491 : bool have_srf_sortcols;
6492 : bool postpone_srfs;
6493 : List *postponable_cols;
6494 : List *postponable_vars;
6495 : int i;
6496 : ListCell *lc;
6497 :
6498 : /* Shouldn't get here unless query has ORDER BY */
6499 : Assert(parse->sortClause);
6500 :
6501 72048 : *have_postponed_srfs = false; /* default result */
6502 :
6503 : /* Inspect tlist and collect per-column information */
6504 72048 : ncols = list_length(final_target->exprs);
6505 72048 : col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6506 72048 : postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6507 72048 : have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6508 :
6509 72048 : i = 0;
6510 435226 : foreach(lc, final_target->exprs)
6511 : {
6512 363178 : Expr *expr = (Expr *) lfirst(lc);
6513 :
6514 : /*
6515 : * If the column has a sortgroupref, assume it has to be evaluated
6516 : * before sorting. Generally such columns would be ORDER BY, GROUP
6517 : * BY, etc targets. One exception is columns that were removed from
6518 : * GROUP BY by remove_useless_groupby_columns() ... but those would
6519 : * only be Vars anyway. There don't seem to be any cases where it
6520 : * would be worth the trouble to double-check.
6521 : */
6522 363178 : if (get_pathtarget_sortgroupref(final_target, i) == 0)
6523 : {
6524 : /*
6525 : * Check for SRF or volatile functions. Check the SRF case first
6526 : * because we must know whether we have any postponed SRFs.
6527 : */
6528 261810 : if (parse->hasTargetSRFs &&
6529 216 : expression_returns_set((Node *) expr))
6530 : {
6531 : /* We'll decide below whether these are postponable */
6532 96 : col_is_srf[i] = true;
6533 96 : have_srf = true;
6534 : }
6535 261498 : else if (contain_volatile_functions((Node *) expr))
6536 : {
6537 : /* Unconditionally postpone */
6538 148 : postpone_col[i] = true;
6539 148 : have_volatile = true;
6540 : }
6541 : else
6542 : {
6543 : /*
6544 : * Else check the cost. XXX it's annoying to have to do this
6545 : * when set_pathtarget_cost_width() just did it. Refactor to
6546 : * allow sharing the work?
6547 : */
6548 : QualCost cost;
6549 :
6550 261350 : cost_qual_eval_node(&cost, (Node *) expr, root);
6551 :
6552 : /*
6553 : * We arbitrarily define "expensive" as "more than 10X
6554 : * cpu_operator_cost". Note this will take in any PL function
6555 : * with default cost.
6556 : */
6557 261350 : if (cost.per_tuple > 10 * cpu_operator_cost)
6558 : {
6559 16896 : postpone_col[i] = true;
6560 16896 : have_expensive = true;
6561 : }
6562 : }
6563 : }
6564 : else
6565 : {
6566 : /* For sortgroupref cols, just check if any contain SRFs */
6567 101584 : if (!have_srf_sortcols &&
6568 101894 : parse->hasTargetSRFs &&
6569 334 : expression_returns_set((Node *) expr))
6570 148 : have_srf_sortcols = true;
6571 : }
6572 :
6573 363178 : i++;
6574 : }
6575 :
6576 : /*
6577 : * We can postpone SRFs if we have some but none are in sortgroupref cols.
6578 : */
6579 72048 : postpone_srfs = (have_srf && !have_srf_sortcols);
6580 :
6581 : /*
6582 : * If we don't need a post-sort projection, just return final_target.
6583 : */
6584 72048 : if (!(postpone_srfs || have_volatile ||
6585 71844 : (have_expensive &&
6586 9930 : (parse->limitCount || root->tuple_fraction > 0))))
6587 71808 : return final_target;
6588 :
6589 : /*
6590 : * Report whether the post-sort projection will contain set-returning
6591 : * functions. This is important because it affects whether the Sort can
6592 : * rely on the query's LIMIT (if any) to bound the number of rows it needs
6593 : * to return.
6594 : */
6595 240 : *have_postponed_srfs = postpone_srfs;
6596 :
6597 : /*
6598 : * Construct the sort-input target, taking all non-postponable columns and
6599 : * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6600 : * the postponable ones.
6601 : */
6602 240 : input_target = create_empty_pathtarget();
6603 240 : postponable_cols = NIL;
6604 :
6605 240 : i = 0;
6606 1990 : foreach(lc, final_target->exprs)
6607 : {
6608 1750 : Expr *expr = (Expr *) lfirst(lc);
6609 :
6610 1750 : if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6611 298 : postponable_cols = lappend(postponable_cols, expr);
6612 : else
6613 1452 : add_column_to_pathtarget(input_target, expr,
6614 1452 : get_pathtarget_sortgroupref(final_target, i));
6615 :
6616 1750 : i++;
6617 : }
6618 :
6619 : /*
6620 : * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6621 : * postponable columns, and add them to the sort-input target if not
6622 : * already present. (Some might be there already.) We mustn't
6623 : * deconstruct Aggrefs or WindowFuncs here, since the projection node
6624 : * would be unable to recompute them.
6625 : */
6626 240 : postponable_vars = pull_var_clause((Node *) postponable_cols,
6627 : PVC_INCLUDE_AGGREGATES |
6628 : PVC_INCLUDE_WINDOWFUNCS |
6629 : PVC_INCLUDE_PLACEHOLDERS);
6630 240 : add_new_columns_to_pathtarget(input_target, postponable_vars);
6631 :
6632 : /* clean up cruft */
6633 240 : list_free(postponable_vars);
6634 240 : list_free(postponable_cols);
6635 :
6636 : /* XXX this represents even more redundant cost calculation ... */
6637 240 : return set_pathtarget_cost_width(root, input_target);
6638 : }
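
As a rough illustration of the "expensive" test above (a stand-alone sketch with assumed default settings, not planner code): with the default cpu_operator_cost of 0.0025 the cutoff is 0.025 per tuple, while a function left at the default procedural-language cost of 100 cost units evaluates to 0.25 per tuple, so it would be postponed when a LIMIT or nonzero tuple_fraction applies.

#include <stdio.h>

int
main(void)
{
    double      cpu_operator_cost = 0.0025;             /* default GUC value */
    double      threshold = 10 * cpu_operator_cost;     /* 0.025 */
    double      pl_func_per_tuple = 100 * cpu_operator_cost;   /* default procost 100 */

    printf("threshold = %g, PL function default = %g, expensive = %s\n",
           threshold, pl_func_per_tuple,
           pl_func_per_tuple > threshold ? "yes" : "no");
    return 0;
}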
6639 :
6640 : /*
6641 : * get_cheapest_fractional_path
6642 : * Find the cheapest path for retrieving a specified fraction of all
6643 : * the tuples expected to be returned by the given relation.
6644 : *
6645 : * Do not consider parameterized paths.  If the caller needs a path for an
6646 : * upper rel, parameterized paths are not usable there.  If the caller needs
6647 : * an append subpath, a parameterized path would be limited by the
6648 : * requirement that all the subpaths carry a similar parameterization.
6649 : *
6650 : * We interpret tuple_fraction the same way as grouping_planner.
6651 : *
6652 : * We assume set_cheapest() has been run on the given rel.
6653 : */
6654 : Path *
6655 507138 : get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6656 : {
6657 507138 : Path *best_path = rel->cheapest_total_path;
6658 : ListCell *l;
6659 :
6660 : /* If all tuples will be retrieved, just return the cheapest-total path */
6661 507138 : if (tuple_fraction <= 0.0)
6662 497552 : return best_path;
6663 :
6664 : /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6665 9586 : if (tuple_fraction >= 1.0 && best_path->rows > 0)
6666 3988 : tuple_fraction /= best_path->rows;
6667 :
6668 24998 : foreach(l, rel->pathlist)
6669 : {
6670 15412 : Path *path = (Path *) lfirst(l);
6671 :
6672 15412 : if (path->param_info)
6673 200 : continue;
6674 :
6675 20838 : if (path == rel->cheapest_total_path ||
6676 5626 : compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6677 14688 : continue;
6678 :
6679 524 : best_path = path;
6680 : }
6681 :
6682 9586 : return best_path;
6683 : }
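
To make the normalization above concrete, here is a tiny stand-alone sketch (invented numbers, not planner code): a caller asking for 50 tuples from a path estimated to return 1000 rows ends up comparing paths at a fraction of 0.05.

#include <stdio.h>

int
main(void)
{
    double      tuple_fraction = 50.0;      /* absolute tuple count requested */
    double      best_path_rows = 1000.0;    /* estimate for cheapest-total path */

    /* Convert absolute # of tuples to a fraction, as the function above does */
    if (tuple_fraction >= 1.0 && best_path_rows > 0)
        tuple_fraction /= best_path_rows;

    printf("tuple_fraction = %g\n", tuple_fraction);    /* 0.05 */
    return 0;
}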
6684 :
6685 : /*
6686 : * adjust_paths_for_srfs
6687 : * Fix up the Paths of the given upperrel to handle tSRFs properly.
6688 : *
6689 : * The executor can only handle set-returning functions that appear at the
6690 : * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6691 : * that are not at top level, we need to split up the evaluation into multiple
6692 : * plan levels in which each level satisfies this constraint. This function
6693 : * modifies each Path of an upperrel that (might) compute any SRFs in its
6694 : * output tlist to insert appropriate projection steps.
6695 : *
6696 : * The given targets and targets_contain_srfs lists are from
6697 : * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6698 : * target in targets.
6699 : */
6700 : static void
6701 12796 : adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
6702 : List *targets, List *targets_contain_srfs)
6703 : {
6704 : ListCell *lc;
6705 :
6706 : Assert(list_length(targets) == list_length(targets_contain_srfs));
6707 : Assert(!linitial_int(targets_contain_srfs));
6708 :
6709 : /* If no SRFs appear at this plan level, nothing to do */
6710 12796 : if (list_length(targets) == 1)
6711 708 : return;
6712 :
6713 : /*
6714 : * Stack SRF-evaluation nodes atop each path for the rel.
6715 : *
6716 : * In principle we should re-run set_cheapest() here to identify the
6717 : * cheapest path, but it seems unlikely that adding the same tlist eval
6718 : * costs to all the paths would change that, so we don't bother. Instead,
6719 : * just assume that the cheapest-startup and cheapest-total paths remain
6720 : * so. (There should be no parameterized paths anymore, so we needn't
6721 : * worry about updating cheapest_parameterized_paths.)
6722 : */
6723 24214 : foreach(lc, rel->pathlist)
6724 : {
6725 12126 : Path *subpath = (Path *) lfirst(lc);
6726 12126 : Path *newpath = subpath;
6727 : ListCell *lc1,
6728 : *lc2;
6729 :
6730 : Assert(subpath->param_info == NULL);
6731 37576 : forboth(lc1, targets, lc2, targets_contain_srfs)
6732 : {
6733 25450 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6734 25450 : bool contains_srfs = (bool) lfirst_int(lc2);
6735 :
6736 : /* If this level doesn't contain SRFs, do regular projection */
6737 25450 : if (contains_srfs)
6738 12186 : newpath = (Path *) create_set_projection_path(root,
6739 : rel,
6740 : newpath,
6741 : thistarget);
6742 : else
6743 13264 : newpath = (Path *) apply_projection_to_path(root,
6744 : rel,
6745 : newpath,
6746 : thistarget);
6747 : }
6748 12126 : lfirst(lc) = newpath;
6749 12126 : if (subpath == rel->cheapest_startup_path)
6750 404 : rel->cheapest_startup_path = newpath;
6751 12126 : if (subpath == rel->cheapest_total_path)
6752 404 : rel->cheapest_total_path = newpath;
6753 : }
6754 :
6755 : /* Likewise for partial paths, if any */
6756 12106 : foreach(lc, rel->partial_pathlist)
6757 : {
6758 18 : Path *subpath = (Path *) lfirst(lc);
6759 18 : Path *newpath = subpath;
6760 : ListCell *lc1,
6761 : *lc2;
6762 :
6763 : Assert(subpath->param_info == NULL);
6764 72 : forboth(lc1, targets, lc2, targets_contain_srfs)
6765 : {
6766 54 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6767 54 : bool contains_srfs = (bool) lfirst_int(lc2);
6768 :
6769 : /* If this level doesn't contain SRFs, do regular projection */
6770 54 : if (contains_srfs)
6771 18 : newpath = (Path *) create_set_projection_path(root,
6772 : rel,
6773 : newpath,
6774 : thistarget);
6775 : else
6776 : {
6777 : /* avoid apply_projection_to_path, in case of multiple refs */
6778 36 : newpath = (Path *) create_projection_path(root,
6779 : rel,
6780 : newpath,
6781 : thistarget);
6782 : }
6783 : }
6784 18 : lfirst(lc) = newpath;
6785 : }
6786 : }
6787 :
6788 : /*
6789 : * expression_planner
6790 : * Perform planner's transformations on a standalone expression.
6791 : *
6792 : * Various utility commands need to evaluate expressions that are not part
6793 : * of a plannable query. They can do so using the executor's regular
6794 : * expression-execution machinery, but first the expression has to be fed
6795 : * through here to transform it from parser output to something executable.
6796 : *
6797 : * Currently, we disallow sublinks in standalone expressions, so there's no
6798 : * real "planning" involved here. (That might not always be true though.)
6799 : * What we must do is run eval_const_expressions to ensure that any function
6800 : * calls are converted to positional notation and function default arguments
6801 : * get inserted. The fact that constant subexpressions get simplified is a
6802 : * side-effect that is useful when the expression will get evaluated more than
6803 : * once. Also, we must fix operator function IDs.
6804 : *
6805 : * This does not return any information about dependencies of the expression.
6806 : * Hence callers should use the results only for the duration of the current
6807 : * query. Callers that would like to cache the results for longer should use
6808 : * expression_planner_with_deps, probably via the plancache.
6809 : *
6810 : * Note: this must not make any damaging changes to the passed-in expression
6811 : * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6812 : * we first do an expression_tree_mutator-based walk, what is returned will
6813 : * be a new node tree.) The result is constructed in the current memory
6814 : * context; beware that this can leak a lot of additional stuff there, too.
6815 : */
6816 : Expr *
6817 246506 : expression_planner(Expr *expr)
6818 : {
6819 : Node *result;
6820 :
6821 : /*
6822 : * Convert named-argument function calls, insert default arguments and
6823 : * simplify constant subexprs
6824 : */
6825 246506 : result = eval_const_expressions(NULL, (Node *) expr);
6826 :
6827 : /* Fill in opfuncid values if missing */
6828 246488 : fix_opfuncids(result);
6829 :
6830 246488 : return (Expr *) result;
6831 : }
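
The comments above describe feeding a standalone expression through the planner and then using the executor's regular machinery. A hedged sketch of such a caller follows; eval_standalone_expr is a hypothetical helper, not a backend function, and it omits the memory-context and error-handling care a real utility command would need. It only works for expressions containing no Vars (e.g. a column default).

#include "postgres.h"

#include "executor/executor.h"
#include "optimizer/optimizer.h"

/* Hypothetical helper: plan and evaluate a Var-free standalone expression. */
static Datum
eval_standalone_expr(Expr *raw_expr, bool *isnull)
{
    Expr       *planned;
    ExprState  *exprstate;
    ExprContext *econtext;
    Datum       result;

    /* parser output -> executable expression tree */
    planned = expression_planner(raw_expr);

    /* compile and evaluate it with the regular executor machinery */
    exprstate = ExecInitExpr(planned, NULL);
    econtext = CreateStandaloneExprContext();
    result = ExecEvalExpr(exprstate, econtext, isnull);

    FreeExprContext(econtext, true);
    return result;
}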
6832 :
6833 : /*
6834 : * expression_planner_with_deps
6835 : * Perform planner's transformations on a standalone expression,
6836 : * returning expression dependency information along with the result.
6837 : *
6838 : * This is identical to expression_planner() except that it also returns
6839 : * information about possible dependencies of the expression, ie identities of
6840 : * objects whose definitions affect the result. As in a PlannedStmt, these
6841 : * are expressed as a list of relation Oids and a list of PlanInvalItems.
6842 : */
6843 : Expr *
6844 370 : expression_planner_with_deps(Expr *expr,
6845 : List **relationOids,
6846 : List **invalItems)
6847 : {
6848 : Node *result;
6849 : PlannerGlobal glob;
6850 : PlannerInfo root;
6851 :
6852 : /* Make up dummy planner state so we can use setrefs machinery */
6853 9990 : MemSet(&glob, 0, sizeof(glob));
6854 370 : glob.type = T_PlannerGlobal;
6855 370 : glob.relationOids = NIL;
6856 370 : glob.invalItems = NIL;
6857 :
6858 34410 : MemSet(&root, 0, sizeof(root));
6859 370 : root.type = T_PlannerInfo;
6860 370 : root.glob = &glob;
6861 :
6862 : /*
6863 : * Convert named-argument function calls, insert default arguments and
6864 : * simplify constant subexprs. Collect identities of inlined functions
6865 : * and elided domains, too.
6866 : */
6867 370 : result = eval_const_expressions(&root, (Node *) expr);
6868 :
6869 : /* Fill in opfuncid values if missing */
6870 370 : fix_opfuncids(result);
6871 :
6872 : /*
6873 : * Now walk the finished expression to find anything else we ought to
6874 : * record as an expression dependency.
6875 : */
6876 370 : (void) extract_query_dependencies_walker(result, &root);
6877 :
6878 370 : *relationOids = glob.relationOids;
6879 370 : *invalItems = glob.invalItems;
6880 :
6881 370 : return (Expr *) result;
6882 : }
6883 :
6884 :
6885 : /*
6886 : * plan_cluster_use_sort
6887 : * Use the planner to decide how CLUSTER should implement sorting
6888 : *
6889 : * tableOid is the OID of a table to be clustered on its index indexOid
6890 : * (which is already known to be a btree index). Decide whether it's
6891 : * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6892 : * Return true to use sorting, false to use an indexscan.
6893 : *
6894 : * Note: caller had better already hold some type of lock on the table.
6895 : */
6896 : bool
6897 188 : plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6898 : {
6899 : PlannerInfo *root;
6900 : Query *query;
6901 : PlannerGlobal *glob;
6902 : RangeTblEntry *rte;
6903 : RelOptInfo *rel;
6904 : IndexOptInfo *indexInfo;
6905 : QualCost indexExprCost;
6906 : Cost comparisonCost;
6907 : Path *seqScanPath;
6908 : Path seqScanAndSortPath;
6909 : IndexPath *indexScanPath;
6910 : ListCell *lc;
6911 :
6912 : /* We can short-circuit the cost comparison if indexscans are disabled */
6913 188 : if (!enable_indexscan)
6914 30 : return true; /* use sort */
6915 :
6916 : /* Set up mostly-dummy planner state */
6917 158 : query = makeNode(Query);
6918 158 : query->commandType = CMD_SELECT;
6919 :
6920 158 : glob = makeNode(PlannerGlobal);
6921 :
6922 158 : root = makeNode(PlannerInfo);
6923 158 : root->parse = query;
6924 158 : root->glob = glob;
6925 158 : root->query_level = 1;
6926 158 : root->planner_cxt = CurrentMemoryContext;
6927 158 : root->wt_param_id = -1;
6928 158 : root->join_domains = list_make1(makeNode(JoinDomain));
6929 :
6930 : /* Build a minimal RTE for the rel */
6931 158 : rte = makeNode(RangeTblEntry);
6932 158 : rte->rtekind = RTE_RELATION;
6933 158 : rte->relid = tableOid;
6934 158 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6935 158 : rte->rellockmode = AccessShareLock;
6936 158 : rte->lateral = false;
6937 158 : rte->inh = false;
6938 158 : rte->inFromCl = true;
6939 158 : query->rtable = list_make1(rte);
6940 158 : addRTEPermissionInfo(&query->rteperminfos, rte);
6941 :
6942 : /* Set up RTE/RelOptInfo arrays */
6943 158 : setup_simple_rel_arrays(root);
6944 :
6945 : /* Build RelOptInfo */
6946 158 : rel = build_simple_rel(root, 1, NULL);
6947 :
6948 : /* Locate IndexOptInfo for the target index */
6949 158 : indexInfo = NULL;
6950 196 : foreach(lc, rel->indexlist)
6951 : {
6952 196 : indexInfo = lfirst_node(IndexOptInfo, lc);
6953 196 : if (indexInfo->indexoid == indexOid)
6954 158 : break;
6955 : }
6956 :
6957 : /*
6958 : * It's possible that get_relation_info did not generate an IndexOptInfo
6959 : * for the desired index; this could happen if it's not yet reached its
6960 : * indcheckxmin usability horizon, or if it's a system index and we're
6961 : * ignoring system indexes. In such cases we should tell CLUSTER to not
6962 : * trust the index contents but use seqscan-and-sort.
6963 : */
6964 158 : if (lc == NULL) /* not in the list? */
6965 0 : return true; /* use sort */
6966 :
6967 : /*
6968 : * Rather than doing all the pushups that would be needed to use
6969 : * set_baserel_size_estimates, just do a quick hack for rows and width.
6970 : */
6971 158 : rel->rows = rel->tuples;
6972 158 : rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6973 :
6974 158 : root->total_table_pages = rel->pages;
6975 :
6976 : /*
6977 : * Determine eval cost of the index expressions, if any. We need to
6978 : * charge twice that amount for each tuple comparison that happens during
6979 : * the sort, since tuplesort.c will have to re-evaluate the index
6980 : * expressions each time. (XXX that's pretty inefficient...)
6981 : */
6982 158 : cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6983 158 : comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6984 :
6985 : /* Estimate the cost of seq scan + sort */
6986 158 : seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6987 158 : cost_sort(&seqScanAndSortPath, root, NIL,
6988 : seqScanPath->disabled_nodes,
6989 158 : seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6990 : comparisonCost, maintenance_work_mem, -1.0);
6991 :
6992 : /* Estimate the cost of index scan */
6993 158 : indexScanPath = create_index_path(root, indexInfo,
6994 : NIL, NIL, NIL, NIL,
6995 : ForwardScanDirection, false,
6996 : NULL, 1.0, false);
6997 :
6998 158 : return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
6999 : }
7000 :
7001 : /*
7002 : * plan_create_index_workers
7003 : * Use the planner to decide how many parallel worker processes
7004 : * CREATE INDEX should request for use
7005 : *
7006 : * tableOid is the table on which the index is to be built. indexOid is the
7007 : * OID of an index to be created or reindexed (which must be an index with
7008 : * support for parallel builds - currently btree, GIN, or BRIN).
7009 : *
7010 : * Return value is the number of parallel worker processes to request. It
7011 : * may be unsafe to proceed if this is 0. Note that this does not include the
7012 : * leader participating as a worker (value is always a number of parallel
7013 : * worker processes).
7014 : *
7015 : * Note: caller had better already hold some type of lock on the table and
7016 : * index.
7017 : */
7018 : int
7019 36550 : plan_create_index_workers(Oid tableOid, Oid indexOid)
7020 : {
7021 : PlannerInfo *root;
7022 : Query *query;
7023 : PlannerGlobal *glob;
7024 : RangeTblEntry *rte;
7025 : Relation heap;
7026 : Relation index;
7027 : RelOptInfo *rel;
7028 : int parallel_workers;
7029 : BlockNumber heap_blocks;
7030 : double reltuples;
7031 : double allvisfrac;
7032 :
7033 : /*
7034 : * We don't allow performing parallel operation in standalone backend or
7035 : * when parallelism is disabled.
7036 : */
7037 36550 : if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
7038 514 : return 0;
7039 :
7040 : /* Set up largely-dummy planner state */
7041 36036 : query = makeNode(Query);
7042 36036 : query->commandType = CMD_SELECT;
7043 :
7044 36036 : glob = makeNode(PlannerGlobal);
7045 :
7046 36036 : root = makeNode(PlannerInfo);
7047 36036 : root->parse = query;
7048 36036 : root->glob = glob;
7049 36036 : root->query_level = 1;
7050 36036 : root->planner_cxt = CurrentMemoryContext;
7051 36036 : root->wt_param_id = -1;
7052 36036 : root->join_domains = list_make1(makeNode(JoinDomain));
7053 :
7054 : /*
7055 : * Build a minimal RTE.
7056 : *
7057 : * Mark the RTE with inh = true. This is a kludge to prevent
7058 : * get_relation_info() from fetching index info, which is necessary
7059 : * because it does not expect that any IndexOptInfo is currently
7060 : * undergoing REINDEX.
7061 : */
7062 36036 : rte = makeNode(RangeTblEntry);
7063 36036 : rte->rtekind = RTE_RELATION;
7064 36036 : rte->relid = tableOid;
7065 36036 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
7066 36036 : rte->rellockmode = AccessShareLock;
7067 36036 : rte->lateral = false;
7068 36036 : rte->inh = true;
7069 36036 : rte->inFromCl = true;
7070 36036 : query->rtable = list_make1(rte);
7071 36036 : addRTEPermissionInfo(&query->rteperminfos, rte);
7072 :
7073 : /* Set up RTE/RelOptInfo arrays */
7074 36036 : setup_simple_rel_arrays(root);
7075 :
7076 : /* Build RelOptInfo */
7077 36036 : rel = build_simple_rel(root, 1, NULL);
7078 :
7079 : /* Rels are assumed already locked by the caller */
7080 36036 : heap = table_open(tableOid, NoLock);
7081 36036 : index = index_open(indexOid, NoLock);
7082 :
7083 : /*
7084 : * Determine if it's safe to proceed.
7085 : *
7086 : * Currently, parallel workers can't access the leader's temporary tables.
7087 : * Furthermore, any index predicate or index expressions must be parallel
7088 : * safe.
7089 : */
7090 36036 : if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
7091 33958 : !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
7092 33818 : !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
7093 : {
7094 2218 : parallel_workers = 0;
7095 2218 : goto done;
7096 : }
7097 :
7098 : /*
7099 : * If parallel_workers storage parameter is set for the table, accept that
7100 : * as the number of parallel worker processes to launch (though still cap
7101 : * at max_parallel_maintenance_workers). Note that we deliberately do not
7102 : * consider any other factor when parallel_workers is set. (e.g., memory
7103 : * use by workers.)
7104 : */
7105 33818 : if (rel->rel_parallel_workers != -1)
7106 : {
7107 98 : parallel_workers = Min(rel->rel_parallel_workers,
7108 : max_parallel_maintenance_workers);
7109 98 : goto done;
7110 : }
7111 :
7112 : /*
7113 : * Estimate heap relation size ourselves, since rel->pages cannot be
7114 : * trusted (heap RTE was marked as inheritance parent)
7115 : */
7116 33720 : estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
7117 :
7118 : /*
7119 : * Determine number of workers to scan the heap relation using generic
7120 : * model
7121 : */
7122 33720 : parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7123 : max_parallel_maintenance_workers);
7124 :
7125 : /*
7126 : * Cap workers based on available maintenance_work_mem as needed.
7127 : *
7128 : * Note that each tuplesort participant receives an even share of the
7129 : * total maintenance_work_mem budget. Aim to leave participants
7130 : * (including the leader as a participant) with no less than 32MB of
7131 : * memory. This leaves cases where maintenance_work_mem is set to 64MB
7132 : * memory.  This means that a maintenance_work_mem setting of 64MB is just
7133 : * past the threshold at which a single parallel worker can be launched for
7134 : * the sort.
7135 33882 : while (parallel_workers > 0 &&
7136 326 : maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7137 162 : parallel_workers--;
7138 :
7139 33720 : done:
7140 36036 : index_close(index, NoLock);
7141 36036 : table_close(heap, NoLock);
7142 :
7143 36036 : return parallel_workers;
7144 : }
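
To illustrate the memory-based cap above (a stand-alone sketch with invented numbers; maintenance_work_mem is measured in kB in the backend): with a 64MB budget and an initial estimate of 4 workers, the loop settles on a single worker, leaving the leader and that worker 32MB each.

#include <stdio.h>

int
main(void)
{
    int         maintenance_work_mem = 64 * 1024;   /* 64MB, in kB */
    int         parallel_workers = 4;   /* initial estimate from heap size */

    /* Same capping rule as above: keep >= 32MB per participant */
    while (parallel_workers > 0 &&
           maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
        parallel_workers--;

    printf("parallel_workers = %d\n", parallel_workers);    /* prints 1 */
    return 0;
}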
7145 :
7146 : /*
7147 : * add_paths_to_grouping_rel
7148 : *
7149 : * Add non-partial paths to grouping relation.
7150 : */
7151 : static void
7152 47244 : add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
7153 : RelOptInfo *grouped_rel,
7154 : RelOptInfo *partially_grouped_rel,
7155 : const AggClauseCosts *agg_costs,
7156 : grouping_sets_data *gd,
7157 : GroupPathExtraData *extra)
7158 : {
7159 47244 : Query *parse = root->parse;
7160 47244 : Path *cheapest_path = input_rel->cheapest_total_path;
7161 47244 : Path *cheapest_partially_grouped_path = NULL;
7162 : ListCell *lc;
7163 47244 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7164 47244 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7165 47244 : List *havingQual = (List *) extra->havingQual;
7166 47244 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7167 47244 : double dNumGroups = 0;
7168 47244 : double dNumFinalGroups = 0;
7169 :
7170 : /*
7171 : * Estimate number of groups for non-split aggregation.
7172 : */
7173 47244 : dNumGroups = get_number_of_groups(root,
7174 : cheapest_path->rows,
7175 : gd,
7176 : extra->targetList);
7177 :
7178 47244 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7179 : {
7180 2998 : cheapest_partially_grouped_path =
7181 : partially_grouped_rel->cheapest_total_path;
7182 :
7183 : /*
7184 : * Estimate number of groups for final phase of partial aggregation.
7185 : */
7186 : dNumFinalGroups =
7187 2998 : get_number_of_groups(root,
7188 : cheapest_partially_grouped_path->rows,
7189 : gd,
7190 : extra->targetList);
7191 : }
7192 :
7193 47244 : if (can_sort)
7194 : {
7195 : /*
7196 : * Use any available suitably-sorted path as input, and also consider
7197 : * sorting the cheapest-total path and incremental sort on any paths
7198 : * with presorted keys.
7199 : */
7200 97898 : foreach(lc, input_rel->pathlist)
7201 : {
7202 : ListCell *lc2;
7203 50660 : Path *path = (Path *) lfirst(lc);
7204 50660 : Path *path_save = path;
7205 50660 : List *pathkey_orderings = NIL;
7206 :
7207 : /* generate alternative group orderings that might be useful */
7208 50660 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7209 :
7210 : Assert(list_length(pathkey_orderings) > 0);
7211 :
7212 101464 : foreach(lc2, pathkey_orderings)
7213 : {
7214 50804 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7215 :
7216 : /* restore the path (we replace it in the loop) */
7217 50804 : path = path_save;
7218 :
7219 50804 : path = make_ordered_path(root,
7220 : grouped_rel,
7221 : path,
7222 : cheapest_path,
7223 : info->pathkeys,
7224 : -1.0);
7225 50804 : if (path == NULL)
7226 386 : continue;
7227 :
7228 : /* Now decide what to stick atop it */
7229 50418 : if (parse->groupingSets)
7230 : {
7231 1062 : consider_groupingsets_paths(root, grouped_rel,
7232 : path, true, can_hash,
7233 : gd, agg_costs, dNumGroups);
7234 : }
7235 49356 : else if (parse->hasAggs)
7236 : {
7237 : /*
7238 : * We have aggregation, possibly with plain GROUP BY. Make
7239 : * an AggPath.
7240 : */
7241 48572 : add_path(grouped_rel, (Path *)
7242 48572 : create_agg_path(root,
7243 : grouped_rel,
7244 : path,
7245 48572 : grouped_rel->reltarget,
7246 48572 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7247 : AGGSPLIT_SIMPLE,
7248 : info->clauses,
7249 : havingQual,
7250 : agg_costs,
7251 : dNumGroups));
7252 : }
7253 784 : else if (parse->groupClause)
7254 : {
7255 : /*
7256 : * We have GROUP BY without aggregation or grouping sets.
7257 : * Make a GroupPath.
7258 : */
7259 784 : add_path(grouped_rel, (Path *)
7260 784 : create_group_path(root,
7261 : grouped_rel,
7262 : path,
7263 : info->clauses,
7264 : havingQual,
7265 : dNumGroups));
7266 : }
7267 : else
7268 : {
7269 : /* Other cases should have been handled above */
7270 : Assert(false);
7271 : }
7272 : }
7273 : }
7274 :
7275 : /*
7276 : * Instead of operating directly on the input relation, we can
7277 : * consider finalizing a partially aggregated path.
7278 : */
7279 47238 : if (partially_grouped_rel != NULL)
7280 : {
7281 7678 : foreach(lc, partially_grouped_rel->pathlist)
7282 : {
7283 : ListCell *lc2;
7284 4680 : Path *path = (Path *) lfirst(lc);
7285 4680 : Path *path_save = path;
7286 4680 : List *pathkey_orderings = NIL;
7287 :
7288 : /* generate alternative group orderings that might be useful */
7289 4680 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7290 :
7291 : Assert(list_length(pathkey_orderings) > 0);
7292 :
7293 : /* process all potentially interesting grouping reorderings */
7294 9360 : foreach(lc2, pathkey_orderings)
7295 : {
7296 4680 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7297 :
7298 : /* restore the path (we replace it in the loop) */
7299 4680 : path = path_save;
7300 :
7301 4680 : path = make_ordered_path(root,
7302 : grouped_rel,
7303 : path,
7304 : cheapest_partially_grouped_path,
7305 : info->pathkeys,
7306 : -1.0);
7307 :
7308 4680 : if (path == NULL)
7309 204 : continue;
7310 :
7311 4476 : if (parse->hasAggs)
7312 4228 : add_path(grouped_rel, (Path *)
7313 4228 : create_agg_path(root,
7314 : grouped_rel,
7315 : path,
7316 4228 : grouped_rel->reltarget,
7317 4228 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7318 : AGGSPLIT_FINAL_DESERIAL,
7319 : info->clauses,
7320 : havingQual,
7321 : agg_final_costs,
7322 : dNumFinalGroups));
7323 : else
7324 248 : add_path(grouped_rel, (Path *)
7325 248 : create_group_path(root,
7326 : grouped_rel,
7327 : path,
7328 : info->clauses,
7329 : havingQual,
7330 : dNumFinalGroups));
7331 :
7332 : }
7333 : }
7334 : }
7335 : }
7336 :
7337 47244 : if (can_hash)
7338 : {
7339 6020 : if (parse->groupingSets)
7340 : {
7341 : /*
7342 : * Try for a hash-only groupingsets path over unsorted input.
7343 : */
7344 900 : consider_groupingsets_paths(root, grouped_rel,
7345 : cheapest_path, false, true,
7346 : gd, agg_costs, dNumGroups);
7347 : }
7348 : else
7349 : {
7350 : /*
7351 : * Generate a HashAgg Path. We just need an Agg over the
7352 : * cheapest-total input path, since input order won't matter.
7353 : */
7354 5120 : add_path(grouped_rel, (Path *)
7355 5120 : create_agg_path(root, grouped_rel,
7356 : cheapest_path,
7357 5120 : grouped_rel->reltarget,
7358 : AGG_HASHED,
7359 : AGGSPLIT_SIMPLE,
7360 : root->processed_groupClause,
7361 : havingQual,
7362 : agg_costs,
7363 : dNumGroups));
7364 : }
7365 :
7366 : /*
7367 : * Generate a Finalize HashAgg Path atop of the cheapest partially
7368 : * grouped path, assuming there is one
7369 : */
7370 6020 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7371 : {
7372 1448 : add_path(grouped_rel, (Path *)
7373 1448 : create_agg_path(root,
7374 : grouped_rel,
7375 : cheapest_partially_grouped_path,
7376 1448 : grouped_rel->reltarget,
7377 : AGG_HASHED,
7378 : AGGSPLIT_FINAL_DESERIAL,
7379 : root->processed_groupClause,
7380 : havingQual,
7381 : agg_final_costs,
7382 : dNumFinalGroups));
7383 : }
7384 : }
7385 :
7386 : /*
7387 : * When partitionwise aggregate is used, we might have fully aggregated
7388 : * paths in the partial pathlist, because add_paths_to_append_rel() will
7389 : * consider a path for grouped_rel consisting of a Parallel Append of
7390 : * non-partial paths from each child.
7391 : */
7392 47244 : if (grouped_rel->partial_pathlist != NIL)
7393 318 : gather_grouping_paths(root, grouped_rel);
7394 47244 : }
7395 :
7396 : /*
7397 : * create_partial_grouping_paths
7398 : *
7399 : * Create a new upper relation representing the result of partial aggregation
7400 : * and populate it with appropriate paths. Note that we don't finalize the
7401 : * lists of paths here, so the caller can add additional partial or non-partial
7402 : * paths and must afterward call gather_grouping_paths and set_cheapest on
7403 : * the returned upper relation.
7404 : *
7405 : * All paths for this new upper relation -- both partial and non-partial --
7406 : * have been partially aggregated but require a subsequent FinalizeAggregate
7407 : * step.
7408 : *
7409 : * NB: This function is allowed to return NULL if it determines that there is
7410 : * no real need to create a new RelOptInfo.
7411 : */
7412 : static RelOptInfo *
7413 43134 : create_partial_grouping_paths(PlannerInfo *root,
7414 : RelOptInfo *grouped_rel,
7415 : RelOptInfo *input_rel,
7416 : grouping_sets_data *gd,
7417 : GroupPathExtraData *extra,
7418 : bool force_rel_creation)
7419 : {
7420 43134 : Query *parse = root->parse;
7421 : RelOptInfo *partially_grouped_rel;
7422 43134 : RelOptInfo *eager_agg_rel = NULL;
7423 43134 : AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7424 43134 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7425 43134 : Path *cheapest_partial_path = NULL;
7426 43134 : Path *cheapest_total_path = NULL;
7427 43134 : double dNumPartialGroups = 0;
7428 43134 : double dNumPartialPartialGroups = 0;
7429 : ListCell *lc;
7430 43134 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7431 43134 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7432 :
7433 : /*
7434 : * Check whether any partially aggregated paths have been generated
7435 : * through eager aggregation.
7436 : */
7437 43134 : if (input_rel->grouped_rel &&
7438 958 : !IS_DUMMY_REL(input_rel->grouped_rel) &&
7439 958 : input_rel->grouped_rel->pathlist != NIL)
7440 898 : eager_agg_rel = input_rel->grouped_rel;
7441 :
7442 : /*
7443 : * Consider whether we should generate partially aggregated non-partial
7444 : * paths. We can only do this if we have a non-partial path, and only if
7445 : * the parent of the input rel is performing partial partitionwise
7446 : * aggregation. (Note that extra->patype is the type of partitionwise
7447 : * aggregation being used at the parent level, not this level.)
7448 : */
7449 43134 : if (input_rel->pathlist != NIL &&
7450 43134 : extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7451 858 : cheapest_total_path = input_rel->cheapest_total_path;
7452 :
7453 : /*
7454 : * If parallelism is possible for grouped_rel, then we should consider
7455 : * generating partially-grouped partial paths. However, if the input rel
7456 : * has no partial paths, then we can't.
7457 : */
7458 43134 : if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7459 3274 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
7460 :
7461 : /*
7462 : * If we can't partially aggregate partial paths, and we can't partially
7463 : * aggregate non-partial paths, and no partially aggregated paths were
7464 : * generated by eager aggregation, then don't bother creating the new
7465 : * RelOptInfo at all, unless the caller specified force_rel_creation.
7466 : */
7467 43134 : if (cheapest_total_path == NULL &&
7468 39506 : cheapest_partial_path == NULL &&
7469 39376 : eager_agg_rel == NULL &&
7470 39376 : !force_rel_creation)
7471 39278 : return NULL;
7472 :
7473 : /*
7474 : * Build a new upper relation to represent the result of partially
7475 : * aggregating the rows from the input relation.
7476 : */
7477 3856 : partially_grouped_rel = fetch_upper_rel(root,
7478 : UPPERREL_PARTIAL_GROUP_AGG,
7479 : grouped_rel->relids);
7480 3856 : partially_grouped_rel->consider_parallel =
7481 3856 : grouped_rel->consider_parallel;
7482 3856 : partially_grouped_rel->pgs_mask = grouped_rel->pgs_mask;
7483 3856 : partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7484 3856 : partially_grouped_rel->serverid = grouped_rel->serverid;
7485 3856 : partially_grouped_rel->userid = grouped_rel->userid;
7486 3856 : partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7487 3856 : partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7488 :
7489 : /*
7490 : * Build target list for partial aggregate paths. These paths cannot just
7491 : * emit the same tlist as regular aggregate paths, because (1) we must
7492 : * include Vars and Aggrefs needed in HAVING, which might not appear in
7493 : * the result tlist, and (2) the Aggrefs must be set in partial mode.
7494 : */
7495 3856 : partially_grouped_rel->reltarget =
7496 3856 : make_partial_grouping_target(root, grouped_rel->reltarget,
7497 : extra->havingQual);
7498 :
7499 3856 : if (!extra->partial_costs_set)
7500 : {
7501 : /*
7502 : * Collect statistics about aggregates for estimating costs of
7503 : * performing aggregation in parallel.
7504 : */
7505 13884 : MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7506 13884 : MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7507 2314 : if (parse->hasAggs)
7508 : {
7509 : /* partial phase */
7510 2180 : get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7511 : agg_partial_costs);
7512 :
7513 : /* final phase */
7514 2180 : get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7515 : agg_final_costs);
7516 : }
7517 :
7518 2314 : extra->partial_costs_set = true;
7519 : }
7520 :
7521 : /* Estimate number of partial groups. */
7522 3856 : if (cheapest_total_path != NULL)
7523 : dNumPartialGroups =
7524 858 : get_number_of_groups(root,
7525 : cheapest_total_path->rows,
7526 : gd,
7527 : extra->targetList);
7528 3856 : if (cheapest_partial_path != NULL)
7529 : dNumPartialPartialGroups =
7530 3274 : get_number_of_groups(root,
7531 : cheapest_partial_path->rows,
7532 : gd,
7533 : extra->targetList);
7534 :
7535 3856 : if (can_sort && cheapest_total_path != NULL)
7536 : {
7537 : /* This should have been checked previously */
7538 : Assert(parse->hasAggs || parse->groupClause);
7539 :
7540 : /*
7541 : * Use any available suitably-sorted path as input, and also consider
7542 : * sorting the cheapest partial path.
7543 : */
7544 1716 : foreach(lc, input_rel->pathlist)
7545 : {
7546 : ListCell *lc2;
7547 858 : Path *path = (Path *) lfirst(lc);
7548 858 : Path *path_save = path;
7549 858 : List *pathkey_orderings = NIL;
7550 :
7551 : /* generate alternative group orderings that might be useful */
7552 858 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7553 :
7554 : Assert(list_length(pathkey_orderings) > 0);
7555 :
7556 : /* process all potentially interesting grouping reorderings */
7557 1716 : foreach(lc2, pathkey_orderings)
7558 : {
7559 858 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7560 :
7561 : /* restore the path (we replace it in the loop) */
7562 858 : path = path_save;
7563 :
7564 858 : path = make_ordered_path(root,
7565 : partially_grouped_rel,
7566 : path,
7567 : cheapest_total_path,
7568 : info->pathkeys,
7569 : -1.0);
7570 :
7571 858 : if (path == NULL)
7572 0 : continue;
7573 :
7574 858 : if (parse->hasAggs)
7575 786 : add_path(partially_grouped_rel, (Path *)
7576 786 : create_agg_path(root,
7577 : partially_grouped_rel,
7578 : path,
7579 786 : partially_grouped_rel->reltarget,
7580 786 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7581 : AGGSPLIT_INITIAL_SERIAL,
7582 : info->clauses,
7583 : NIL,
7584 : agg_partial_costs,
7585 : dNumPartialGroups));
7586 : else
7587 72 : add_path(partially_grouped_rel, (Path *)
7588 72 : create_group_path(root,
7589 : partially_grouped_rel,
7590 : path,
7591 : info->clauses,
7592 : NIL,
7593 : dNumPartialGroups));
7594 : }
7595 : }
7596 : }
7597 :
7598 3856 : if (can_sort && cheapest_partial_path != NULL)
7599 : {
7600 : /* Similar to above logic, but for partial paths. */
7601 7070 : foreach(lc, input_rel->partial_pathlist)
7602 : {
7603 : ListCell *lc2;
7604 3796 : Path *path = (Path *) lfirst(lc);
7605 3796 : Path *path_save = path;
7606 3796 : List *pathkey_orderings = NIL;
7607 :
7608 : /* generate alternative group orderings that might be useful */
7609 3796 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7610 :
7611 : Assert(list_length(pathkey_orderings) > 0);
7612 :
7613 : /* process all potentially interesting grouping reorderings */
7614 7592 : foreach(lc2, pathkey_orderings)
7615 : {
7616 3796 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7617 :
7619 : /* restore the path (we replace it in the loop) */
7620 3796 : path = path_save;
7621 :
7622 3796 : path = make_ordered_path(root,
7623 : partially_grouped_rel,
7624 : path,
7625 : cheapest_partial_path,
7626 : info->pathkeys,
7627 : -1.0);
7628 :
7629 3796 : if (path == NULL)
7630 6 : continue;
7631 :
7632 3790 : if (parse->hasAggs)
7633 3668 : add_partial_path(partially_grouped_rel, (Path *)
7634 3668 : create_agg_path(root,
7635 : partially_grouped_rel,
7636 : path,
7637 3668 : partially_grouped_rel->reltarget,
7638 3668 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7639 : AGGSPLIT_INITIAL_SERIAL,
7640 : info->clauses,
7641 : NIL,
7642 : agg_partial_costs,
7643 : dNumPartialPartialGroups));
7644 : else
7645 122 : add_partial_path(partially_grouped_rel, (Path *)
7646 122 : create_group_path(root,
7647 : partially_grouped_rel,
7648 : path,
7649 : info->clauses,
7650 : NIL,
7651 : dNumPartialPartialGroups));
7652 : }
7653 : }
7654 : }
7655 :
7656 : /*
7657 : * Add a partially-grouped HashAgg Path where possible
7658 : */
7659 3856 : if (can_hash && cheapest_total_path != NULL)
7660 : {
7661 : /* Checked above */
7662 : Assert(parse->hasAggs || parse->groupClause);
7663 :
7664 858 : add_path(partially_grouped_rel, (Path *)
7665 858 : create_agg_path(root,
7666 : partially_grouped_rel,
7667 : cheapest_total_path,
7668 858 : partially_grouped_rel->reltarget,
7669 : AGG_HASHED,
7670 : AGGSPLIT_INITIAL_SERIAL,
7671 : root->processed_groupClause,
7672 : NIL,
7673 : agg_partial_costs,
7674 : dNumPartialGroups));
7675 : }
7676 :
7677 : /*
7678 : * Now add a partially-grouped HashAgg partial Path where possible
7679 : */
7680 3856 : if (can_hash && cheapest_partial_path != NULL)
7681 : {
7682 1724 : add_partial_path(partially_grouped_rel, (Path *)
7683 1724 : create_agg_path(root,
7684 : partially_grouped_rel,
7685 : cheapest_partial_path,
7686 1724 : partially_grouped_rel->reltarget,
7687 : AGG_HASHED,
7688 : AGGSPLIT_INITIAL_SERIAL,
7689 : root->processed_groupClause,
7690 : NIL,
7691 : agg_partial_costs,
7692 : dNumPartialPartialGroups));
7693 : }
7694 :
7695 : /*
7696 : * Add any partially aggregated paths generated by eager aggregation to
7697 : * the new upper relation after applying projection steps as needed.
7698 : */
7699 3856 : if (eager_agg_rel)
7700 : {
7701 : /* Add the paths */
7702 2348 : foreach(lc, eager_agg_rel->pathlist)
7703 : {
7704 1450 : Path *path = (Path *) lfirst(lc);
7705 :
7706 : /* Shouldn't have any parameterized paths anymore */
7707 : Assert(path->param_info == NULL);
7708 :
7709 1450 : path = (Path *) create_projection_path(root,
7710 : partially_grouped_rel,
7711 : path,
7712 1450 : partially_grouped_rel->reltarget);
7713 :
7714 1450 : add_path(partially_grouped_rel, path);
7715 : }
7716 :
7717 : /*
7718 : * Likewise add the partial paths, but only if parallelism is possible
7719 : * for partially_grouped_rel.
7720 : */
7721 898 : if (partially_grouped_rel->consider_parallel)
7722 : {
7723 2028 : foreach(lc, eager_agg_rel->partial_pathlist)
7724 : {
7725 1212 : Path *path = (Path *) lfirst(lc);
7726 :
7727 : /* Shouldn't have any parameterized paths anymore */
7728 : Assert(path->param_info == NULL);
7729 :
7730 1212 : path = (Path *) create_projection_path(root,
7731 : partially_grouped_rel,
7732 : path,
7733 1212 : partially_grouped_rel->reltarget);
7734 :
7735 1212 : add_partial_path(partially_grouped_rel, path);
7736 : }
7737 : }
7738 : }
7739 :
7740 : /*
7741 : * If there is an FDW that's responsible for all baserels of the query,
7742 : * let it consider adding partially grouped ForeignPaths.
7743 : */
7744 3856 : if (partially_grouped_rel->fdwroutine &&
7745 6 : partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7746 : {
7747 6 : FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7748 :
7749 6 : fdwroutine->GetForeignUpperPaths(root,
7750 : UPPERREL_PARTIAL_GROUP_AGG,
7751 : input_rel, partially_grouped_rel,
7752 : extra);
7753 : }
7754 :
7755 3856 : return partially_grouped_rel;
7756 : }
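/*
 * Illustrative sketch (editorial addition, not part of planner.c): the
 * AGGSPLIT_INITIAL_SERIAL paths built above become the lower half of a
 * two-phase parallel aggregation plan.  For a hypothetical query
 *
 *     SELECT a, count(*) FROM t GROUP BY a;
 *
 * the finished plan typically has a shape along these lines (sketch only,
 * not actual EXPLAIN output):
 *
 *     Finalize HashAggregate
 *       ->  Gather
 *             ->  Partial HashAggregate
 *                   ->  Parallel Seq Scan on t
 *
 * The "Partial" nodes correspond to the paths created here; the Finalize
 * step is added later, on top of the Gather or Gather Merge.
 */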
7757 :
7758 : /*
7759 : * make_ordered_path
7760 : * Return a path ordered by 'pathkeys' based on the given 'path'. May
7761 : * return NULL if it doesn't make sense to generate an ordered path in
7762 : * this case.
7763 : */
7764 : static Path *
7765 66390 : make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
7766 : Path *cheapest_path, List *pathkeys, double limit_tuples)
7767 : {
7768 : bool is_sorted;
7769 : int presorted_keys;
7770 :
7771 66390 : is_sorted = pathkeys_count_contained_in(pathkeys,
7772 : path->pathkeys,
7773 : &presorted_keys);
7774 :
7775 66390 : if (!is_sorted)
7776 : {
7777 : /*
7778 : * Try at least sorting the cheapest path and also try incrementally
7779 : * sorting any path which is partially sorted already (no need to deal
7780 : * with paths which have presorted keys when incremental sort is
7781 : * disabled unless it's the cheapest input path).
7782 : */
7783 17132 : if (path != cheapest_path &&
7784 3312 : (presorted_keys == 0 || !enable_incremental_sort))
7785 1486 : return NULL;
7786 :
7787 : /*
7788 : * We've no need to consider both a sort and incremental sort. We'll
7789 : * just do a sort if there are no presorted keys and an incremental
7790 : * sort when there are presorted keys.
7791 : */
7792 15646 : if (presorted_keys == 0 || !enable_incremental_sort)
7793 13646 : path = (Path *) create_sort_path(root,
7794 : rel,
7795 : path,
7796 : pathkeys,
7797 : limit_tuples);
7798 : else
7799 2000 : path = (Path *) create_incremental_sort_path(root,
7800 : rel,
7801 : path,
7802 : pathkeys,
7803 : presorted_keys,
7804 : limit_tuples);
7805 : }
7806 :
7807 64904 : return path;
7808 : }
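/*
 * Illustrative note (editorial addition): make_ordered_path() prefers an
 * incremental sort when the input is already sorted on a useful prefix of
 * the requested pathkeys and enable_incremental_sort is on.  For a
 * hypothetical table with an index on (a) and a GROUP BY a, b, an index
 * scan supplies presorted_keys = 1, so an Incremental Sort on (a, b) is
 * stacked on top of it; with no presorted prefix, only the cheapest input
 * path is considered and it receives a full Sort, while other unsorted
 * paths are rejected by returning NULL.
 */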
7809 :
7810 : /*
7811 : * Generate Gather and Gather Merge paths for a grouping relation or partial
7812 : * grouping relation.
7813 : *
7814 : * generate_useful_gather_paths does most of the work, but we also consider a
7815 : * special case: we could try sorting the data by the group_pathkeys and then
7816 : * applying Gather Merge.
7817 : *
7818 : * NB: This function shouldn't be used for anything other than a grouped or
7819 : * partially grouped relation, not only because it explicitly references
7820 : * group_pathkeys but also because we pass "true" as the third argument to
7821 : * generate_useful_gather_paths().
7822 : */
7823 : static void
7824 3088 : gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7825 : {
7826 : ListCell *lc;
7827 : Path *cheapest_partial_path;
7828 : List *groupby_pathkeys;
7829 :
7830 : /*
7831 : * This occurs after any partial aggregation has taken place, so trim off
7832 : * any pathkeys added for ORDER BY / DISTINCT aggregates.
7833 : */
7834 3088 : if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7835 18 : groupby_pathkeys = list_copy_head(root->group_pathkeys,
7836 : root->num_groupby_pathkeys);
7837 : else
7838 3070 : groupby_pathkeys = root->group_pathkeys;
7839 :
7840 : /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7841 3088 : generate_useful_gather_paths(root, rel, true);
7842 :
7843 3088 : cheapest_partial_path = linitial(rel->partial_pathlist);
7844 :
7845 : /* XXX Shouldn't this also consider the group-key-reordering? */
7846 7304 : foreach(lc, rel->partial_pathlist)
7847 : {
7848 4216 : Path *path = (Path *) lfirst(lc);
7849 : bool is_sorted;
7850 : int presorted_keys;
7851 : double total_groups;
7852 :
7853 4216 : is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7854 : path->pathkeys,
7855 : &presorted_keys);
7856 :
7857 4216 : if (is_sorted)
7858 2758 : continue;
7859 :
7860 : /*
7861 : * Try at least sorting the cheapest path and also try incrementally
7862 : * sorting any path which is partially sorted already (no need to deal
7863 : * with paths which have presorted keys when incremental sort is
7864 : * disabled unless it's the cheapest input path).
7865 : */
7866 1458 : if (path != cheapest_partial_path &&
7867 0 : (presorted_keys == 0 || !enable_incremental_sort))
7868 0 : continue;
7869 :
7870 : /*
7871 : * We've no need to consider both a sort and incremental sort. We'll
7872 : * just do a sort if there are no presorted keys and an incremental
7873 : * sort when there are presorted keys.
7874 : */
7875 1458 : if (presorted_keys == 0 || !enable_incremental_sort)
7876 1458 : path = (Path *) create_sort_path(root, rel, path,
7877 : groupby_pathkeys,
7878 : -1.0);
7879 : else
7880 0 : path = (Path *) create_incremental_sort_path(root,
7881 : rel,
7882 : path,
7883 : groupby_pathkeys,
7884 : presorted_keys,
7885 : -1.0);
7886 1458 : total_groups = compute_gather_rows(path);
7887 : path = (Path *)
7888 1458 : create_gather_merge_path(root,
7889 : rel,
7890 : path,
7891 1458 : rel->reltarget,
7892 : groupby_pathkeys,
7893 : NULL,
7894 : &total_groups);
7895 :
7896 1458 : add_path(rel, path);
7897 : }
7898 3088 : }
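/*
 * Illustrative sketch (editorial addition): the extra Sort + Gather Merge
 * combination considered above typically shows up in plans of this shape
 * (sketch only, hypothetical table):
 *
 *     Finalize GroupAggregate
 *       ->  Gather Merge
 *             ->  Sort
 *                   ->  Partial HashAggregate
 *                         ->  Parallel Seq Scan on t
 *
 * i.e. each worker aggregates its share of the rows, sorts its partial
 * results by the grouping keys, and the leader merges the sorted streams
 * so a sort-based Finalize step can run without re-sorting everything.
 */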
7899 :
7900 : /*
7901 : * can_partial_agg
7902 : *
7903 : * Determines whether or not partial grouping and/or aggregation is possible.
7904 : * Returns true when possible, false otherwise.
7905 : */
7906 : static bool
7907 45936 : can_partial_agg(PlannerInfo *root)
7908 : {
7909 45936 : Query *parse = root->parse;
7910 :
7911 45936 : if (!parse->hasAggs && parse->groupClause == NIL)
7912 : {
7913 : /*
7914 : * We don't know how to do parallel aggregation unless we have either
7915 : * some aggregates or a grouping clause.
7916 : */
7917 0 : return false;
7918 : }
7919 45936 : else if (parse->groupingSets)
7920 : {
7921 : /* We don't know how to do grouping sets in parallel. */
7922 996 : return false;
7923 : }
7924 44940 : else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7925 : {
7926 : /* Insufficient support for partial mode. */
7927 3900 : return false;
7928 : }
7929 :
7930 : /* Everything looks good. */
7931 41040 : return true;
7932 : }
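/*
 * Illustrative examples (editorial addition, hypothetical tables/columns)
 * of queries for which can_partial_agg() returns false:
 *
 *     SELECT a, b, sum(x) FROM t GROUP BY GROUPING SETS ((a), (b));
 *         -- grouping sets cannot be split into partial and final phases
 *
 *     SELECT count(DISTINCT x) FROM t;
 *         -- DISTINCT (or ORDER BY) inside an aggregate sets
 *         -- root->hasNonPartialAggs, ruling out partial aggregation
 *
 * Ordinary aggregates such as sum(), avg() or count(*) with a plain
 * GROUP BY normally do allow partial aggregation.
 */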
7933 :
7934 : /*
7935 : * apply_scanjoin_target_to_paths
7936 : *
7937 : * Adjust the final scan/join relation, and recursively all of its children,
7938 : * to generate the final scan/join target. It would be more correct to model
7939 : * this as a separate planning step with a new RelOptInfo at the toplevel and
7940 : * for each child relation, but doing it this way is noticeably cheaper.
7941 : * Maybe that problem can be solved at some point, but for now we do this.
7942 : *
7943 : * If tlist_same_exprs is true, then the scan/join target to be applied has
7944 : * the same expressions as the existing reltarget, so we need only insert the
7945 : * appropriate sortgroupref information. By avoiding the creation of
7946 : * projection paths we save effort both immediately and at plan creation time.
7947 : */
7948 : static void
7949 568576 : apply_scanjoin_target_to_paths(PlannerInfo *root,
7950 : RelOptInfo *rel,
7951 : List *scanjoin_targets,
7952 : List *scanjoin_targets_contain_srfs,
7953 : bool scanjoin_target_parallel_safe,
7954 : bool tlist_same_exprs)
7955 : {
7956 568576 : bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7957 : PathTarget *scanjoin_target;
7958 : ListCell *lc;
7959 :
7960 : /* This recurses, so be paranoid. */
7961 568576 : check_stack_depth();
7962 :
7963 : /*
7964 : * If the rel only has Append and MergeAppend paths, we want to drop its
7965 : * existing paths and generate new ones. This function would still be
7966 : * correct if we kept the existing paths: we'd modify them to generate the
7967 : * correct target above the partitioning Append, and then they'd compete
7968 : * on cost with paths generating the target below the Append. However, in
7969 : * our current cost model the latter way is always the same or cheaper
7970 : * cost, so modifying the existing paths would just be useless work.
7971 : * Moreover, when the cost is the same, varying roundoff errors might
7972 : * sometimes allow an existing path to be picked, resulting in undesirable
7973 : * cross-platform plan variations. So we drop old paths and thereby force
7974 : * the work to be done below the Append.
7975 : *
7976 : * However, there are several cases when this optimization is not safe. If
7977 : * the rel isn't partitioned, then none of the paths will be Append or
7978 : * MergeAppend paths, so we should definitely not do this. If it is
7979 : * partitioned but is a joinrel, it may have Append and MergeAppend paths,
7980 : * but it can also have join paths that we can't afford to discard.
7981 : *
7982 : * Some care is needed, because we have to allow
7983 : * generate_useful_gather_paths to see the old partial paths in the next
7984 : * stanza. Hence, zap the main pathlist here, then allow
7985 : * generate_useful_gather_paths to add path(s) to the main list, and
7986 : * finally zap the partial pathlist.
7987 : */
7988 568576 : if (rel_is_partitioned && IS_SIMPLE_REL(rel))
7989 11722 : rel->pathlist = NIL;
7990 :
7991 : /*
7992 : * If the scan/join target is not parallel-safe, partial paths cannot
7993 : * generate it.
7994 : */
7995 568576 : if (!scanjoin_target_parallel_safe)
7996 : {
7997 : /*
7998 : * Since we can't generate the final scan/join target in parallel
7999 : * workers, this is our last opportunity to use any partial paths that
8000 : * exist; so build Gather path(s) that use them and emit whatever the
8001 : * current reltarget is. We don't do this in the case where the
8002 : * target is parallel-safe, since we will be able to generate superior
8003 : * paths by doing it after the final scan/join target has been
8004 : * applied.
8005 : */
8006 81372 : generate_useful_gather_paths(root, rel, false);
8007 :
8008 : /* Can't use parallel query above this level. */
8009 81372 : rel->partial_pathlist = NIL;
8010 81372 : rel->consider_parallel = false;
8011 : }
8012 :
8013 : /* Finish dropping old paths for a partitioned rel, per comment above */
8014 568576 : if (rel_is_partitioned && IS_SIMPLE_REL(rel))
8015 11722 : rel->partial_pathlist = NIL;
8016 :
8017 : /* Extract SRF-free scan/join target. */
8018 568576 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
8019 :
8020 : /*
8021 : * Apply the SRF-free scan/join target to each existing path.
8022 : *
8023 : * If the tlist exprs are the same, we can just inject the sortgroupref
8024 : * information into the existing pathtargets. Otherwise, replace each
8025 : * path with a projection path that generates the SRF-free scan/join
8026 : * target. This can't change the ordering of paths within rel->pathlist,
8027 : * so we just modify the list in place.
8028 : */
8029 1181092 : foreach(lc, rel->pathlist)
8030 : {
8031 612516 : Path *subpath = (Path *) lfirst(lc);
8032 :
8033 : /* Shouldn't have any parameterized paths anymore */
8034 : Assert(subpath->param_info == NULL);
8035 :
8036 612516 : if (tlist_same_exprs)
8037 216344 : subpath->pathtarget->sortgrouprefs =
8038 216344 : scanjoin_target->sortgrouprefs;
8039 : else
8040 : {
8041 : Path *newpath;
8042 :
8043 396172 : newpath = (Path *) create_projection_path(root, rel, subpath,
8044 : scanjoin_target);
8045 396172 : lfirst(lc) = newpath;
8046 : }
8047 : }
8048 :
8049 : /* Likewise adjust the targets for any partial paths. */
8050 593688 : foreach(lc, rel->partial_pathlist)
8051 : {
8052 25112 : Path *subpath = (Path *) lfirst(lc);
8053 :
8054 : /* Shouldn't have any parameterized paths anymore */
8055 : Assert(subpath->param_info == NULL);
8056 :
8057 25112 : if (tlist_same_exprs)
8058 20108 : subpath->pathtarget->sortgrouprefs =
8059 20108 : scanjoin_target->sortgrouprefs;
8060 : else
8061 : {
8062 : Path *newpath;
8063 :
8064 5004 : newpath = (Path *) create_projection_path(root, rel, subpath,
8065 : scanjoin_target);
8066 5004 : lfirst(lc) = newpath;
8067 : }
8068 : }
8069 :
8070 : /*
8071 : * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
8072 : * atop each existing path. (Note that this function doesn't look at the
8073 : * cheapest-path fields, which is a good thing because they're bogus right
8074 : * now.)
8075 : */
8076 568576 : if (root->parse->hasTargetSRFs)
8077 12088 : adjust_paths_for_srfs(root, rel,
8078 : scanjoin_targets,
8079 : scanjoin_targets_contain_srfs);
8080 :
8081 : /*
8082 : * Update the rel's target to be the final (with SRFs) scan/join target.
8083 : * This now matches the actual output of all the paths, and we might get
8084 : * confused in createplan.c if they don't agree. We must do this now so
8085 : * that any append paths made in the next part will use the correct
8086 : * pathtarget (cf. create_append_path).
8087 : *
8088 : * Note that this is also necessary if GetForeignUpperPaths() gets called
8089 : * on the final scan/join relation or on any of its children, since the
8090 : * FDW might look at the rel's target to create ForeignPaths.
8091 : */
8092 568576 : rel->reltarget = llast_node(PathTarget, scanjoin_targets);
8093 :
8094 : /*
8095 : * If the relation is partitioned, recursively apply the scan/join target
8096 : * to all partitions, and generate brand-new Append paths in which the
8097 : * scan/join target is computed below the Append rather than above it.
8098 : * Since Append is not projection-capable, that might save a separate
8099 : * Result node, and it also is important for partitionwise aggregate.
8100 : */
8101 568576 : if (rel_is_partitioned)
8102 : {
8103 13288 : List *live_children = NIL;
8104 : int i;
8105 :
8106 : /* Adjust each partition. */
8107 13288 : i = -1;
8108 38016 : while ((i = bms_next_member(rel->live_parts, i)) >= 0)
8109 : {
8110 24728 : RelOptInfo *child_rel = rel->part_rels[i];
8111 : AppendRelInfo **appinfos;
8112 : int nappinfos;
8113 24728 : List *child_scanjoin_targets = NIL;
8114 :
8115 : Assert(child_rel != NULL);
8116 :
8117 : /* Dummy children can be ignored. */
8118 24728 : if (IS_DUMMY_REL(child_rel))
8119 42 : continue;
8120 :
8121 : /* Translate scan/join targets for this child. */
8122 24686 : appinfos = find_appinfos_by_relids(root, child_rel->relids,
8123 : &nappinfos);
8124 49372 : foreach(lc, scanjoin_targets)
8125 : {
8126 24686 : PathTarget *target = lfirst_node(PathTarget, lc);
8127 :
8128 24686 : target = copy_pathtarget(target);
8129 24686 : target->exprs = (List *)
8130 24686 : adjust_appendrel_attrs(root,
8131 24686 : (Node *) target->exprs,
8132 : nappinfos, appinfos);
8133 24686 : child_scanjoin_targets = lappend(child_scanjoin_targets,
8134 : target);
8135 : }
8136 24686 : pfree(appinfos);
8137 :
8138 : /* Recursion does the real work. */
8139 24686 : apply_scanjoin_target_to_paths(root, child_rel,
8140 : child_scanjoin_targets,
8141 : scanjoin_targets_contain_srfs,
8142 : scanjoin_target_parallel_safe,
8143 : tlist_same_exprs);
8144 :
8145 : /* Save non-dummy children for Append paths. */
8146 24686 : if (!IS_DUMMY_REL(child_rel))
8147 24686 : live_children = lappend(live_children, child_rel);
8148 : }
8149 :
8150 : /* Build new paths for this relation by appending child paths. */
8151 13288 : add_paths_to_append_rel(root, rel, live_children);
8152 : }
8153 :
8154 : /*
8155 : * Consider generating Gather or Gather Merge paths. We must only do this
8156 : * if the relation is parallel safe, and we don't do it for child rels to
8157 : * avoid creating multiple Gather nodes within the same plan. We must do
8158 : * this after all paths have been generated and before set_cheapest, since
8159 : * one of the generated paths may turn out to be the cheapest one.
8160 : */
8161 568576 : if (rel->consider_parallel && !IS_OTHER_REL(rel))
8162 180916 : generate_useful_gather_paths(root, rel, false);
8163 :
8164 : /*
8165 : * Reassess which paths are the cheapest, now that we've potentially added
8166 : * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
8167 : * this relation.
8168 : */
8169 568576 : set_cheapest(rel);
8170 568576 : }
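/*
 * Illustrative note (editorial addition): when the final target contains
 * set-returning functions, adjust_paths_for_srfs() places a ProjectSet
 * path above each scan/join path.  For a hypothetical query
 *
 *     SELECT a, generate_series(1, 3) FROM t;
 *
 * the relevant plan fragment is typically
 *
 *     ProjectSet
 *       ->  Seq Scan on t
 *
 * so the SRF is expanded above the scan rather than evaluated inside the
 * scan's target list.
 */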
8171 :
8172 : /*
8173 : * create_partitionwise_grouping_paths
8174 : *
8175 : * If the partition keys of the input relation are part of the GROUP BY clause, all
8176 : * the rows belonging to a given group come from a single partition. This
8177 : * allows aggregation/grouping over a partitioned relation to be broken down
8178 : * into aggregation/grouping on each partition. This should be no worse, and
8179 : * often better, than the normal approach.
8180 : *
8181 : * However, if the GROUP BY clause does not contain all the partition keys,
8182 : * rows from a given group may be spread across multiple partitions. In that
8183 : * case, we perform partial aggregation for each group, append the results,
8184 : * and then finalize aggregation. This is less certain to win than the
8185 : * previous case. It may win if the PartialAggregate stage greatly reduces
8186 : * the number of groups, because fewer rows will pass through the Append node.
8187 : * It may lose if we have lots of small groups.
8188 : */
8189 : static void
8190 826 : create_partitionwise_grouping_paths(PlannerInfo *root,
8191 : RelOptInfo *input_rel,
8192 : RelOptInfo *grouped_rel,
8193 : RelOptInfo *partially_grouped_rel,
8194 : const AggClauseCosts *agg_costs,
8195 : grouping_sets_data *gd,
8196 : PartitionwiseAggregateType patype,
8197 : GroupPathExtraData *extra)
8198 : {
8199 826 : List *grouped_live_children = NIL;
8200 826 : List *partially_grouped_live_children = NIL;
8201 826 : PathTarget *target = grouped_rel->reltarget;
8202 826 : bool partial_grouping_valid = true;
8203 : int i;
8204 :
8205 : Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
8206 : Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
8207 : partially_grouped_rel != NULL);
8208 :
8209 : /* Add paths for partitionwise aggregation/grouping. */
8210 826 : i = -1;
8211 2992 : while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8212 : {
8213 2166 : RelOptInfo *child_input_rel = input_rel->part_rels[i];
8214 : PathTarget *child_target;
8215 : AppendRelInfo **appinfos;
8216 : int nappinfos;
8217 : GroupPathExtraData child_extra;
8218 : RelOptInfo *child_grouped_rel;
8219 : RelOptInfo *child_partially_grouped_rel;
8220 :
8221 : Assert(child_input_rel != NULL);
8222 :
8223 : /* Dummy children can be ignored. */
8224 2166 : if (IS_DUMMY_REL(child_input_rel))
8225 0 : continue;
8226 :
8227 2166 : child_target = copy_pathtarget(target);
8228 :
8229 : /*
8230 : * Copy the given "extra" structure as is and then override the
8231 : * members specific to this child.
8232 : */
8233 2166 : memcpy(&child_extra, extra, sizeof(child_extra));
8234 :
8235 2166 : appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8236 : &nappinfos);
8237 :
8238 2166 : child_target->exprs = (List *)
8239 2166 : adjust_appendrel_attrs(root,
8240 2166 : (Node *) target->exprs,
8241 : nappinfos, appinfos);
8242 :
8243 : /* Translate havingQual and targetList. */
8244 2166 : child_extra.havingQual = (Node *)
8245 : adjust_appendrel_attrs(root,
8246 : extra->havingQual,
8247 : nappinfos, appinfos);
8248 2166 : child_extra.targetList = (List *)
8249 2166 : adjust_appendrel_attrs(root,
8250 2166 : (Node *) extra->targetList,
8251 : nappinfos, appinfos);
8252 :
8253 : /*
8254 : * extra->patype was the value computed for our parent rel; patype is
8255 : * the value for this relation. For the child, our value is its
8256 : * parent rel's value.
8257 : */
8258 2166 : child_extra.patype = patype;
8259 :
8260 : /*
8261 : * Create grouping relation to hold fully aggregated grouping and/or
8262 : * aggregation paths for the child.
8263 : */
8264 2166 : child_grouped_rel = make_grouping_rel(root, child_input_rel,
8265 : child_target,
8266 2166 : extra->target_parallel_safe,
8267 : child_extra.havingQual);
8268 :
8269 : /* Create grouping paths for this child relation. */
8270 2166 : create_ordinary_grouping_paths(root, child_input_rel,
8271 : child_grouped_rel,
8272 : agg_costs, gd, &child_extra,
8273 : &child_partially_grouped_rel);
8274 :
8275 2166 : if (child_partially_grouped_rel)
8276 : {
8277 : partially_grouped_live_children =
8278 1542 : lappend(partially_grouped_live_children,
8279 : child_partially_grouped_rel);
8280 : }
8281 : else
8282 624 : partial_grouping_valid = false;
8283 :
8284 2166 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8285 : {
8286 1308 : set_cheapest(child_grouped_rel);
8287 1308 : grouped_live_children = lappend(grouped_live_children,
8288 : child_grouped_rel);
8289 : }
8290 :
8291 2166 : pfree(appinfos);
8292 : }
8293 :
8294 : /*
8295 : * Try to create append paths for partially grouped children. For full
8296 : * partitionwise aggregation, we might have paths in the partial_pathlist
8297 : * if parallel aggregation is possible. For partial partitionwise
8298 : * aggregation, we may have paths in both pathlist and partial_pathlist.
8299 : *
8300 : * NB: We must have a partially grouped path for every child in order to
8301 : * generate a partially grouped path for this relation.
8302 : */
8303 826 : if (partially_grouped_rel && partial_grouping_valid)
8304 : {
8305 : Assert(partially_grouped_live_children != NIL);
8306 :
8307 602 : add_paths_to_append_rel(root, partially_grouped_rel,
8308 : partially_grouped_live_children);
8309 : }
8310 :
8311 : /* If possible, create append paths for fully grouped children. */
8312 826 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8313 : {
8314 : Assert(grouped_live_children != NIL);
8315 :
8316 488 : add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8317 : }
8318 826 : }
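/*
 * Illustrative examples (editorial addition): assuming a hypothetical
 * table t partitioned by (a) and enable_partitionwise_aggregate = on,
 *
 *     SELECT a, count(*) FROM t GROUP BY a;
 *         -- the GROUP BY contains the partition key, so each partition is
 *         -- aggregated completely on its own (PARTITIONWISE_AGGREGATE_FULL)
 *         -- and the per-partition results are simply appended
 *
 *     SELECT b, count(*) FROM t GROUP BY b;
 *         -- the GROUP BY lacks the partition key, so each partition is only
 *         -- partially aggregated; an Append combines the partial results and
 *         -- a Finalize Aggregate above it merges groups that span partitions
 *         -- (PARTITIONWISE_AGGREGATE_PARTIAL)
 */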
8319 :
8320 : /*
8321 : * group_by_has_partkey
8322 : *
8323 : * Returns true if all the partition keys of the given relation are part of
8324 : * the GROUP BY clause with matching collations, false otherwise.
8325 : */
8326 : static bool
8327 772 : group_by_has_partkey(RelOptInfo *input_rel,
8328 : List *targetList,
8329 : List *groupClause)
8330 : {
8331 772 : List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8332 772 : int cnt = 0;
8333 : int partnatts;
8334 :
8335 : /* Input relation should be partitioned. */
8336 : Assert(input_rel->part_scheme);
8337 :
8338 : /* Rule out early, if there are no partition keys present. */
8339 772 : if (!input_rel->partexprs)
8340 0 : return false;
8341 :
8342 772 : partnatts = input_rel->part_scheme->partnatts;
8343 :
8344 1296 : for (cnt = 0; cnt < partnatts; cnt++)
8345 : {
8346 808 : List *partexprs = input_rel->partexprs[cnt];
8347 : ListCell *lc;
8348 808 : bool found = false;
8349 :
8350 1206 : foreach(lc, partexprs)
8351 : {
8352 : ListCell *lg;
8353 934 : Expr *partexpr = lfirst(lc);
8354 934 : Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8355 :
8356 1452 : foreach(lg, groupexprs)
8357 : {
8358 1054 : Expr *groupexpr = lfirst(lg);
8359 1054 : Oid groupcoll = exprCollation((Node *) groupexpr);
8360 :
8361 : /*
8362 : * Note: we can assume there is at most one RelabelType node;
8363 : * eval_const_expressions() will have simplified if more than
8364 : * one.
8365 : */
8366 1054 : if (IsA(groupexpr, RelabelType))
8367 24 : groupexpr = ((RelabelType *) groupexpr)->arg;
8368 :
8369 1054 : if (equal(groupexpr, partexpr))
8370 : {
8371 : /*
8372 : * Reject a match if the grouping collation does not match
8373 : * the partitioning collation.
8374 : */
8375 536 : if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8376 : partcoll != groupcoll)
8377 12 : return false;
8378 :
8379 524 : found = true;
8380 524 : break;
8381 : }
8382 : }
8383 :
8384 922 : if (found)
8385 524 : break;
8386 : }
8387 :
8388 : /*
8389 : * If none of the partition key expressions match with any of the
8390 : * GROUP BY expression, return false.
8391 : */
8392 796 : if (!found)
8393 272 : return false;
8394 : }
8395 :
8396 488 : return true;
8397 : }
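/*
 * Illustrative note (editorial addition): the collation check above matters
 * mostly for text partition keys.  If a hypothetical table is partitioned
 * on (a COLLATE "C") but the query groups by a under a different collation,
 * values that the grouping collation considers equal might still be routed
 * to different partitions, so the match is rejected and full partitionwise
 * grouping is not attempted.
 */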
8398 :
8399 : /*
8400 : * generate_setop_child_grouplist
8401 : * Build a SortGroupClause list defining the sort/grouping properties
8402 : * of the child of a set operation.
8403 : *
8404 : * This is similar to generate_setop_grouplist() but differs as the setop
8405 : * child query's targetlist entries may already have a tleSortGroupRef
8406 : * assigned for other purposes, such as GROUP BYs. Here we keep the
8407 : * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8408 : * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8409 : * any of the columns in the targetlist don't match the setop's colTypes
8410 : * then we return an empty list. This may leave some TLEs with unreferenced
8411 : * ressortgroupref markings, but that's harmless.
8412 : */
8413 : static List *
8414 12776 : generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
8415 : {
8416 12776 : List *grouplist = copyObject(op->groupClauses);
8417 : ListCell *lg;
8418 : ListCell *lt;
8419 : ListCell *ct;
8420 :
8421 12776 : lg = list_head(grouplist);
8422 12776 : ct = list_head(op->colTypes);
8423 49254 : foreach(lt, targetlist)
8424 : {
8425 36892 : TargetEntry *tle = (TargetEntry *) lfirst(lt);
8426 : SortGroupClause *sgc;
8427 : Oid coltype;
8428 :
8429 : /* resjunk columns could have sortgrouprefs. Leave these alone */
8430 36892 : if (tle->resjunk)
8431 0 : continue;
8432 :
8433 : /*
8434 : * We expect every non-resjunk target to have a SortGroupClause and
8435 : * colTypes.
8436 : */
8437 : Assert(lg != NULL);
8438 : Assert(ct != NULL);
8439 36892 : sgc = (SortGroupClause *) lfirst(lg);
8440 36892 : coltype = lfirst_oid(ct);
8441 :
8442 : /* reject if target type isn't the same as the setop target type */
8443 36892 : if (coltype != exprType((Node *) tle->expr))
8444 414 : return NIL;
8445 :
8446 36478 : lg = lnext(grouplist, lg);
8447 36478 : ct = lnext(op->colTypes, ct);
8448 :
8449 : /* assign a tleSortGroupRef, or reuse the existing one */
8450 36478 : sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8451 : }
8452 :
8453 : Assert(lg == NULL);
8454 : Assert(ct == NULL);
8455 :
8456 12362 : return grouplist;
8457 : }
8458 :
8459 : /*
8460 : * create_unique_paths
8461 : * Build a new RelOptInfo containing Paths that represent elimination of
8462 : * Build a new RelOptInfo containing Paths that reduce the input data to
8463 : * distinct rows (i.e., eliminate duplicates). Distinct-ness is defined according to
8464 : * to identify how to make the data unique, NULL is returned.
8465 : *
8466 : * If used at all, this is likely to be called repeatedly on the same rel,
8467 : * so we cache the result.
8468 : */
8469 : RelOptInfo *
8470 8926 : create_unique_paths(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo)
8471 : {
8472 : RelOptInfo *unique_rel;
8473 8926 : List *sortPathkeys = NIL;
8474 8926 : List *groupClause = NIL;
8475 : MemoryContext oldcontext;
8476 :
8477 : /* Caller made a mistake if SpecialJoinInfo is the wrong one */
8478 : Assert(sjinfo->jointype == JOIN_SEMI);
8479 : Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
8480 :
8481 : /* If result already cached, return it */
8482 8926 : if (rel->unique_rel)
8483 1836 : return rel->unique_rel;
8484 :
8485 : /* If it's not possible to unique-ify, return NULL */
8486 7090 : if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
8487 132 : return NULL;
8488 :
8489 : /*
8490 : * Punt if this is a child relation and we failed to build a unique-ified
8491 : * relation for its parent. This can happen if all the RHS columns were
8492 : * found to be equated to constants when unique-ifying the parent table,
8493 : * leaving no columns to unique-ify.
8494 : */
8495 6958 : if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL)
8496 12 : return NULL;
8497 :
8498 : /*
8499 : * When called during GEQO join planning, we are in a short-lived memory
8500 : * context. We must make sure that the unique rel and any subsidiary data
8501 : * structures created for a baserel survive the GEQO cycle, else the
8502 : * baserel is trashed for future GEQO cycles. On the other hand, when we
8503 : * are creating those for a joinrel during GEQO, we don't want them to
8504 : * clutter the main planning context. Upshot is that the best solution is
8505 : * to explicitly allocate memory in the same context the given RelOptInfo
8506 : * is in.
8507 : */
8508 6946 : oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
8509 :
8510 6946 : unique_rel = makeNode(RelOptInfo);
8511 6946 : memcpy(unique_rel, rel, sizeof(RelOptInfo));
8512 :
8513 : /*
8514 : * clear path info
8515 : */
8516 6946 : unique_rel->pathlist = NIL;
8517 6946 : unique_rel->ppilist = NIL;
8518 6946 : unique_rel->partial_pathlist = NIL;
8519 6946 : unique_rel->cheapest_startup_path = NULL;
8520 6946 : unique_rel->cheapest_total_path = NULL;
8521 6946 : unique_rel->cheapest_parameterized_paths = NIL;
8522 :
8523 : /*
8524 : * Build the target list for the unique rel. We also build the pathkeys
8525 : * that represent the ordering requirements for the sort-based
8526 : * implementation, and the list of SortGroupClause nodes that represent
8527 : * the columns to be grouped on for the hash-based implementation.
8528 : *
8529 : * For a child rel, we can construct these fields from those of its
8530 : * parent.
8531 : */
8532 6946 : if (IS_OTHER_REL(rel))
8533 432 : {
8534 : PathTarget *child_unique_target;
8535 : PathTarget *parent_unique_target;
8536 :
8537 432 : parent_unique_target = rel->top_parent->unique_rel->reltarget;
8538 :
8539 432 : child_unique_target = copy_pathtarget(parent_unique_target);
8540 :
8541 : /* Translate the target expressions */
8542 432 : child_unique_target->exprs = (List *)
8543 432 : adjust_appendrel_attrs_multilevel(root,
8544 432 : (Node *) parent_unique_target->exprs,
8545 : rel,
8546 432 : rel->top_parent);
8547 :
8548 432 : unique_rel->reltarget = child_unique_target;
8549 :
8550 432 : sortPathkeys = rel->top_parent->unique_pathkeys;
8551 432 : groupClause = rel->top_parent->unique_groupclause;
8552 : }
8553 : else
8554 : {
8555 : List *newtlist;
8556 : int nextresno;
8557 6514 : List *sortList = NIL;
8558 : ListCell *lc1;
8559 : ListCell *lc2;
8560 :
8561 : /*
8562 : * The values we are supposed to unique-ify may be expressions in the
8563 : * variables of the input rel's targetlist. We have to add any such
8564 : * expressions to the unique rel's targetlist.
8565 : *
8566 : * To complicate matters, some of the values to be unique-ified may be
8567 : * known redundant by the EquivalenceClass machinery (e.g., because
8568 : * they have been equated to constants). There is no need to compare
8569 : * such values during unique-ification, and indeed we had better not
8570 : * try because the Vars involved may not have propagated as high as
8571 : * the semijoin's level. We use make_pathkeys_for_sortclauses to
8572 : * detect such cases, which is a tad inefficient but it doesn't seem
8573 : * worth building specialized infrastructure for this.
8574 : */
8575 6514 : newtlist = make_tlist_from_pathtarget(rel->reltarget);
8576 6514 : nextresno = list_length(newtlist) + 1;
8577 :
8578 13262 : forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators)
8579 : {
8580 6748 : Expr *uniqexpr = lfirst(lc1);
8581 6748 : Oid in_oper = lfirst_oid(lc2);
8582 : Oid sortop;
8583 : TargetEntry *tle;
8584 6748 : bool made_tle = false;
8585 :
8586 6748 : tle = tlist_member(uniqexpr, newtlist);
8587 6748 : if (!tle)
8588 : {
8589 3268 : tle = makeTargetEntry(uniqexpr,
8590 : nextresno,
8591 : NULL,
8592 : false);
8593 3268 : newtlist = lappend(newtlist, tle);
8594 3268 : nextresno++;
8595 3268 : made_tle = true;
8596 : }
8597 :
8598 : /*
8599 : * Try to build an ORDER BY list to sort the input compatibly. We
8600 : * do this for each sortable clause even when the clauses are not
8601 : * all sortable, so that we can detect clauses that are redundant
8602 : * according to the pathkey machinery.
8603 : */
8604 6748 : sortop = get_ordering_op_for_equality_op(in_oper, false);
8605 6748 : if (OidIsValid(sortop))
8606 : {
8607 : Oid eqop;
8608 : SortGroupClause *sortcl;
8609 :
8610 : /*
8611 : * The Unique node will need equality operators. Normally
8612 : * these are the same as the IN clause operators, but if those
8613 : * are cross-type operators then the equality operators are
8614 : * the ones for the IN clause operators' RHS datatype.
8615 : */
8616 6748 : eqop = get_equality_op_for_ordering_op(sortop, NULL);
8617 6748 : if (!OidIsValid(eqop)) /* shouldn't happen */
8618 0 : elog(ERROR, "could not find equality operator for ordering operator %u",
8619 : sortop);
8620 :
8621 6748 : sortcl = makeNode(SortGroupClause);
8622 6748 : sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8623 6748 : sortcl->eqop = eqop;
8624 6748 : sortcl->sortop = sortop;
8625 6748 : sortcl->reverse_sort = false;
8626 6748 : sortcl->nulls_first = false;
8627 6748 : sortcl->hashable = false; /* no need to make this accurate */
8628 6748 : sortList = lappend(sortList, sortcl);
8629 :
8630 : /*
8631 : * At each step, convert the SortGroupClause list to pathkey
8632 : * form. If the just-added SortGroupClause is redundant, the
8633 : * result will be shorter than the SortGroupClause list.
8634 : */
8635 6748 : sortPathkeys = make_pathkeys_for_sortclauses(root, sortList,
8636 : newtlist);
8637 6748 : if (list_length(sortPathkeys) != list_length(sortList))
8638 : {
8639 : /* Drop the redundant SortGroupClause */
8640 2052 : sortList = list_delete_last(sortList);
8641 : Assert(list_length(sortPathkeys) == list_length(sortList));
8642 : /* Undo tlist addition, if we made one */
8643 2052 : if (made_tle)
8644 : {
8645 12 : newtlist = list_delete_last(newtlist);
8646 12 : nextresno--;
8647 : }
8648 : /* We need not consider this clause for hashing, either */
8649 2052 : continue;
8650 : }
8651 : }
8652 0 : else if (sjinfo->semi_can_btree) /* shouldn't happen */
8653 0 : elog(ERROR, "could not find ordering operator for equality operator %u",
8654 : in_oper);
8655 :
8656 4696 : if (sjinfo->semi_can_hash)
8657 : {
8658 : /* Create a GROUP BY list for the Agg node to use */
8659 : Oid eq_oper;
8660 : SortGroupClause *groupcl;
8661 :
8662 : /*
8663 : * Get the hashable equality operators for the Agg node to
8664 : * use. Normally these are the same as the IN clause
8665 : * operators, but if those are cross-type operators then the
8666 : * equality operators are the ones for the IN clause
8667 : * operators' RHS datatype.
8668 : */
8669 4696 : if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
8670 0 : elog(ERROR, "could not find compatible hash operator for operator %u",
8671 : in_oper);
8672 :
8673 4696 : groupcl = makeNode(SortGroupClause);
8674 4696 : groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8675 4696 : groupcl->eqop = eq_oper;
8676 4696 : groupcl->sortop = sortop;
8677 4696 : groupcl->reverse_sort = false;
8678 4696 : groupcl->nulls_first = false;
8679 4696 : groupcl->hashable = true;
8680 4696 : groupClause = lappend(groupClause, groupcl);
8681 : }
8682 : }
8683 :
8684 : /*
8685 : * Done building the sortPathkeys and groupClause. But the
8686 : * sortPathkeys are bogus if not all the clauses were sortable.
8687 : */
8688 6514 : if (!sjinfo->semi_can_btree)
8689 0 : sortPathkeys = NIL;
8690 :
8691 : /*
8692 : * It can happen that all the RHS columns are equated to constants.
8693 : * We'd have to do something special to unique-ify in that case, and
8694 : * it's such an unlikely-in-the-real-world case that it's not worth
8695 : * the effort. So just punt if we found no columns to unique-ify.
8696 : */
8697 6514 : if (sortPathkeys == NIL && groupClause == NIL)
8698 : {
8699 1950 : MemoryContextSwitchTo(oldcontext);
8700 1950 : return NULL;
8701 : }
8702 :
8703 : /* Convert the required targetlist back to PathTarget form */
8704 4564 : unique_rel->reltarget = create_pathtarget(root, newtlist);
8705 : }
8706 :
8707 : /* build unique paths based on input rel's pathlist */
8708 4996 : create_final_unique_paths(root, rel, sortPathkeys, groupClause,
8709 : sjinfo, unique_rel);
8710 :
8711 : /* build unique paths based on input rel's partial_pathlist */
8712 4996 : create_partial_unique_paths(root, rel, sortPathkeys, groupClause,
8713 : sjinfo, unique_rel);
8714 :
8715 : /* Now choose the best path(s) */
8716 4996 : set_cheapest(unique_rel);
8717 :
8718 : /*
8719 : * There shouldn't be any partial paths for the unique relation;
8720 : * otherwise, we won't be able to properly guarantee uniqueness.
8721 : */
8722 : Assert(unique_rel->partial_pathlist == NIL);
8723 :
8724 : /* Cache the result */
8725 4996 : rel->unique_rel = unique_rel;
8726 4996 : rel->unique_pathkeys = sortPathkeys;
8727 4996 : rel->unique_groupclause = groupClause;
8728 :
8729 4996 : MemoryContextSwitchTo(oldcontext);
8730 :
8731 4996 : return unique_rel;
8732 : }
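/*
 * Illustrative sketch (editorial addition): the typical consumer of
 * create_unique_paths() is a semijoin arising from IN or EXISTS.  For a
 * hypothetical query
 *
 *     SELECT * FROM foo WHERE foo.x IN (SELECT bar.y FROM bar);
 *
 * the planner may implement the semijoin by unique-ifying bar on y, using
 * either a Sort + Unique path or a hashed Aggregate path built here, and
 * then joining the de-duplicated bar to foo with any ordinary join method.
 */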
8733 :
8734 : /*
8735 : * create_final_unique_paths
8736 : * Create unique paths in 'unique_rel' based on 'input_rel' pathlist
8737 : */
8738 : static void
8739 8744 : create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8740 : List *sortPathkeys, List *groupClause,
8741 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8742 : {
8743 8744 : Path *cheapest_input_path = input_rel->cheapest_total_path;
8744 :
8745 : /* Estimate number of output rows */
8746 8744 : unique_rel->rows = estimate_num_groups(root,
8747 : sjinfo->semi_rhs_exprs,
8748 : cheapest_input_path->rows,
8749 : NULL,
8750 : NULL);
8751 :
8752 : /* Consider sort-based implementations, if possible. */
8753 8744 : if (sjinfo->semi_can_btree)
8754 : {
8755 : ListCell *lc;
8756 :
8757 : /*
8758 : * Use any available suitably-sorted path as input, and also consider
8759 : * sorting the cheapest-total path and incremental sort on any paths
8760 : * with presorted keys.
8761 : *
8762 : * To save planning time, we ignore parameterized input paths unless
8763 : * they are the cheapest-total path.
8764 : */
8765 19056 : foreach(lc, input_rel->pathlist)
8766 : {
8767 10312 : Path *input_path = (Path *) lfirst(lc);
8768 : Path *path;
8769 : bool is_sorted;
8770 : int presorted_keys;
8771 :
8772 : /*
8773 : * Ignore parameterized paths that are not the cheapest-total
8774 : * path.
8775 : */
8776 10312 : if (input_path->param_info &&
8777 : input_path != cheapest_input_path)
8778 922 : continue;
8779 :
8780 9440 : is_sorted = pathkeys_count_contained_in(sortPathkeys,
8781 : input_path->pathkeys,
8782 : &presorted_keys);
8783 :
8784 : /*
8785 : * Ignore paths that are not suitably or partially sorted, unless
8786 : * they are the cheapest total path (no need to deal with paths
8787 : * which have presorted keys when incremental sort is disabled).
8788 : */
8789 9440 : if (!is_sorted && input_path != cheapest_input_path &&
8790 98 : (presorted_keys == 0 || !enable_incremental_sort))
8791 50 : continue;
8792 :
8793 : /*
8794 : * Make a separate ProjectionPath in case we need a Result node.
8795 : */
8796 9390 : path = (Path *) create_projection_path(root,
8797 : unique_rel,
8798 : input_path,
8799 9390 : unique_rel->reltarget);
8800 :
8801 9390 : if (!is_sorted)
8802 : {
8803 : /*
8804 : * We've no need to consider both a sort and incremental sort.
8805 : * We'll just do a sort if there are no presorted keys and an
8806 : * incremental sort when there are presorted keys.
8807 : */
8808 4982 : if (presorted_keys == 0 || !enable_incremental_sort)
8809 4934 : path = (Path *) create_sort_path(root,
8810 : unique_rel,
8811 : path,
8812 : sortPathkeys,
8813 : -1.0);
8814 : else
8815 48 : path = (Path *) create_incremental_sort_path(root,
8816 : unique_rel,
8817 : path,
8818 : sortPathkeys,
8819 : presorted_keys,
8820 : -1.0);
8821 : }
8822 :
8823 9390 : path = (Path *) create_unique_path(root, unique_rel, path,
8824 : list_length(sortPathkeys),
8825 : unique_rel->rows);
8826 :
8827 9390 : add_path(unique_rel, path);
8828 : }
8829 : }
8830 :
8831 : /* Consider hash-based implementation, if possible. */
8832 8744 : if (sjinfo->semi_can_hash)
8833 : {
8834 : Path *path;
8835 :
8836 : /*
8837 : * Make a separate ProjectionPath in case we need a Result node.
8838 : */
8839 8744 : path = (Path *) create_projection_path(root,
8840 : unique_rel,
8841 : cheapest_input_path,
8842 8744 : unique_rel->reltarget);
8843 :
8844 8744 : path = (Path *) create_agg_path(root,
8845 : unique_rel,
8846 : path,
8847 : cheapest_input_path->pathtarget,
8848 : AGG_HASHED,
8849 : AGGSPLIT_SIMPLE,
8850 : groupClause,
8851 : NIL,
8852 : NULL,
8853 : unique_rel->rows);
8854 :
8855 8744 : add_path(unique_rel, path);
8856 : }
8857 8744 : }
8858 :
8859 : /*
8860 : * create_partial_unique_paths
8861 : * Create unique paths in 'unique_rel' based on 'input_rel' partial_pathlist
8862 : */
8863 : static void
8864 4996 : create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8865 : List *sortPathkeys, List *groupClause,
8866 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8867 : {
8868 : RelOptInfo *partial_unique_rel;
8869 : Path *cheapest_partial_path;
8870 :
8871 : /* nothing to do when there are no partial paths in the input rel */
8872 4996 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
8873 1248 : return;
8874 :
8875 : /*
8876 : * nothing to do if there's anything in the targetlist that's
8877 : * parallel-restricted.
8878 : */
8879 3748 : if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs))
8880 0 : return;
8881 :
8882 3748 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
8883 :
8884 3748 : partial_unique_rel = makeNode(RelOptInfo);
8885 3748 : memcpy(partial_unique_rel, input_rel, sizeof(RelOptInfo));
8886 :
8887 : /*
8888 : * clear path info
8889 : */
8890 3748 : partial_unique_rel->pathlist = NIL;
8891 3748 : partial_unique_rel->ppilist = NIL;
8892 3748 : partial_unique_rel->partial_pathlist = NIL;
8893 3748 : partial_unique_rel->cheapest_startup_path = NULL;
8894 3748 : partial_unique_rel->cheapest_total_path = NULL;
8895 3748 : partial_unique_rel->cheapest_parameterized_paths = NIL;
8896 :
8897 : /* Estimate number of output rows */
8898 3748 : partial_unique_rel->rows = estimate_num_groups(root,
8899 : sjinfo->semi_rhs_exprs,
8900 : cheapest_partial_path->rows,
8901 : NULL,
8902 : NULL);
8903 3748 : partial_unique_rel->reltarget = unique_rel->reltarget;
8904 :
8905 : /* Consider sort-based implementations, if possible. */
8906 3748 : if (sjinfo->semi_can_btree)
8907 : {
8908 : ListCell *lc;
8909 :
8910 : /*
8911 : * Use any available suitably-sorted path as input, and also consider
8912 : * sorting the cheapest partial path and incremental sort on any paths
8913 : * with presorted keys.
8914 : */
8915 7808 : foreach(lc, input_rel->partial_pathlist)
8916 : {
8917 4060 : Path *input_path = (Path *) lfirst(lc);
8918 : Path *path;
8919 : bool is_sorted;
8920 : int presorted_keys;
8921 :
8922 4060 : is_sorted = pathkeys_count_contained_in(sortPathkeys,
8923 : input_path->pathkeys,
8924 : &presorted_keys);
8925 :
8926 : /*
8927 : * Ignore paths that are not suitably or partially sorted, unless
8928 : * they are the cheapest partial path (no need to deal with paths
8929 : * which have presorted keys when incremental sort is disabled).
8930 : */
8931 4060 : if (!is_sorted && input_path != cheapest_partial_path &&
8932 0 : (presorted_keys == 0 || !enable_incremental_sort))
8933 0 : continue;
8934 :
8935 : /*
8936 : * Make a separate ProjectionPath in case we need a Result node.
8937 : */
8938 4060 : path = (Path *) create_projection_path(root,
8939 : partial_unique_rel,
8940 : input_path,
8941 4060 : partial_unique_rel->reltarget);
8942 :
8943 4060 : if (!is_sorted)
8944 : {
8945 : /*
8946 : * We've no need to consider both a sort and incremental sort.
8947 : * We'll just do a sort if there are no presorted keys and an
8948 : * incremental sort when there are presorted keys.
8949 : */
8950 3700 : if (presorted_keys == 0 || !enable_incremental_sort)
8951 3700 : path = (Path *) create_sort_path(root,
8952 : partial_unique_rel,
8953 : path,
8954 : sortPathkeys,
8955 : -1.0);
8956 : else
8957 0 : path = (Path *) create_incremental_sort_path(root,
8958 : partial_unique_rel,
8959 : path,
8960 : sortPathkeys,
8961 : presorted_keys,
8962 : -1.0);
8963 : }
8964 :
8965 4060 : path = (Path *) create_unique_path(root, partial_unique_rel, path,
8966 : list_length(sortPathkeys),
8967 : partial_unique_rel->rows);
8968 :
8969 4060 : add_partial_path(partial_unique_rel, path);
8970 : }
8971 : }
8972 :
8973 : /* Consider hash-based implementation, if possible. */
8974 3748 : if (sjinfo->semi_can_hash)
8975 : {
8976 : Path *path;
8977 :
8978 : /*
8979 : * Make a separate ProjectionPath in case we need a Result node.
8980 : */
8981 3748 : path = (Path *) create_projection_path(root,
8982 : partial_unique_rel,
8983 : cheapest_partial_path,
8984 3748 : partial_unique_rel->reltarget);
8985 :
8986 3748 : path = (Path *) create_agg_path(root,
8987 : partial_unique_rel,
8988 : path,
8989 : cheapest_partial_path->pathtarget,
8990 : AGG_HASHED,
8991 : AGGSPLIT_SIMPLE,
8992 : groupClause,
8993 : NIL,
8994 : NULL,
8995 : partial_unique_rel->rows);
8996 :
8997 3748 : add_partial_path(partial_unique_rel, path);
8998 : }
8999 :
9000 3748 : if (partial_unique_rel->partial_pathlist != NIL)
9001 : {
9002 3748 : generate_useful_gather_paths(root, partial_unique_rel, true);
9003 3748 : set_cheapest(partial_unique_rel);
9004 :
9005 : /*
9006 : * Finally, create paths to unique-ify the final result. This step is
9007 : * needed to remove any duplicates due to combining rows from parallel
9008 : * workers.
9009 : */
9010 3748 : create_final_unique_paths(root, partial_unique_rel,
9011 : sortPathkeys, groupClause,
9012 : sjinfo, unique_rel);
9013 : }
9014 : }
9015 :
9016 : /*
9017 : * Choose a unique name for some subroot.
9018 : *
9019 : * Modifies glob->subplanNames to track names already used.
9020 : */
9021 : char *
9022 85462 : choose_plan_name(PlannerGlobal *glob, const char *name, bool always_number)
9023 : {
9024 : unsigned n;
9025 :
9026 : /*
9027 : * If a numeric suffix is not required, then search the list of
9028 : * previously-assigned names for a match. If none is found, then we can
9029 : * use the provided name without modification.
9030 : */
9031 85462 : if (!always_number)
9032 : {
9033 26602 : bool found = false;
9034 :
9035 63762 : foreach_ptr(char, subplan_name, glob->subplanNames)
9036 : {
9037 16294 : if (strcmp(subplan_name, name) == 0)
9038 : {
9039 5736 : found = true;
9040 5736 : break;
9041 : }
9042 : }
9043 :
9044 26602 : if (!found)
9045 : {
9046 : /* pstrdup here is just to avoid cast-away-const */
9047 20866 : char *chosen_name = pstrdup(name);
9048 :
9049 20866 : glob->subplanNames = lappend(glob->subplanNames, chosen_name);
9050 20866 : return chosen_name;
9051 : }
9052 : }
9053 :
9054 : /*
9055 : * If a numeric suffix is required or if the un-suffixed name is already
9056 : * in use, then loop until we find a positive integer that produces a
9057 : * novel name.
9058 : */
9059 64596 : for (n = 1; true; ++n)
9060 55576 : {
9061 120172 : char *proposed_name = psprintf("%s_%u", name, n);
9062 120172 : bool found = false;
9063 :
9064 458462 : foreach_ptr(char, subplan_name, glob->subplanNames)
9065 : {
9066 273694 : if (strcmp(subplan_name, proposed_name) == 0)
9067 : {
9068 55576 : found = true;
9069 55576 : break;
9070 : }
9071 : }
9072 :
9073 120172 : if (!found)
9074 : {
9075 64596 : glob->subplanNames = lappend(glob->subplanNames, proposed_name);
9076 64596 : return proposed_name;
9077 : }
9078 :
9079 55576 : pfree(proposed_name);
9080 : }
9081 : }
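/*
 * Illustrative usage sketch (editorial addition; the names and call sites
 * are hypothetical):
 *
 *     choose_plan_name(glob, "InitPlan", true);           => "InitPlan_1"
 *     choose_plan_name(glob, "InitPlan", true);           => "InitPlan_2"
 *     choose_plan_name(glob, "unnamed_subquery", false);  => "unnamed_subquery"
 *     choose_plan_name(glob, "unnamed_subquery", false);  => "unnamed_subquery_1"
 *
 * That is, always_number forces a numeric suffix even on first use, while
 * otherwise a suffix is appended only once the bare name has been taken.
 */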
|