Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * planner.c
4 : * The query optimizer external interface.
5 : *
6 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/optimizer/plan/planner.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 :
16 : #include "postgres.h"
17 :
18 : #include <limits.h>
19 : #include <math.h>
20 :
21 : #include "access/genam.h"
22 : #include "access/parallel.h"
23 : #include "access/sysattr.h"
24 : #include "access/table.h"
25 : #include "catalog/pg_aggregate.h"
26 : #include "catalog/pg_inherits.h"
27 : #include "catalog/pg_proc.h"
28 : #include "catalog/pg_type.h"
29 : #include "executor/executor.h"
30 : #include "foreign/fdwapi.h"
31 : #include "jit/jit.h"
32 : #include "lib/bipartite_match.h"
33 : #include "lib/knapsack.h"
34 : #include "miscadmin.h"
35 : #include "nodes/makefuncs.h"
36 : #include "nodes/nodeFuncs.h"
37 : #ifdef OPTIMIZER_DEBUG
38 : #include "nodes/print.h"
39 : #endif
40 : #include "nodes/supportnodes.h"
41 : #include "optimizer/appendinfo.h"
42 : #include "optimizer/clauses.h"
43 : #include "optimizer/cost.h"
44 : #include "optimizer/optimizer.h"
45 : #include "optimizer/paramassign.h"
46 : #include "optimizer/pathnode.h"
47 : #include "optimizer/paths.h"
48 : #include "optimizer/plancat.h"
49 : #include "optimizer/planmain.h"
50 : #include "optimizer/planner.h"
51 : #include "optimizer/prep.h"
52 : #include "optimizer/subselect.h"
53 : #include "optimizer/tlist.h"
54 : #include "parser/analyze.h"
55 : #include "parser/parse_agg.h"
56 : #include "parser/parse_clause.h"
57 : #include "parser/parse_relation.h"
58 : #include "parser/parsetree.h"
59 : #include "partitioning/partdesc.h"
60 : #include "rewrite/rewriteManip.h"
61 : #include "utils/acl.h"
62 : #include "utils/backend_status.h"
63 : #include "utils/lsyscache.h"
64 : #include "utils/rel.h"
65 : #include "utils/selfuncs.h"
66 :
67 : /* GUC parameters */
68 : double cursor_tuple_fraction = DEFAULT_CURSOR_TUPLE_FRACTION;
69 : int debug_parallel_query = DEBUG_PARALLEL_OFF;
70 : bool parallel_leader_participation = true;
71 : bool enable_distinct_reordering = true;
72 :
73 : /* Hook for plugins to get control in planner() */
74 : planner_hook_type planner_hook = NULL;
75 :
76 : /* Hook for plugins to get control after PlannerGlobal is initialized */
77 : planner_setup_hook_type planner_setup_hook = NULL;
78 :
79 : /* Hook for plugins to get control before PlannerGlobal is discarded */
80 : planner_shutdown_hook_type planner_shutdown_hook = NULL;
81 :
82 : /* Hook for plugins to get control when grouping_planner() plans upper rels */
83 : create_upper_paths_hook_type create_upper_paths_hook = NULL;
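A minimal sketch of a loadable module that installs planner_hook, chaining to any previously installed hook. The hook and standard_planner() signatures are taken from this file; the module and function names are hypothetical.

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

static planner_hook_type prev_planner_hook = NULL;

/* Delegate to the previous hook if any, else to standard_planner() */
static PlannedStmt *
my_planner(Query *parse, const char *query_string, int cursorOptions,
           ParamListInfo boundParams, ExplainState *es)
{
    if (prev_planner_hook)
        return prev_planner_hook(parse, query_string, cursorOptions,
                                 boundParams, es);
    return standard_planner(parse, query_string, cursorOptions,
                            boundParams, es);
}

void
_PG_init(void)
{
    prev_planner_hook = planner_hook;
    planner_hook = my_planner;
}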
84 :
85 :
86 : /* Expression kind codes for preprocess_expression */
87 : #define EXPRKIND_QUAL 0
88 : #define EXPRKIND_TARGET 1
89 : #define EXPRKIND_RTFUNC 2
90 : #define EXPRKIND_RTFUNC_LATERAL 3
91 : #define EXPRKIND_VALUES 4
92 : #define EXPRKIND_VALUES_LATERAL 5
93 : #define EXPRKIND_LIMIT 6
94 : #define EXPRKIND_APPINFO 7
95 : #define EXPRKIND_PHV 8
96 : #define EXPRKIND_TABLESAMPLE 9
97 : #define EXPRKIND_ARBITER_ELEM 10
98 : #define EXPRKIND_TABLEFUNC 11
99 : #define EXPRKIND_TABLEFUNC_LATERAL 12
100 : #define EXPRKIND_GROUPEXPR 13
101 :
102 : /*
103 : * Data specific to grouping sets
104 : */
105 : typedef struct
106 : {
107 : List *rollups;
108 : List *hash_sets_idx;
109 : double dNumHashGroups;
110 : bool any_hashable;
111 : Bitmapset *unsortable_refs;
112 : Bitmapset *unhashable_refs;
113 : List *unsortable_sets;
114 : int *tleref_to_colnum_map;
115 : } grouping_sets_data;
116 :
117 : /*
118 : * Temporary structure for use during WindowClause reordering in order to be
119 : * able to sort WindowClauses on partitioning/ordering prefix.
120 : */
121 : typedef struct
122 : {
123 : WindowClause *wc;
124 : List *uniqueOrder; /* A List of unique ordering/partitioning
125 : * clauses per Window */
126 : } WindowClauseSortData;
127 :
128 : /* Passthrough data for standard_qp_callback */
129 : typedef struct
130 : {
131 : List *activeWindows; /* active windows, if any */
132 : grouping_sets_data *gset_data; /* grouping sets data, if any */
133 : SetOperationStmt *setop; /* parent set operation or NULL if not a
134 : * subquery belonging to a set operation */
135 : } standard_qp_extra;
136 :
137 : /* Local functions */
138 : static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
139 : static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
140 : static void grouping_planner(PlannerInfo *root, double tuple_fraction,
141 : SetOperationStmt *setops);
142 : static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
143 : static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
144 : int *tleref_to_colnum_map);
145 : static void preprocess_rowmarks(PlannerInfo *root);
146 : static double preprocess_limit(PlannerInfo *root,
147 : double tuple_fraction,
148 : int64 *offset_est, int64 *count_est);
149 : static List *preprocess_groupclause(PlannerInfo *root, List *force);
150 : static List *extract_rollup_sets(List *groupingSets);
151 : static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
152 : static void standard_qp_callback(PlannerInfo *root, void *extra);
153 : static double get_number_of_groups(PlannerInfo *root,
154 : double path_rows,
155 : grouping_sets_data *gd,
156 : List *target_list);
157 : static RelOptInfo *create_grouping_paths(PlannerInfo *root,
158 : RelOptInfo *input_rel,
159 : PathTarget *target,
160 : bool target_parallel_safe,
161 : grouping_sets_data *gd);
162 : static bool is_degenerate_grouping(PlannerInfo *root);
163 : static void create_degenerate_grouping_paths(PlannerInfo *root,
164 : RelOptInfo *input_rel,
165 : RelOptInfo *grouped_rel);
166 : static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
167 : PathTarget *target, bool target_parallel_safe,
168 : Node *havingQual);
169 : static void create_ordinary_grouping_paths(PlannerInfo *root,
170 : RelOptInfo *input_rel,
171 : RelOptInfo *grouped_rel,
172 : const AggClauseCosts *agg_costs,
173 : grouping_sets_data *gd,
174 : GroupPathExtraData *extra,
175 : RelOptInfo **partially_grouped_rel_p);
176 : static void consider_groupingsets_paths(PlannerInfo *root,
177 : RelOptInfo *grouped_rel,
178 : Path *path,
179 : bool is_sorted,
180 : bool can_hash,
181 : grouping_sets_data *gd,
182 : const AggClauseCosts *agg_costs,
183 : double dNumGroups);
184 : static RelOptInfo *create_window_paths(PlannerInfo *root,
185 : RelOptInfo *input_rel,
186 : PathTarget *input_target,
187 : PathTarget *output_target,
188 : bool output_target_parallel_safe,
189 : WindowFuncLists *wflists,
190 : List *activeWindows);
191 : static void create_one_window_path(PlannerInfo *root,
192 : RelOptInfo *window_rel,
193 : Path *path,
194 : PathTarget *input_target,
195 : PathTarget *output_target,
196 : WindowFuncLists *wflists,
197 : List *activeWindows);
198 : static RelOptInfo *create_distinct_paths(PlannerInfo *root,
199 : RelOptInfo *input_rel,
200 : PathTarget *target);
201 : static void create_partial_distinct_paths(PlannerInfo *root,
202 : RelOptInfo *input_rel,
203 : RelOptInfo *final_distinct_rel,
204 : PathTarget *target);
205 : static RelOptInfo *create_final_distinct_paths(PlannerInfo *root,
206 : RelOptInfo *input_rel,
207 : RelOptInfo *distinct_rel);
208 : static List *get_useful_pathkeys_for_distinct(PlannerInfo *root,
209 : List *needed_pathkeys,
210 : List *path_pathkeys);
211 : static RelOptInfo *create_ordered_paths(PlannerInfo *root,
212 : RelOptInfo *input_rel,
213 : PathTarget *target,
214 : bool target_parallel_safe,
215 : double limit_tuples);
216 : static PathTarget *make_group_input_target(PlannerInfo *root,
217 : PathTarget *final_target);
218 : static PathTarget *make_partial_grouping_target(PlannerInfo *root,
219 : PathTarget *grouping_target,
220 : Node *havingQual);
221 : static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
222 : static void optimize_window_clauses(PlannerInfo *root,
223 : WindowFuncLists *wflists);
224 : static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
225 : static void name_active_windows(List *activeWindows);
226 : static PathTarget *make_window_input_target(PlannerInfo *root,
227 : PathTarget *final_target,
228 : List *activeWindows);
229 : static List *make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
230 : List *tlist);
231 : static PathTarget *make_sort_input_target(PlannerInfo *root,
232 : PathTarget *final_target,
233 : bool *have_postponed_srfs);
234 : static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
235 : List *targets, List *targets_contain_srfs);
236 : static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
237 : RelOptInfo *grouped_rel,
238 : RelOptInfo *partially_grouped_rel,
239 : const AggClauseCosts *agg_costs,
240 : grouping_sets_data *gd,
241 : GroupPathExtraData *extra);
242 : static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
243 : RelOptInfo *grouped_rel,
244 : RelOptInfo *input_rel,
245 : grouping_sets_data *gd,
246 : GroupPathExtraData *extra,
247 : bool force_rel_creation);
248 : static Path *make_ordered_path(PlannerInfo *root,
249 : RelOptInfo *rel,
250 : Path *path,
251 : Path *cheapest_path,
252 : List *pathkeys,
253 : double limit_tuples);
254 : static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
255 : static bool can_partial_agg(PlannerInfo *root);
256 : static void apply_scanjoin_target_to_paths(PlannerInfo *root,
257 : RelOptInfo *rel,
258 : List *scanjoin_targets,
259 : List *scanjoin_targets_contain_srfs,
260 : bool scanjoin_target_parallel_safe,
261 : bool tlist_same_exprs);
262 : static void create_partitionwise_grouping_paths(PlannerInfo *root,
263 : RelOptInfo *input_rel,
264 : RelOptInfo *grouped_rel,
265 : RelOptInfo *partially_grouped_rel,
266 : const AggClauseCosts *agg_costs,
267 : grouping_sets_data *gd,
268 : PartitionwiseAggregateType patype,
269 : GroupPathExtraData *extra);
270 : static bool group_by_has_partkey(RelOptInfo *input_rel,
271 : List *targetList,
272 : List *groupClause);
273 : static int common_prefix_cmp(const void *a, const void *b);
274 : static List *generate_setop_child_grouplist(SetOperationStmt *op,
275 : List *targetlist);
276 : static void create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
277 : List *sortPathkeys, List *groupClause,
278 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
279 : static void create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
280 : List *sortPathkeys, List *groupClause,
281 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel);
282 :
283 :
284 : /*****************************************************************************
285 : *
286 : * Query optimizer entry point
287 : *
288 : * Inputs:
289 : * parse: an analyzed-and-rewritten query tree for an optimizable statement
290 : * query_string: source text for the query tree (used for error reports)
291 : * cursorOptions: bitmask of CURSOR_OPT_XXX flags, see parsenodes.h
292 : * boundParams: passed-in parameter values, or NULL if none
293 : * es: ExplainState if being called from EXPLAIN, else NULL
294 : *
295 : * The result is a PlannedStmt tree.
296 : *
297 : * PARAM_EXTERN Param nodes within the parse tree can be replaced by Consts
298 : * using values from boundParams, if those values are marked PARAM_FLAG_CONST.
299 : * Parameter values not so marked are still relied on for estimation purposes.
300 : *
301 : * The ExplainState pointer is not currently used by the core planner, but it
302 : * is passed through to some planner hooks so that they can report information
303 : * back to EXPLAIN extension hooks.
304 : *
305 : * To support loadable plugins that monitor or modify planner behavior,
306 : * we provide a hook variable that lets a plugin get control before and
307 : * after the standard planning process. The plugin would normally call
308 : * standard_planner().
309 : *
310 : * Note to plugin authors: standard_planner() scribbles on its Query input,
311 : * so you'd better copy that data structure if you want to plan more than once.
312 : *
313 : *****************************************************************************/
314 : PlannedStmt *
315 234740 : planner(Query *parse, const char *query_string, int cursorOptions,
316 : ParamListInfo boundParams, ExplainState *es)
317 : {
318 : PlannedStmt *result;
319 :
320 234740 : if (planner_hook)
321 48718 : result = (*planner_hook) (parse, query_string, cursorOptions,
322 : boundParams, es);
323 : else
324 186022 : result = standard_planner(parse, query_string, cursorOptions,
325 : boundParams, es);
326 :
327 232304 : pgstat_report_plan_id(result->planId, false);
328 :
329 232304 : return result;
330 : }
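A short sketch of the point made in the header comment above: standard_planner() scribbles on its Query input, so a caller that wants to plan the same query more than once should plan a copy. copyObject() is the standard deep-copy facility; the helper name below is hypothetical.

/* Plan a throwaway copy, leaving the caller's Query intact. */
static PlannedStmt *
plan_query_copy(Query *parse, const char *query_string, int cursorOptions,
                ParamListInfo boundParams, ExplainState *es)
{
    Query *work_copy = copyObject(parse);

    return standard_planner(work_copy, query_string, cursorOptions,
                            boundParams, es);
}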
331 :
332 : PlannedStmt *
333 234740 : standard_planner(Query *parse, const char *query_string, int cursorOptions,
334 : ParamListInfo boundParams, ExplainState *es)
335 : {
336 : PlannedStmt *result;
337 : PlannerGlobal *glob;
338 : double tuple_fraction;
339 : PlannerInfo *root;
340 : RelOptInfo *final_rel;
341 : Path *best_path;
342 : Plan *top_plan;
343 : ListCell *lp,
344 : *lr;
345 :
346 : /*
347 : * Set up global state for this planner invocation. This data is needed
348 : * across all levels of sub-Query that might exist in the given command,
349 : * so we keep it in a separate struct that's linked to by each per-Query
350 : * PlannerInfo.
351 : */
352 234740 : glob = makeNode(PlannerGlobal);
353 :
354 234740 : glob->boundParams = boundParams;
355 234740 : glob->subplans = NIL;
356 234740 : glob->subpaths = NIL;
357 234740 : glob->subroots = NIL;
358 234740 : glob->rewindPlanIDs = NULL;
359 234740 : glob->finalrtable = NIL;
360 234740 : glob->allRelids = NULL;
361 234740 : glob->prunableRelids = NULL;
362 234740 : glob->finalrteperminfos = NIL;
363 234740 : glob->finalrowmarks = NIL;
364 234740 : glob->resultRelations = NIL;
365 234740 : glob->appendRelations = NIL;
366 234740 : glob->partPruneInfos = NIL;
367 234740 : glob->relationOids = NIL;
368 234740 : glob->invalItems = NIL;
369 234740 : glob->paramExecTypes = NIL;
370 234740 : glob->lastPHId = 0;
371 234740 : glob->lastRowMarkId = 0;
372 234740 : glob->lastPlanNodeId = 0;
373 234740 : glob->transientPlan = false;
374 234740 : glob->dependsOnRole = false;
375 234740 : glob->partition_directory = NULL;
376 234740 : glob->rel_notnullatts_hash = NULL;
377 :
378 : /*
379 : * Assess whether it's feasible to use parallel mode for this query. We
380 : * can't do this in a standalone backend, or if the command will try to
381 : * modify any data, or if this is a cursor operation, or if GUCs are set
382 : * to values that don't permit parallelism, or if parallel-unsafe
383 : * functions are present in the query tree.
384 : *
385 : * (Note that we do allow CREATE TABLE AS, SELECT INTO, and CREATE
386 : * MATERIALIZED VIEW to use parallel plans, but this is safe only because
387 : * the command is writing into a completely new table which workers won't
388 : * be able to see. If the workers could see the table, the fact that
389 : * group locking would cause them to ignore the leader's heavyweight GIN
390 : * page locks would make this unsafe. We'll have to fix that somehow if
391 : * we want to allow parallel inserts in general; updates and deletes have
392 : * additional problems especially around combo CIDs.)
393 : *
394 : * For now, we don't try to use parallel mode if we're running inside a
395 : * parallel worker. We might eventually be able to relax this
396 : * restriction, but for now it seems best not to have parallel workers
397 : * trying to create their own parallel workers.
398 : */
399 234740 : if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
400 220434 : IsUnderPostmaster &&
401 220434 : parse->commandType == CMD_SELECT &&
402 176740 : !parse->hasModifyingCTE &&
403 176666 : max_parallel_workers_per_gather > 0 &&
404 176349 : !IsParallelWorker())
405 : {
406 : /* all the cheap tests pass, so scan the query tree */
407 176325 : glob->maxParallelHazard = max_parallel_hazard(parse);
408 176325 : glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE);
409 : }
410 : else
411 : {
412 : /* skip the query tree scan, just assume it's unsafe */
413 58415 : glob->maxParallelHazard = PROPARALLEL_UNSAFE;
414 58415 : glob->parallelModeOK = false;
415 : }
416 :
417 : /*
418 : * glob->parallelModeNeeded is normally set to false here and changed to
419 : * true during plan creation if a Gather or Gather Merge plan is actually
420 : * created (cf. create_gather_plan, create_gather_merge_plan).
421 : *
422 : * However, if debug_parallel_query = on or debug_parallel_query =
423 : * regress, then we impose parallel mode whenever it's safe to do so, even
424 : * if the final plan doesn't use parallelism. It's not safe to do so if
425 : * the query contains anything parallel-unsafe; parallelModeOK will be
426 : * false in that case. Otherwise, everything in the query is either
427 : * parallel-safe or parallel-restricted, and in either case it should be
428 : * OK to impose parallel-mode restrictions. If that ends up breaking
429 : * something, then either some function the user included in the query is
430 : * incorrectly labeled as parallel-safe or parallel-restricted when in
431 : * reality it's parallel-unsafe, or else the query planner itself has a
432 : * bug. Note that parallelModeOK can't change after this point.
433 : */
434 383018 : glob->parallelModeNeeded = glob->parallelModeOK &&
435 148278 : (debug_parallel_query != DEBUG_PARALLEL_OFF);
436 :
437 : /* Determine what fraction of the plan is likely to be scanned */
438 234740 : if (cursorOptions & CURSOR_OPT_FAST_PLAN)
439 : {
440 : /*
441 : * We have no real idea how many tuples the user will ultimately FETCH
442 : * from a cursor, but it is often the case that he doesn't want 'em
443 : * all, or would prefer a fast-start plan anyway so that he can
444 : * process some of the tuples sooner. Use a GUC parameter to decide
445 : * what fraction to optimize for.
446 : */
447 2356 : tuple_fraction = cursor_tuple_fraction;
448 :
449 : /*
450 : * We document cursor_tuple_fraction as simply being a fraction, which
451 : * means the edge cases 0 and 1 have to be treated specially here. We
452 : * convert 1 to 0 ("all the tuples") and 0 to a very small fraction.
453 : */
454 2356 : if (tuple_fraction >= 1.0)
455 0 : tuple_fraction = 0.0;
456 2356 : else if (tuple_fraction <= 0.0)
457 0 : tuple_fraction = 1e-10;
458 : }
459 : else
460 : {
461 : /* Default assumption is we need all the tuples */
462 232384 : tuple_fraction = 0.0;
463 : }
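    /*
     * A hedged aside: DEFAULT_CURSOR_TUPLE_FRACTION is 0.1, so by default a
     * CURSOR_OPT_FAST_PLAN query is costed as though roughly 10% of the
     * result will be fetched; 0.0 here continues to mean "fetch all the
     * tuples".
     */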
464 :
465 : /*
466 : * Compute the initial path generation strategy mask.
467 : *
468 : * Some strategies, such as PGS_FOREIGNJOIN, have no corresponding enable_*
469 : * GUC, and so the corresponding bits are always set in the default
470 : * strategy mask.
471 : *
472 : * It may seem surprising that enable_indexscan sets both PGS_INDEXSCAN
473 : * and PGS_INDEXONLYSCAN. However, the historical behavior of this GUC
474 : * corresponds to this exactly: enable_indexscan=off disables both
475 : * index-scan and index-only scan paths, whereas enable_indexonlyscan=off
476 : * converts the index-only scan paths that we would have considered into
477 : * index scan paths.
478 : */
479 234740 : glob->default_pgs_mask = PGS_APPEND | PGS_MERGE_APPEND | PGS_FOREIGNJOIN |
480 : PGS_GATHER | PGS_CONSIDER_NONPARTIAL;
481 234740 : if (enable_tidscan)
482 234740 : glob->default_pgs_mask |= PGS_TIDSCAN;
483 234740 : if (enable_seqscan)
484 222126 : glob->default_pgs_mask |= PGS_SEQSCAN;
485 234740 : if (enable_indexscan)
486 233401 : glob->default_pgs_mask |= PGS_INDEXSCAN | PGS_INDEXONLYSCAN;
487 234740 : if (enable_indexonlyscan)
488 233918 : glob->default_pgs_mask |= PGS_CONSIDER_INDEXONLY;
489 234740 : if (enable_bitmapscan)
490 228918 : glob->default_pgs_mask |= PGS_BITMAPSCAN;
491 234740 : if (enable_mergejoin)
492 : {
493 233621 : glob->default_pgs_mask |= PGS_MERGEJOIN_PLAIN;
494 233621 : if (enable_material)
495 233580 : glob->default_pgs_mask |= PGS_MERGEJOIN_MATERIALIZE;
496 : }
497 234740 : if (enable_nestloop)
498 : {
499 234542 : glob->default_pgs_mask |= PGS_NESTLOOP_PLAIN;
500 234542 : if (enable_material)
501 234428 : glob->default_pgs_mask |= PGS_NESTLOOP_MATERIALIZE;
502 234542 : if (enable_memoize)
503 234476 : glob->default_pgs_mask |= PGS_NESTLOOP_MEMOIZE;
504 : }
505 234740 : if (enable_hashjoin)
506 233408 : glob->default_pgs_mask |= PGS_HASHJOIN;
507 234740 : if (enable_gathermerge)
508 234740 : glob->default_pgs_mask |= PGS_GATHER_MERGE;
509 234740 : if (enable_partitionwise_join)
510 1251 : glob->default_pgs_mask |= PGS_CONSIDER_PARTITIONWISE;
511 :
512 : /* Allow plugins to take control after we've initialized "glob" */
513 234740 : if (planner_setup_hook)
514 0 : (*planner_setup_hook) (glob, parse, query_string, cursorOptions,
515 : &tuple_fraction, es);
516 :
517 : /* primary planning entry point (may recurse for subqueries) */
518 234740 : root = subquery_planner(glob, parse, NULL, NULL, false, tuple_fraction,
519 : NULL);
520 :
521 : /* Select best Path and turn it into a Plan */
522 232508 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
523 232508 : best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
524 :
525 232508 : top_plan = create_plan(root, best_path);
526 :
527 : /*
528 : * If creating a plan for a scrollable cursor, make sure it can run
529 : * backwards on demand. Add a Material node at the top at need.
530 : */
531 232304 : if (cursorOptions & CURSOR_OPT_SCROLL)
532 : {
533 133 : if (!ExecSupportsBackwardScan(top_plan))
534 16 : top_plan = materialize_finished_plan(top_plan);
535 : }
536 :
537 : /*
538 : * Optionally add a Gather node for testing purposes, provided this is
539 : * actually a safe thing to do.
540 : *
541 : * We can add Gather even when top_plan has parallel-safe initPlans, but
542 : * then we have to move the initPlans to the Gather node because of
543 : * SS_finalize_plan's limitations. That would cause cosmetic breakage of
544 : * regression tests when debug_parallel_query = regress, because initPlans
545 : * that would normally appear on the top_plan move to the Gather, causing
546 : * them to disappear from EXPLAIN output. That doesn't seem worth kluging
547 : * EXPLAIN to hide, so skip it when debug_parallel_query = regress.
548 : */
549 232304 : if (debug_parallel_query != DEBUG_PARALLEL_OFF &&
550 97 : top_plan->parallel_safe &&
551 64 : (top_plan->initPlan == NIL ||
552 0 : debug_parallel_query != DEBUG_PARALLEL_REGRESS))
553 : {
554 64 : Gather *gather = makeNode(Gather);
555 : Cost initplan_cost;
556 : bool unsafe_initplans;
557 :
558 64 : gather->plan.targetlist = top_plan->targetlist;
559 64 : gather->plan.qual = NIL;
560 64 : gather->plan.lefttree = top_plan;
561 64 : gather->plan.righttree = NULL;
562 64 : gather->num_workers = 1;
563 64 : gather->single_copy = true;
564 64 : gather->invisible = (debug_parallel_query == DEBUG_PARALLEL_REGRESS);
565 :
566 : /* Transfer any initPlans to the new top node */
567 64 : gather->plan.initPlan = top_plan->initPlan;
568 64 : top_plan->initPlan = NIL;
569 :
570 : /*
571 : * Since this Gather has no parallel-aware descendants to signal to,
572 : * we don't need a rescan Param.
573 : */
574 64 : gather->rescan_param = -1;
575 :
576 : /*
577 : * Ideally we'd use cost_gather here, but setting up dummy path data
578 : * to satisfy it doesn't seem much cleaner than knowing what it does.
579 : */
580 64 : gather->plan.startup_cost = top_plan->startup_cost +
581 : parallel_setup_cost;
582 64 : gather->plan.total_cost = top_plan->total_cost +
583 64 : parallel_setup_cost + parallel_tuple_cost * top_plan->plan_rows;
584 64 : gather->plan.plan_rows = top_plan->plan_rows;
585 64 : gather->plan.plan_width = top_plan->plan_width;
586 64 : gather->plan.parallel_aware = false;
587 64 : gather->plan.parallel_safe = false;
588 :
589 : /*
590 : * Delete the initplans' cost from top_plan. We needn't add it to the
591 : * Gather node, since the above coding already included it there.
592 : */
593 64 : SS_compute_initplan_cost(gather->plan.initPlan,
594 : &initplan_cost, &unsafe_initplans);
595 64 : top_plan->startup_cost -= initplan_cost;
596 64 : top_plan->total_cost -= initplan_cost;
597 :
598 : /* use parallel mode for parallel plans. */
599 64 : root->glob->parallelModeNeeded = true;
600 :
601 64 : top_plan = &gather->plan;
602 : }
603 :
604 : /*
605 : * If any Params were generated, run through the plan tree and compute
606 : * each plan node's extParam/allParam sets. Ideally we'd merge this into
607 : * set_plan_references' tree traversal, but for now it has to be separate
608 : * because we need to visit subplans before, not after, the main plan.
609 : */
610 232304 : if (glob->paramExecTypes != NIL)
611 : {
612 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
613 105346 : forboth(lp, glob->subplans, lr, glob->subroots)
614 : {
615 22217 : Plan *subplan = (Plan *) lfirst(lp);
616 22217 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
617 :
618 22217 : SS_finalize_plan(subroot, subplan);
619 : }
620 83129 : SS_finalize_plan(root, top_plan);
621 : }
622 :
623 : /* final cleanup of the plan */
624 : Assert(glob->finalrtable == NIL);
625 : Assert(glob->finalrteperminfos == NIL);
626 : Assert(glob->finalrowmarks == NIL);
627 : Assert(glob->resultRelations == NIL);
628 : Assert(glob->appendRelations == NIL);
629 232304 : top_plan = set_plan_references(root, top_plan);
630 : /* ... and the subplans (both regular subplans and initplans) */
631 : Assert(list_length(glob->subplans) == list_length(glob->subroots));
632 254521 : forboth(lp, glob->subplans, lr, glob->subroots)
633 : {
634 22217 : Plan *subplan = (Plan *) lfirst(lp);
635 22217 : PlannerInfo *subroot = lfirst_node(PlannerInfo, lr);
636 :
637 22217 : lfirst(lp) = set_plan_references(subroot, subplan);
638 : }
639 :
640 : /* build the PlannedStmt result */
641 232304 : result = makeNode(PlannedStmt);
642 :
643 232304 : result->commandType = parse->commandType;
644 232304 : result->queryId = parse->queryId;
645 232304 : result->planOrigin = PLAN_STMT_STANDARD;
646 232304 : result->hasReturning = (parse->returningList != NIL);
647 232304 : result->hasModifyingCTE = parse->hasModifyingCTE;
648 232304 : result->canSetTag = parse->canSetTag;
649 232304 : result->transientPlan = glob->transientPlan;
650 232304 : result->dependsOnRole = glob->dependsOnRole;
651 232304 : result->parallelModeNeeded = glob->parallelModeNeeded;
652 232304 : result->planTree = top_plan;
653 232304 : result->partPruneInfos = glob->partPruneInfos;
654 232304 : result->rtable = glob->finalrtable;
655 464608 : result->unprunableRelids = bms_difference(glob->allRelids,
656 232304 : glob->prunableRelids);
657 232304 : result->permInfos = glob->finalrteperminfos;
658 232304 : result->subrtinfos = glob->subrtinfos;
659 232304 : result->resultRelations = glob->resultRelations;
660 232304 : result->appendRelations = glob->appendRelations;
661 232304 : result->subplans = glob->subplans;
662 232304 : result->rewindPlanIDs = glob->rewindPlanIDs;
663 232304 : result->rowMarks = glob->finalrowmarks;
664 232304 : result->relationOids = glob->relationOids;
665 232304 : result->invalItems = glob->invalItems;
666 232304 : result->paramExecTypes = glob->paramExecTypes;
667 : /* utilityStmt should be null, but we might as well copy it */
668 232304 : result->utilityStmt = parse->utilityStmt;
669 232304 : result->elidedNodes = glob->elidedNodes;
670 232304 : result->stmt_location = parse->stmt_location;
671 232304 : result->stmt_len = parse->stmt_len;
672 :
673 232304 : result->jitFlags = PGJIT_NONE;
674 232304 : if (jit_enabled && jit_above_cost >= 0 &&
675 231921 : top_plan->total_cost > jit_above_cost)
676 : {
677 466 : result->jitFlags |= PGJIT_PERFORM;
678 :
679 : /*
680 : * Decide how much effort should be put into generating better code.
681 : */
682 466 : if (jit_optimize_above_cost >= 0 &&
683 466 : top_plan->total_cost > jit_optimize_above_cost)
684 184 : result->jitFlags |= PGJIT_OPT3;
685 466 : if (jit_inline_above_cost >= 0 &&
686 466 : top_plan->total_cost > jit_inline_above_cost)
687 184 : result->jitFlags |= PGJIT_INLINE;
688 :
689 : /*
690 : * Decide which operations should be JITed.
691 : */
692 466 : if (jit_expressions)
693 466 : result->jitFlags |= PGJIT_EXPR;
694 466 : if (jit_tuple_deforming)
695 466 : result->jitFlags |= PGJIT_DEFORM;
696 : }
697 :
698 : /* Allow plugins to take control before we discard "glob" */
699 232304 : if (planner_shutdown_hook)
700 0 : (*planner_shutdown_hook) (glob, parse, query_string, result);
701 :
702 232304 : if (glob->partition_directory != NULL)
703 6092 : DestroyPartitionDirectory(glob->partition_directory);
704 :
705 232304 : return result;
706 : }
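A minimal sketch of a module pairing the setup and shutdown hooks to bracket each planner run. The signatures are inferred from the call sites in standard_planner() above, so treat them as an assumption; the names are hypothetical, and a production module should also save and chain to any previously installed hooks.

#include "postgres.h"
#include "fmgr.h"
#include "optimizer/planner.h"

PG_MODULE_MAGIC;

/* Runs after "glob" is initialized; may inspect it or nudge *tuple_fraction */
static void
my_planner_setup(PlannerGlobal *glob, Query *parse, const char *query_string,
                 int cursorOptions, double *tuple_fraction, ExplainState *es)
{
    /* e.g. record a planning start time keyed by glob (illustration only) */
}

/* Runs just before "glob" is discarded; may inspect the finished plan */
static void
my_planner_shutdown(PlannerGlobal *glob, Query *parse,
                    const char *query_string, PlannedStmt *result)
{
    /* e.g. report elapsed planning time for "result" (illustration only) */
}

void
_PG_init(void)
{
    planner_setup_hook = my_planner_setup;
    planner_shutdown_hook = my_planner_shutdown;
}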
707 :
708 :
709 : /*--------------------
710 : * subquery_planner
711 : * Invokes the planner on a subquery. We recurse to here for each
712 : * sub-SELECT found in the query tree.
713 : *
714 : * glob is the global state for the current planner run.
715 : * parse is the querytree produced by the parser & rewriter.
716 : * plan_name is the name to assign to this subplan (NULL at the top level).
717 : * parent_root is the immediate parent Query's info (NULL at the top level).
718 : * hasRecursion is true if this is a recursive WITH query.
719 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
720 : * tuple_fraction is interpreted as explained for grouping_planner, below.
721 : * setops is used for set operation subqueries to provide the subquery with
722 : * the context in which it's being used so that Paths correctly sorted for the
723 : * set operation can be generated. NULL when not planning a set operation
724 : * child, or when a child of a set op that isn't interested in sorted input.
725 : *
726 : * Basically, this routine does the stuff that should only be done once
727 : * per Query object. It then calls grouping_planner. At one time,
728 : * grouping_planner could be invoked recursively on the same Query object;
729 : * that's not currently true, but we keep the separation between the two
730 : * routines anyway, in case we need it again someday.
731 : *
732 : * subquery_planner will be called recursively to handle sub-Query nodes
733 : * found within the query's expressions and rangetable.
734 : *
735 : * Returns the PlannerInfo struct ("root") that contains all data generated
736 : * while planning the subquery. In particular, the Path(s) attached to
737 : * the (UPPERREL_FINAL, NULL) upperrel represent our conclusions about the
738 : * cheapest way(s) to implement the query. The top level will select the
739 : * best Path and pass it through createplan.c to produce a finished Plan.
740 : *--------------------
741 : */
742 : PlannerInfo *
743 276976 : subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name,
744 : PlannerInfo *parent_root, bool hasRecursion,
745 : double tuple_fraction, SetOperationStmt *setops)
746 : {
747 : PlannerInfo *root;
748 : List *newWithCheckOptions;
749 : List *newHaving;
750 : bool hasOuterJoins;
751 : bool hasResultRTEs;
752 : RelOptInfo *final_rel;
753 : ListCell *l;
754 :
755 : /* Create a PlannerInfo data structure for this subquery */
756 276976 : root = makeNode(PlannerInfo);
757 276976 : root->parse = parse;
758 276976 : root->glob = glob;
759 276976 : root->query_level = parent_root ? parent_root->query_level + 1 : 1;
760 276976 : root->plan_name = plan_name;
761 276976 : root->parent_root = parent_root;
762 276976 : root->plan_params = NIL;
763 276976 : root->outer_params = NULL;
764 276976 : root->planner_cxt = CurrentMemoryContext;
765 276976 : root->init_plans = NIL;
766 276976 : root->cte_plan_ids = NIL;
767 276976 : root->multiexpr_params = NIL;
768 276976 : root->join_domains = NIL;
769 276976 : root->eq_classes = NIL;
770 276976 : root->ec_merging_done = false;
771 276976 : root->last_rinfo_serial = 0;
772 276976 : root->all_result_relids =
773 276976 : parse->resultRelation ? bms_make_singleton(parse->resultRelation) : NULL;
774 276976 : root->leaf_result_relids = NULL; /* we'll find out leaf-ness later */
775 276976 : root->append_rel_list = NIL;
776 276976 : root->row_identity_vars = NIL;
777 276976 : root->rowMarks = NIL;
778 276976 : memset(root->upper_rels, 0, sizeof(root->upper_rels));
779 276976 : memset(root->upper_targets, 0, sizeof(root->upper_targets));
780 276976 : root->processed_groupClause = NIL;
781 276976 : root->processed_distinctClause = NIL;
782 276976 : root->processed_tlist = NIL;
783 276976 : root->update_colnos = NIL;
784 276976 : root->grouping_map = NULL;
785 276976 : root->minmax_aggs = NIL;
786 276976 : root->qual_security_level = 0;
787 276976 : root->hasPseudoConstantQuals = false;
788 276976 : root->hasAlternativeSubPlans = false;
789 276976 : root->placeholdersFrozen = false;
790 276976 : root->hasRecursion = hasRecursion;
791 276976 : root->assumeReplanning = false;
792 276976 : if (hasRecursion)
793 471 : root->wt_param_id = assign_special_exec_param(root);
794 : else
795 276505 : root->wt_param_id = -1;
796 276976 : root->non_recursive_path = NULL;
797 :
798 : /*
799 : * Create the top-level join domain. This won't have valid contents until
800 : * deconstruct_jointree fills it in, but the node needs to exist before
801 : * that so we can build EquivalenceClasses referencing it.
802 : */
803 276976 : root->join_domains = list_make1(makeNode(JoinDomain));
804 :
805 : /*
806 : * If there is a WITH list, process each WITH query and either convert it
807 : * to RTE_SUBQUERY RTE(s) or build an initplan SubPlan structure for it.
808 : */
809 276976 : if (parse->cteList)
810 1522 : SS_process_ctes(root);
811 :
812 : /*
813 : * If it's a MERGE command, transform the joinlist as appropriate.
814 : */
815 276973 : transform_MERGE_to_join(parse);
816 :
817 : /*
818 : * Scan the rangetable for relation RTEs and retrieve the necessary
819 : * catalog information for each relation. Using this information, clear
820 : * the inh flag for any relation that has no children, collect not-null
821 : * attribute numbers for any relation that has column not-null
822 : * constraints, and expand virtual generated columns for any relation that
823 : * contains them. Note that this step does not descend into sublinks and
824 : * subqueries; if we pull up any sublinks or subqueries below, their
825 : * relation RTEs are processed just before pulling them up.
826 : */
827 276973 : parse = root->parse = preprocess_relation_rtes(root);
828 :
829 : /*
830 : * If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
831 : * that we don't need so many special cases to deal with that situation.
832 : */
833 276973 : replace_empty_jointree(parse);
834 :
835 : /*
836 : * Look for ANY and EXISTS SubLinks in WHERE and JOIN/ON clauses, and try
837 : * to transform them into joins. Note that this step does not descend
838 : * into subqueries; if we pull up any subqueries below, their SubLinks are
839 : * processed just before pulling them up.
840 : */
841 276973 : if (parse->hasSubLinks)
842 18647 : pull_up_sublinks(root);
843 :
844 : /*
845 : * Scan the rangetable for function RTEs, do const-simplification on them,
846 : * and then inline them if possible (producing subqueries that might get
847 : * pulled up next). Recursion issues here are handled in the same way as
848 : * for SubLinks.
849 : */
850 276973 : preprocess_function_rtes(root);
851 :
852 : /*
853 : * Check to see if any subqueries in the jointree can be merged into this
854 : * query.
855 : */
856 276970 : pull_up_subqueries(root);
857 :
858 : /*
859 : * If this is a simple UNION ALL query, flatten it into an appendrel. We
860 : * do this now because it requires applying pull_up_subqueries to the leaf
861 : * queries of the UNION ALL, which weren't touched above because they
862 : * weren't referenced by the jointree (they will be after we do this).
863 : */
864 276967 : if (parse->setOperations)
865 3507 : flatten_simple_union_all(root);
866 :
867 : /*
868 : * Survey the rangetable to see what kinds of entries are present. We can
869 : * skip some later processing if relevant SQL features are not used; for
870 : * example if there are no JOIN RTEs we can avoid the expense of doing
871 : * flatten_join_alias_vars(). This must be done after we have finished
872 : * adding rangetable entries, of course. (Note: actually, processing of
873 : * inherited or partitioned rels can cause RTEs for their child tables to
874 : * get added later; but those must all be RTE_RELATION entries, so they
875 : * don't invalidate the conclusions drawn here.)
876 : */
877 276967 : root->hasJoinRTEs = false;
878 276967 : root->hasLateralRTEs = false;
879 276967 : root->group_rtindex = 0;
880 276967 : hasOuterJoins = false;
881 276967 : hasResultRTEs = false;
882 756428 : foreach(l, parse->rtable)
883 : {
884 479461 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
885 :
886 479461 : switch (rte->rtekind)
887 : {
888 48634 : case RTE_JOIN:
889 48634 : root->hasJoinRTEs = true;
890 48634 : if (IS_OUTER_JOIN(rte->jointype))
891 25270 : hasOuterJoins = true;
892 48634 : break;
893 104658 : case RTE_RESULT:
894 104658 : hasResultRTEs = true;
895 104658 : break;
896 2538 : case RTE_GROUP:
897 : Assert(parse->hasGroupRTE);
898 2538 : root->group_rtindex = list_cell_number(parse->rtable, l) + 1;
899 2538 : break;
900 323631 : default:
901 : /* No work here for other RTE types */
902 323631 : break;
903 : }
904 :
905 479461 : if (rte->lateral)
906 5718 : root->hasLateralRTEs = true;
907 :
908 : /*
909 : * We can also determine the maximum security level required for any
910 : * securityQuals now. Addition of inheritance-child RTEs won't affect
911 : * this, because child tables don't have their own securityQuals; see
912 : * expand_single_inheritance_child().
913 : */
914 479461 : if (rte->securityQuals)
915 1443 : root->qual_security_level = Max(root->qual_security_level,
916 : list_length(rte->securityQuals));
917 : }
918 :
919 : /*
920 : * If we have now verified that the query target relation is
921 : * non-inheriting, mark it as a leaf target.
922 : */
923 276967 : if (parse->resultRelation)
924 : {
925 47217 : RangeTblEntry *rte = rt_fetch(parse->resultRelation, parse->rtable);
926 :
927 47217 : if (!rte->inh)
928 45741 : root->leaf_result_relids =
929 45741 : bms_make_singleton(parse->resultRelation);
930 : }
931 :
932 : /*
933 : * This would be a convenient time to check access permissions for all
934 : * relations mentioned in the query, since it would be better to fail now,
935 : * before doing any detailed planning. However, for historical reasons,
936 : * we leave this to be done at executor startup.
937 : *
938 : * Note, however, that we do need to check access permissions for any view
939 : * relations mentioned in the query, in order to prevent information being
940 : * leaked by selectivity estimation functions, which only check view owner
941 : * permissions on underlying tables (see all_rows_selectable() and its
942 : * callers). This is a little ugly, because it means that access
943 : * permissions for views will be checked twice, which is another reason
944 : * why it would be better to do all the ACL checks here.
945 : */
946 755836 : foreach(l, parse->rtable)
947 : {
948 479069 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
949 :
950 479069 : if (rte->perminfoindex != 0 &&
951 255971 : rte->relkind == RELKIND_VIEW)
952 : {
953 : RTEPermissionInfo *perminfo;
954 : bool result;
955 :
956 10908 : perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
957 10908 : result = ExecCheckOneRelPerms(perminfo);
958 10908 : if (!result)
959 200 : aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_VIEW,
960 200 : get_rel_name(perminfo->relid));
961 : }
962 : }
963 :
964 : /*
965 : * Preprocess RowMark information. We need to do this after subquery
966 : * pullup, so that all base relations are present.
967 : */
968 276767 : preprocess_rowmarks(root);
969 :
970 : /*
971 : * Set hasHavingQual to remember if HAVING clause is present. Needed
972 : * because preprocess_expression will reduce a constant-true condition to
973 : * an empty qual list ... but "HAVING TRUE" is not a semantic no-op.
974 : */
975 276767 : root->hasHavingQual = (parse->havingQual != NULL);
976 :
977 : /*
978 : * Do expression preprocessing on targetlist and quals, as well as other
979 : * random expressions in the querytree. Note that we do not need to
980 : * handle sort/group expressions explicitly, because they are actually
981 : * part of the targetlist.
982 : */
983 274777 : parse->targetList = (List *)
984 276767 : preprocess_expression(root, (Node *) parse->targetList,
985 : EXPRKIND_TARGET);
986 :
987 274777 : newWithCheckOptions = NIL;
988 276339 : foreach(l, parse->withCheckOptions)
989 : {
990 1562 : WithCheckOption *wco = lfirst_node(WithCheckOption, l);
991 :
992 1562 : wco->qual = preprocess_expression(root, wco->qual,
993 : EXPRKIND_QUAL);
994 1562 : if (wco->qual != NULL)
995 1362 : newWithCheckOptions = lappend(newWithCheckOptions, wco);
996 : }
997 274777 : parse->withCheckOptions = newWithCheckOptions;
998 :
999 274777 : parse->returningList = (List *)
1000 274777 : preprocess_expression(root, (Node *) parse->returningList,
1001 : EXPRKIND_TARGET);
1002 :
1003 274777 : preprocess_qual_conditions(root, (Node *) parse->jointree);
1004 :
1005 274777 : parse->havingQual = preprocess_expression(root, parse->havingQual,
1006 : EXPRKIND_QUAL);
1007 :
1008 276244 : foreach(l, parse->windowClause)
1009 : {
1010 1467 : WindowClause *wc = lfirst_node(WindowClause, l);
1011 :
1012 : /* partitionClause/orderClause are sort/group expressions */
1013 1467 : wc->startOffset = preprocess_expression(root, wc->startOffset,
1014 : EXPRKIND_LIMIT);
1015 1467 : wc->endOffset = preprocess_expression(root, wc->endOffset,
1016 : EXPRKIND_LIMIT);
1017 : }
1018 :
1019 274777 : parse->limitOffset = preprocess_expression(root, parse->limitOffset,
1020 : EXPRKIND_LIMIT);
1021 274777 : parse->limitCount = preprocess_expression(root, parse->limitCount,
1022 : EXPRKIND_LIMIT);
1023 :
1024 274777 : if (parse->onConflict)
1025 : {
1026 2290 : parse->onConflict->arbiterElems = (List *)
1027 1145 : preprocess_expression(root,
1028 1145 : (Node *) parse->onConflict->arbiterElems,
1029 : EXPRKIND_ARBITER_ELEM);
1030 2290 : parse->onConflict->arbiterWhere =
1031 1145 : preprocess_expression(root,
1032 1145 : parse->onConflict->arbiterWhere,
1033 : EXPRKIND_QUAL);
1034 2290 : parse->onConflict->onConflictSet = (List *)
1035 1145 : preprocess_expression(root,
1036 1145 : (Node *) parse->onConflict->onConflictSet,
1037 : EXPRKIND_TARGET);
1038 1145 : parse->onConflict->onConflictWhere =
1039 1145 : preprocess_expression(root,
1040 1145 : parse->onConflict->onConflictWhere,
1041 : EXPRKIND_QUAL);
1042 : /* exclRelTlist contains only Vars, so no preprocessing needed */
1043 : }
1044 :
1045 276218 : foreach(l, parse->mergeActionList)
1046 : {
1047 1441 : MergeAction *action = (MergeAction *) lfirst(l);
1048 :
1049 1441 : action->targetList = (List *)
1050 1441 : preprocess_expression(root,
1051 1441 : (Node *) action->targetList,
1052 : EXPRKIND_TARGET);
1053 1441 : action->qual =
1054 1441 : preprocess_expression(root,
1055 : (Node *) action->qual,
1056 : EXPRKIND_QUAL);
1057 : }
1058 :
1059 274777 : parse->mergeJoinCondition =
1060 274777 : preprocess_expression(root, parse->mergeJoinCondition, EXPRKIND_QUAL);
1061 :
1062 274777 : root->append_rel_list = (List *)
1063 274777 : preprocess_expression(root, (Node *) root->append_rel_list,
1064 : EXPRKIND_APPINFO);
1065 :
1066 : /* Also need to preprocess expressions within RTEs */
1067 751352 : foreach(l, parse->rtable)
1068 : {
1069 476575 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1070 : int kind;
1071 : ListCell *lcsq;
1072 :
1073 476575 : if (rte->rtekind == RTE_RELATION)
1074 : {
1075 247217 : if (rte->tablesample)
1076 114 : rte->tablesample = (TableSampleClause *)
1077 114 : preprocess_expression(root,
1078 114 : (Node *) rte->tablesample,
1079 : EXPRKIND_TABLESAMPLE);
1080 : }
1081 229358 : else if (rte->rtekind == RTE_SUBQUERY)
1082 : {
1083 : /*
1084 : * We don't want to do all preprocessing yet on the subquery's
1085 : * expressions, since that will happen when we plan it. But if it
1086 : * contains any join aliases of our level, those have to get
1087 : * expanded now, because planning of the subquery won't do it.
1088 : * That's only possible if the subquery is LATERAL.
1089 : */
1090 41680 : if (rte->lateral && root->hasJoinRTEs)
1091 920 : rte->subquery = (Query *)
1092 920 : flatten_join_alias_vars(root, root->parse,
1093 920 : (Node *) rte->subquery);
1094 : }
1095 187678 : else if (rte->rtekind == RTE_FUNCTION)
1096 : {
1097 : /* Preprocess the function expression(s) fully */
1098 26432 : kind = rte->lateral ? EXPRKIND_RTFUNC_LATERAL : EXPRKIND_RTFUNC;
1099 26432 : rte->functions = (List *)
1100 26432 : preprocess_expression(root, (Node *) rte->functions, kind);
1101 : }
1102 161246 : else if (rte->rtekind == RTE_TABLEFUNC)
1103 : {
1104 : /* Preprocess the function expression(s) fully */
1105 313 : kind = rte->lateral ? EXPRKIND_TABLEFUNC_LATERAL : EXPRKIND_TABLEFUNC;
1106 313 : rte->tablefunc = (TableFunc *)
1107 313 : preprocess_expression(root, (Node *) rte->tablefunc, kind);
1108 : }
1109 160933 : else if (rte->rtekind == RTE_VALUES)
1110 : {
1111 : /* Preprocess the values lists fully */
1112 4298 : kind = rte->lateral ? EXPRKIND_VALUES_LATERAL : EXPRKIND_VALUES;
1113 4298 : rte->values_lists = (List *)
1114 4298 : preprocess_expression(root, (Node *) rte->values_lists, kind);
1115 : }
1116 156635 : else if (rte->rtekind == RTE_GROUP)
1117 : {
1118 : /* Preprocess the groupexprs list fully */
1119 2538 : rte->groupexprs = (List *)
1120 2538 : preprocess_expression(root, (Node *) rte->groupexprs,
1121 : EXPRKIND_GROUPEXPR);
1122 : }
1123 :
1124 : /*
1125 : * Process each element of the securityQuals list as if it were a
1126 : * separate qual expression (as indeed it is). We need to do it this
1127 : * way to get proper canonicalization of AND/OR structure. Note that
1128 : * this converts each element into an implicit-AND sublist.
1129 : */
1130 478215 : foreach(lcsq, rte->securityQuals)
1131 : {
1132 1640 : lfirst(lcsq) = preprocess_expression(root,
1133 1640 : (Node *) lfirst(lcsq),
1134 : EXPRKIND_QUAL);
1135 : }
1136 : }
1137 :
1138 : /*
1139 : * Now that we are done preprocessing expressions, and in particular done
1140 : * flattening join alias variables, get rid of the joinaliasvars lists.
1141 : * They no longer match what expressions in the rest of the tree look
1142 : * like, because we have not preprocessed expressions in those lists (and
1143 : * do not want to; for example, expanding a SubLink there would result in
1144 : * a useless unreferenced subplan). Leaving them in place simply creates
1145 : * a hazard for later scans of the tree. We could try to prevent that by
1146 : * using QTW_IGNORE_JOINALIASES in every tree scan done after this point,
1147 : * but that doesn't sound very reliable.
1148 : */
1149 274777 : if (root->hasJoinRTEs)
1150 : {
1151 167969 : foreach(l, parse->rtable)
1152 : {
1153 138400 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
1154 :
1155 138400 : rte->joinaliasvars = NIL;
1156 : }
1157 : }
1158 :
1159 : /*
1160 : * Replace any Vars in the subquery's targetlist and havingQual that
1161 : * reference GROUP outputs with the underlying grouping expressions.
1162 : *
1163 : * Note that we need to perform this replacement after we've preprocessed
1164 : * the grouping expressions. This is to ensure that there is only one
1165 : * instance of SubPlan for each SubLink contained within the grouping
1166 : * expressions.
1167 : */
1168 274777 : if (parse->hasGroupRTE)
1169 : {
1170 2538 : parse->targetList = (List *)
1171 2538 : flatten_group_exprs(root, root->parse, (Node *) parse->targetList);
1172 2538 : parse->havingQual =
1173 2538 : flatten_group_exprs(root, root->parse, parse->havingQual);
1174 : }
1175 :
1176 : /* Constant-folding might have removed all set-returning functions */
1177 274777 : if (parse->hasTargetSRFs)
1178 6041 : parse->hasTargetSRFs = expression_returns_set((Node *) parse->targetList);
1179 :
1180 : /*
1181 : * If we have grouping sets, expand the groupingSets tree of this query to
1182 : * a flat list of grouping sets. We need to do this before optimizing
1183 : * HAVING, since we can't easily tell if there's an empty grouping set
1184 : * until we have this representation.
1185 : */
1186 274777 : if (parse->groupingSets)
1187 : {
1188 510 : parse->groupingSets =
1189 510 : expand_grouping_sets(parse->groupingSets, parse->groupDistinct, -1);
1190 : }
1191 :
1192 : /*
1193 : * In some cases we may want to transfer a HAVING clause into WHERE. We
1194 : * cannot do so if the HAVING clause contains aggregates (obviously) or
1195 : * volatile functions (since a HAVING clause is supposed to be executed
1196 : * only once per group). We also can't do this if there are any grouping
1197 : * sets and the clause references any columns that are nullable by the
1198 : * grouping sets; the nulled values of those columns are not available
1199 : * before the grouping step. (The test on groupClause might seem wrong,
1200 : * but it's okay: it's just an optimization to avoid running pull_varnos
1201 : * when there cannot be any Vars in the HAVING clause.)
1202 : *
1203 : * Also, it may be that the clause is so expensive to execute that we're
1204 : * better off doing it only once per group, despite the loss of
1205 : * selectivity. This is hard to estimate short of doing the entire
1206 : * planning process twice, so we use a heuristic: clauses containing
1207 : * subplans are left in HAVING. Otherwise, we move or copy the HAVING
1208 : * clause into WHERE, in hopes of eliminating tuples before aggregation
1209 : * instead of after.
1210 : *
1211 : * If the query has no empty grouping set then we can simply move such a
1212 : * clause into WHERE; any group that fails the clause will not be in the
1213 : * output because none of its tuples will reach the grouping or
1214 : * aggregation stage. Otherwise we have to keep the clause in HAVING to
1215 : * ensure that we don't emit a bogus aggregated row. But then the HAVING
1216 : * clause must be degenerate (variable-free), so we can copy it into WHERE
1217 : * so that query_planner() can use it in a gating Result node. (This could
1218 : * be done better, but it seems not worth optimizing.)
1219 : *
1220 : * Note that a HAVING clause may contain expressions that are not fully
1221 : * preprocessed. This can happen if these expressions are part of
1222 : * grouping items. In such cases, they are replaced with GROUP Vars in
1223 : * the parser and then replaced back after we're done with expression
1224 : * preprocessing on havingQual. This is not an issue if the clause
1225 : * remains in HAVING, because these expressions will be matched to lower
1226 : * target items in setrefs.c. However, if the clause is moved or copied
1227 : * into WHERE, we need to ensure that these expressions are fully
1228 : * preprocessed.
1229 : *
1230 : * Note that both havingQual and parse->jointree->quals are in
1231 : * implicitly-ANDed-list form at this point, even though they are declared
1232 : * as Node *.
1233 : */
1234 274777 : newHaving = NIL;
1235 275471 : foreach(l, (List *) parse->havingQual)
1236 : {
1237 694 : Node *havingclause = (Node *) lfirst(l);
1238 :
1239 952 : if (contain_agg_clause(havingclause) ||
1240 516 : contain_volatile_functions(havingclause) ||
1241 258 : contain_subplans(havingclause) ||
1242 318 : (parse->groupClause && parse->groupingSets &&
1243 60 : bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
1244 : {
1245 : /* keep it in HAVING */
1246 472 : newHaving = lappend(newHaving, havingclause);
1247 : }
1248 222 : else if (parse->groupClause &&
1249 204 : (parse->groupingSets == NIL ||
1250 24 : (List *) linitial(parse->groupingSets) != NIL))
1251 198 : {
1252 : /* There is GROUP BY, but no empty grouping set */
1253 : Node *whereclause;
1254 :
1255 : /* Preprocess the HAVING clause fully */
1256 198 : whereclause = preprocess_expression(root, havingclause,
1257 : EXPRKIND_QUAL);
1258 : /* ... and move it to WHERE */
1259 198 : parse->jointree->quals = (Node *)
1260 198 : list_concat((List *) parse->jointree->quals,
1261 : (List *) whereclause);
1262 : }
1263 : else
1264 : {
1265 : /* There is an empty grouping set (perhaps implicitly) */
1266 : Node *whereclause;
1267 :
1268 : /* Preprocess the HAVING clause fully */
1269 24 : whereclause = preprocess_expression(root, copyObject(havingclause),
1270 : EXPRKIND_QUAL);
1271 : /* ... and put a copy in WHERE */
1272 48 : parse->jointree->quals = (Node *)
1273 24 : list_concat((List *) parse->jointree->quals,
1274 : (List *) whereclause);
1275 : /* ... and also keep it in HAVING */
1276 24 : newHaving = lappend(newHaving, havingclause);
1277 : }
1278 : }
1279 274777 : parse->havingQual = (Node *) newHaving;
1280 :
1281 : /*
1282 : * If we have any outer joins, try to reduce them to plain inner joins.
1283 : * This step is most easily done after we've done expression
1284 : * preprocessing.
1285 : */
1286 274777 : if (hasOuterJoins)
1287 17477 : reduce_outer_joins(root);
1288 :
1289 : /*
1290 : * If we have any RTE_RESULT relations, see if they can be deleted from
1291 : * the jointree. We also rely on this processing to flatten single-child
1292 : * FromExprs underneath outer joins. This step is most effectively done
1293 : * after we've done expression preprocessing and outer join reduction.
1294 : */
1295 274777 : if (hasResultRTEs || hasOuterJoins)
1296 119359 : remove_useless_result_rtes(root);
1297 :
1298 : /*
1299 : * Do the main planning.
1300 : */
1301 274777 : grouping_planner(root, tuple_fraction, setops);
1302 :
1303 : /*
1304 : * Capture the set of outer-level param IDs we have access to, for use in
1305 : * extParam/allParam calculations later.
1306 : */
1307 274741 : SS_identify_outer_params(root);
1308 :
1309 : /*
1310 : * If any initPlans were created in this query level, adjust the surviving
1311 : * Paths' costs and parallel-safety flags to account for them. The
1312 : * initPlans won't actually get attached to the plan tree till
1313 : * create_plan() runs, but we must include their effects now.
1314 : */
1315 274741 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1316 274741 : SS_charge_for_initplans(root, final_rel);
1317 :
1318 : /*
1319 : * Make sure we've identified the cheapest Path for the final rel. (By
1320 : * doing this here not in grouping_planner, we include initPlan costs in
1321 : * the decision, though it's unlikely that will change anything.)
1322 : */
1323 274741 : set_cheapest(final_rel);
1324 :
1325 274741 : return root;
1326 : }
1327 :
1328 : /*
1329 : * preprocess_expression
1330 : * Do subquery_planner's preprocessing work for an expression,
1331 : * which can be a targetlist, a WHERE clause (including JOIN/ON
1332 : * conditions), a HAVING clause, or a few other things.
1333 : */
1334 : static Node *
1335 2308052 : preprocess_expression(PlannerInfo *root, Node *expr, int kind)
1336 : {
1337 : /*
1338 : * Fall out quickly if expression is empty. This occurs often enough to
1339 : * be worth checking. Note that null->null is the correct conversion for
1340 : * implicit-AND result format, too.
1341 : */
1342 2308052 : if (expr == NULL)
1343 1821019 : return NULL;
1344 :
1345 : /*
1346 : * If the query has any join RTEs, replace join alias variables with
1347 : * base-relation variables. We must do this first, since any expressions
1348 : * we may extract from the joinaliasvars lists have not been preprocessed.
1349 : * For example, if we did this after sublink processing, sublinks expanded
1350 : * out from join aliases would not get processed. But we can skip this in
1351 : * non-lateral RTE functions, VALUES lists, and TABLESAMPLE clauses, since
1352 : * they can't contain any Vars of the current query level.
1353 : */
1354 487033 : if (root->hasJoinRTEs &&
1355 213471 : !(kind == EXPRKIND_RTFUNC ||
1356 106647 : kind == EXPRKIND_VALUES ||
1357 : kind == EXPRKIND_TABLESAMPLE ||
1358 : kind == EXPRKIND_TABLEFUNC))
1359 106638 : expr = flatten_join_alias_vars(root, root->parse, expr);
1360 :
1361 : /*
1362 : * Simplify constant expressions. For function RTEs, this was already
1363 : * done by preprocess_function_rtes. (But note we must do it again for
1364 : * EXPRKIND_RTFUNC_LATERAL, because those might by now contain
1365 : * un-simplified subexpressions inserted by flattening of subqueries or
1366 : * join alias variables.)
1367 : *
1368 : * Note: an essential effect of this is to convert named-argument function
1369 : * calls to positional notation and insert the current actual values of
1370 : * any default arguments for functions. To ensure that happens, we *must*
1371 : * process all expressions here. Previous PG versions sometimes skipped
1372 : * const-simplification if it didn't seem worth the trouble, but we can't
1373 : * do that anymore.
1374 : *
1375 : * Note: this also flattens nested AND and OR expressions into N-argument
1376 : * form. All processing of a qual expression after this point must be
1377 : * careful to maintain AND/OR flatness --- that is, do not generate a tree
1378 : * with AND directly under AND, nor OR directly under OR.
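 : * For example, "(a AND (b AND c))" is flattened here into a single
 : * three-argument AND(a, b, c); later steps must take care not to
 : * reintroduce the nested form.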
1379 : */
1380 487033 : if (kind != EXPRKIND_RTFUNC)
1381 464931 : expr = eval_const_expressions(root, expr);
1382 :
1383 : /*
1384 : * If it's a qual or havingQual, canonicalize it.
1385 : */
1386 485043 : if (kind == EXPRKIND_QUAL)
1387 : {
1388 175335 : expr = (Node *) canonicalize_qual((Expr *) expr, false);
1389 :
1390 : #ifdef OPTIMIZER_DEBUG
1391 : printf("After canonicalize_qual()\n");
1392 : pprint(expr);
1393 : #endif
1394 : }
1395 :
1396 : /*
1397 : * Check for ANY ScalarArrayOpExpr with Const arrays and set the
1398 : * hashfuncid of any that might execute more quickly by using hash lookups
1399 : * instead of a linear search.
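 : * For example, "x = ANY ('{...}'::int4[])" with a sufficiently large
 : * constant array can then be probed via a hash table instead of being
 : * compared element by element; convert_saop_to_hashed_saop applies the
 : * minimum-array-size test.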
1400 : */
1401 485043 : if (kind == EXPRKIND_QUAL || kind == EXPRKIND_TARGET)
1402 : {
1403 443929 : convert_saop_to_hashed_saop(expr);
1404 : }
1405 :
1406 : /* Expand SubLinks to SubPlans */
1407 485043 : if (root->parse->hasSubLinks)
1408 54550 : expr = SS_process_sublinks(root, expr, (kind == EXPRKIND_QUAL));
1409 :
1410 : /*
1411 : * XXX do not insert anything here unless you have grokked the comments in
1412 : * SS_replace_correlation_vars ...
1413 : */
1414 :
1415 : /* Replace uplevel vars with Param nodes (this IS possible in VALUES) */
1416 485043 : if (root->query_level > 1)
1417 96146 : expr = SS_replace_correlation_vars(root, expr);
1418 :
1419 : /*
1420 : * If it's a qual or havingQual, convert it to implicit-AND format. (We
1421 : * don't want to do this before eval_const_expressions, since the latter
1422 : * would be unable to simplify a top-level AND correctly. Also,
1423 : * SS_process_sublinks expects explicit-AND format.)
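 : * For example, "a AND b AND c" becomes the three-element implicit-AND
 : * list (a, b, c), while a qual that is not an AND at all becomes a
 : * one-element list.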
1424 : */
1425 485043 : if (kind == EXPRKIND_QUAL)
1426 175335 : expr = (Node *) make_ands_implicit((Expr *) expr);
1427 :
1428 485043 : return expr;
1429 : }
1430 :
1431 : /*
1432 : * preprocess_qual_conditions
1433 : * Recursively scan the query's jointree and do subquery_planner's
1434 : * preprocessing work on each qual condition found therein.
1435 : */
1436 : static void
1437 684938 : preprocess_qual_conditions(PlannerInfo *root, Node *jtnode)
1438 : {
1439 684938 : if (jtnode == NULL)
1440 0 : return;
1441 684938 : if (IsA(jtnode, RangeTblRef))
1442 : {
1443 : /* nothing to do here */
1444 : }
1445 335063 : else if (IsA(jtnode, FromExpr))
1446 : {
1447 282503 : FromExpr *f = (FromExpr *) jtnode;
1448 : ListCell *l;
1449 :
1450 587544 : foreach(l, f->fromlist)
1451 305041 : preprocess_qual_conditions(root, lfirst(l));
1452 :
1453 282503 : f->quals = preprocess_expression(root, f->quals, EXPRKIND_QUAL);
1454 : }
1455 52560 : else if (IsA(jtnode, JoinExpr))
1456 : {
1457 52560 : JoinExpr *j = (JoinExpr *) jtnode;
1458 :
1459 52560 : preprocess_qual_conditions(root, j->larg);
1460 52560 : preprocess_qual_conditions(root, j->rarg);
1461 :
1462 52560 : j->quals = preprocess_expression(root, j->quals, EXPRKIND_QUAL);
1463 : }
1464 : else
1465 0 : elog(ERROR, "unrecognized node type: %d",
1466 : (int) nodeTag(jtnode));
1467 : }
1468 :
1469 : /*
1470 : * preprocess_phv_expression
1471 : * Do preprocessing on a PlaceHolderVar expression that's been pulled up.
1472 : *
1473 : * If a LATERAL subquery references an output of another subquery, and that
1474 : * output must be wrapped in a PlaceHolderVar because of an intermediate outer
1475 : * join, then we'll push the PlaceHolderVar expression down into the subquery
1476 : * and later pull it back up during find_lateral_references, which runs after
1477 : * subquery_planner has preprocessed all the expressions that were in the
1478 : * current query level to start with. So we need to preprocess it then.
1479 : */
1480 : Expr *
1481 45 : preprocess_phv_expression(PlannerInfo *root, Expr *expr)
1482 : {
1483 45 : return (Expr *) preprocess_expression(root, (Node *) expr, EXPRKIND_PHV);
1484 : }
1485 :
1486 : /*--------------------
1487 : * grouping_planner
1488 : * Perform planning steps related to grouping, aggregation, etc.
1489 : *
1490 : * This function adds all required top-level processing to the scan/join
1491 : * Path(s) produced by query_planner.
1492 : *
1493 : * tuple_fraction is the fraction of tuples we expect will be retrieved.
1494 : * tuple_fraction is interpreted as follows:
1495 : * 0: expect all tuples to be retrieved (normal case)
1496 : * 0 < tuple_fraction < 1: expect the given fraction of tuples available
1497 : * from the plan to be retrieved
1498 : * tuple_fraction >= 1: tuple_fraction is the absolute number of tuples
1499 : * expected to be retrieved (ie, a LIMIT specification).
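 : * For example, when a cursor is planned, standard_planner passes
 : * cursor_tuple_fraction (0.1 by default) down to here, while a plain
 : * SELECT arrives with 0; a query-level LIMIT is folded in just below
 : * by preprocess_limit.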
1500 : * setops is used for set operation subqueries to provide the subquery with
1501 : * the context in which it's being used so that Paths correctly sorted for the
1502 : * set operation can be generated. NULL when not planning a set operation
1503 : * child, or when a child of a set op that isn't interested in sorted input.
1504 : *
1505 : * Returns nothing; the useful output is in the Paths we attach to the
1506 : * (UPPERREL_FINAL, NULL) upperrel in *root. In addition,
1507 : * root->processed_tlist contains the final processed targetlist.
1508 : *
1509 : * Note that we have not done set_cheapest() on the final rel; it's convenient
1510 : * to leave this to the caller.
1511 : *--------------------
1512 : */
1513 : static void
1514 274777 : grouping_planner(PlannerInfo *root, double tuple_fraction,
1515 : SetOperationStmt *setops)
1516 : {
1517 274777 : Query *parse = root->parse;
1518 274777 : int64 offset_est = 0;
1519 274777 : int64 count_est = 0;
1520 274777 : double limit_tuples = -1.0;
1521 274777 : bool have_postponed_srfs = false;
1522 : PathTarget *final_target;
1523 : List *final_targets;
1524 : List *final_targets_contain_srfs;
1525 : bool final_target_parallel_safe;
1526 : RelOptInfo *current_rel;
1527 : RelOptInfo *final_rel;
1528 : FinalPathExtraData extra;
1529 : ListCell *lc;
1530 :
1531 : /* Tweak caller-supplied tuple_fraction if have LIMIT/OFFSET */
1532 274777 : if (parse->limitCount || parse->limitOffset)
1533 : {
1534 2625 : tuple_fraction = preprocess_limit(root, tuple_fraction,
1535 : &offset_est, &count_est);
1536 :
1537 : /*
1538 : * If we have a known LIMIT, and don't have an unknown OFFSET, we can
1539 : * estimate the effects of using a bounded sort.
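 : * For example, LIMIT 10 OFFSET 5 gives limit_tuples = 15, letting the
 : * sort costing assume a top-N heapsort of 15 rows rather than a full
 : * sort of the input.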
1540 : */
1541 2625 : if (count_est > 0 && offset_est >= 0)
1542 2347 : limit_tuples = (double) count_est + (double) offset_est;
1543 : }
1544 :
1545 : /* Make tuple_fraction accessible to lower-level routines */
1546 274777 : root->tuple_fraction = tuple_fraction;
1547 :
1548 274777 : if (parse->setOperations)
1549 : {
1550 : /*
1551 : * Construct Paths for set operations. The results will not need any
1552 : * work except perhaps a top-level sort and/or LIMIT. Note that any
1553 : * special work for recursive unions is the responsibility of
1554 : * plan_set_operations.
1555 : */
1556 3111 : current_rel = plan_set_operations(root);
1557 :
1558 : /*
1559 : * We should not need to call preprocess_targetlist, since we must be
1560 : * in a SELECT query node. Instead, use the processed_tlist returned
1561 : * by plan_set_operations (since this tells whether it returned any
1562 : * resjunk columns!), and transfer any sort key information from the
1563 : * original tlist.
1564 : */
1565 : Assert(parse->commandType == CMD_SELECT);
1566 :
1567 : /* for safety, copy processed_tlist instead of modifying in-place */
1568 3108 : root->processed_tlist =
1569 3108 : postprocess_setop_tlist(copyObject(root->processed_tlist),
1570 : parse->targetList);
1571 :
1572 : /* Also extract the PathTarget form of the setop result tlist */
1573 3108 : final_target = current_rel->cheapest_total_path->pathtarget;
1574 :
1575 : /* And check whether it's parallel safe */
1576 : final_target_parallel_safe =
1577 3108 : is_parallel_safe(root, (Node *) final_target->exprs);
1578 :
1579 : /* The setop result tlist couldn't contain any SRFs */
1580 : Assert(!parse->hasTargetSRFs);
1581 3108 : final_targets = final_targets_contain_srfs = NIL;
1582 :
1583 : /*
1584 : * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
1585 : * checked already, but let's make sure).
1586 : */
1587 3108 : if (parse->rowMarks)
1588 0 : ereport(ERROR,
1589 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1590 : /*------
1591 : translator: %s is a SQL row locking clause such as FOR UPDATE */
1592 : errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT",
1593 : LCS_asString(linitial_node(RowMarkClause,
1594 : parse->rowMarks)->strength))));
1595 :
1596 : /*
1597 : * Calculate pathkeys that represent result ordering requirements
1598 : */
1599 : Assert(parse->distinctClause == NIL);
1600 3108 : root->sort_pathkeys = make_pathkeys_for_sortclauses(root,
1601 : parse->sortClause,
1602 : root->processed_tlist);
1603 : }
1604 : else
1605 : {
1606 : /* No set operations, do regular planning */
1607 : PathTarget *sort_input_target;
1608 : List *sort_input_targets;
1609 : List *sort_input_targets_contain_srfs;
1610 : bool sort_input_target_parallel_safe;
1611 : PathTarget *grouping_target;
1612 : List *grouping_targets;
1613 : List *grouping_targets_contain_srfs;
1614 : bool grouping_target_parallel_safe;
1615 : PathTarget *scanjoin_target;
1616 : List *scanjoin_targets;
1617 : List *scanjoin_targets_contain_srfs;
1618 : bool scanjoin_target_parallel_safe;
1619 : bool scanjoin_target_same_exprs;
1620 : bool have_grouping;
1621 271666 : WindowFuncLists *wflists = NULL;
1622 271666 : List *activeWindows = NIL;
1623 271666 : grouping_sets_data *gset_data = NULL;
1624 : standard_qp_extra qp_extra;
1625 :
1626 : /* A recursive query should always have setOperations */
1627 : Assert(!root->hasRecursion);
1628 :
1629 : /* Preprocess grouping sets and GROUP BY clause, if any */
1630 271666 : if (parse->groupingSets)
1631 : {
1632 510 : gset_data = preprocess_grouping_sets(root);
1633 : }
1634 271156 : else if (parse->groupClause)
1635 : {
1636 : /* Preprocess regular GROUP BY clause, if any */
1637 2067 : root->processed_groupClause = preprocess_groupclause(root, NIL);
1638 : }
1639 :
1640 : /*
1641 : * Preprocess targetlist. Note that much of the remaining planning
1642 : * work will be done with the PathTarget representation of tlists, but
1643 : * we must also maintain the full representation of the final tlist so
1644 : * that we can transfer its decoration (resnames etc) to the topmost
1645 : * tlist of the finished Plan. This is kept in processed_tlist.
1646 : */
1647 271663 : preprocess_targetlist(root);
1648 :
1649 : /*
1650 : * Mark all the aggregates with resolved aggtranstypes, and detect
1651 : * aggregates that are duplicates or can share transition state. We
1652 : * must do this before slicing and dicing the tlist into various
1653 : * pathtargets, else some copies of the Aggref nodes might escape
1654 : * being marked.
1655 : */
1656 271663 : if (parse->hasAggs)
1657 : {
1658 22596 : preprocess_aggrefs(root, (Node *) root->processed_tlist);
1659 22596 : preprocess_aggrefs(root, (Node *) parse->havingQual);
1660 : }
1661 :
1662 : /*
1663 : * Locate any window functions in the tlist. (We don't need to look
1664 : * anywhere else, since expressions used in ORDER BY will be in there
1665 : * too.) Note that they could all have been eliminated by constant
1666 : * folding, in which case we don't need to do any more work.
1667 : */
1668 271663 : if (parse->hasWindowFuncs)
1669 : {
1670 1341 : wflists = find_window_functions((Node *) root->processed_tlist,
1671 1341 : list_length(parse->windowClause));
1672 1341 : if (wflists->numWindowFuncs > 0)
1673 : {
1674 : /*
1675 : * See if any modifications can be made to each WindowClause
1676 : * to allow the executor to execute the WindowFuncs more
1677 : * quickly.
1678 : */
1679 1338 : optimize_window_clauses(root, wflists);
1680 :
1681 : /* Extract the list of windows actually in use. */
1682 1338 : activeWindows = select_active_windows(root, wflists);
1683 :
1684 : /* Make sure they all have names, for EXPLAIN's use. */
1685 1338 : name_active_windows(activeWindows);
1686 : }
1687 : else
1688 3 : parse->hasWindowFuncs = false;
1689 : }
1690 :
1691 : /*
1692 : * Preprocess MIN/MAX aggregates, if any. Note: be careful about
1693 : * adding logic between here and the query_planner() call. Anything
1694 : * that is needed in MIN/MAX-optimizable cases will have to be
1695 : * duplicated in planagg.c.
1696 : */
1697 271663 : if (parse->hasAggs)
1698 22596 : preprocess_minmax_aggregates(root);
1699 :
1700 : /*
1701 : * Figure out whether there's a hard limit on the number of rows that
1702 : * query_planner's result subplan needs to return. Even if we know a
1703 : * hard limit overall, it doesn't apply if the query has any
1704 : * grouping/aggregation operations, or SRFs in the tlist.
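 : * For example, "SELECT count(*) FROM t LIMIT 1" must still read all of
 : * t: the LIMIT bounds the aggregate's one-row output, not its input.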
1705 : */
1706 271663 : if (parse->groupClause ||
1707 269128 : parse->groupingSets ||
1708 269089 : parse->distinctClause ||
1709 267599 : parse->hasAggs ||
1710 247223 : parse->hasWindowFuncs ||
1711 245957 : parse->hasTargetSRFs ||
1712 240175 : root->hasHavingQual)
1713 31500 : root->limit_tuples = -1.0;
1714 : else
1715 240163 : root->limit_tuples = limit_tuples;
1716 :
1717 : /* Set up data needed by standard_qp_callback */
1718 271663 : qp_extra.activeWindows = activeWindows;
1719 271663 : qp_extra.gset_data = gset_data;
1720 :
1721 : /*
1722 : * If we're a subquery for a set operation, store the SetOperationStmt
1723 : * in qp_extra.
1724 : */
1725 271663 : qp_extra.setop = setops;
1726 :
1727 : /*
1728 : * Generate the best unsorted and presorted paths for the scan/join
1729 : * portion of this Query, ie the processing represented by the
1730 : * FROM/WHERE clauses. (Note there may not be any presorted paths.)
1731 : * We also generate (in standard_qp_callback) pathkey representations
1732 : * of the query's sort clause, distinct clause, etc.
1733 : */
1734 271663 : current_rel = query_planner(root, standard_qp_callback, &qp_extra);
1735 :
1736 : /*
1737 : * Convert the query's result tlist into PathTarget format.
1738 : *
1739 : * Note: this cannot be done before query_planner() has performed
1740 : * appendrel expansion, because that might add resjunk entries to
1741 : * root->processed_tlist. Waiting till afterwards is also helpful
1742 : * because the target width estimates can use per-Var width numbers
1743 : * that were obtained within query_planner().
1744 : */
1745 271636 : final_target = create_pathtarget(root, root->processed_tlist);
1746 : final_target_parallel_safe =
1747 271636 : is_parallel_safe(root, (Node *) final_target->exprs);
1748 :
1749 : /*
1750 : * If ORDER BY was given, consider whether we should use a post-sort
1751 : * projection, and compute the adjusted target for preceding steps if
1752 : * so.
1753 : */
1754 271636 : if (parse->sortClause)
1755 : {
1756 36082 : sort_input_target = make_sort_input_target(root,
1757 : final_target,
1758 : &have_postponed_srfs);
1759 : sort_input_target_parallel_safe =
1760 36082 : is_parallel_safe(root, (Node *) sort_input_target->exprs);
1761 : }
1762 : else
1763 : {
1764 235554 : sort_input_target = final_target;
1765 235554 : sort_input_target_parallel_safe = final_target_parallel_safe;
1766 : }
1767 :
1768 : /*
1769 : * If we have window functions to deal with, the output from any
1770 : * grouping step needs to be what the window functions want;
1771 : * otherwise, it should be sort_input_target.
1772 : */
1773 271636 : if (activeWindows)
1774 : {
1775 1338 : grouping_target = make_window_input_target(root,
1776 : final_target,
1777 : activeWindows);
1778 : grouping_target_parallel_safe =
1779 1338 : is_parallel_safe(root, (Node *) grouping_target->exprs);
1780 : }
1781 : else
1782 : {
1783 270298 : grouping_target = sort_input_target;
1784 270298 : grouping_target_parallel_safe = sort_input_target_parallel_safe;
1785 : }
1786 :
1787 : /*
1788 : * If we have grouping or aggregation to do, the topmost scan/join
1789 : * plan node must emit what the grouping step wants; otherwise, it
1790 : * should emit grouping_target.
1791 : */
1792 269101 : have_grouping = (parse->groupClause || parse->groupingSets ||
1793 540737 : parse->hasAggs || root->hasHavingQual);
1794 271636 : if (have_grouping)
1795 : {
1796 22981 : scanjoin_target = make_group_input_target(root, final_target);
1797 : scanjoin_target_parallel_safe =
1798 22981 : is_parallel_safe(root, (Node *) scanjoin_target->exprs);
1799 : }
1800 : else
1801 : {
1802 248655 : scanjoin_target = grouping_target;
1803 248655 : scanjoin_target_parallel_safe = grouping_target_parallel_safe;
1804 : }
1805 :
1806 : /*
1807 : * If there are any SRFs in the targetlist, we must separate each of
1808 : * these PathTargets into SRF-computing and SRF-free targets. Replace
1809 : * each of the named targets with a SRF-free version, and remember the
1810 : * list of additional projection steps we need to add afterwards.
1811 : */
1812 271636 : if (parse->hasTargetSRFs)
1813 : {
1814 : /* final_target doesn't recompute any SRFs in sort_input_target */
1815 6041 : split_pathtarget_at_srfs(root, final_target, sort_input_target,
1816 : &final_targets,
1817 : &final_targets_contain_srfs);
1818 6041 : final_target = linitial_node(PathTarget, final_targets);
1819 : Assert(!linitial_int(final_targets_contain_srfs));
1820 : /* likewise for sort_input_target vs. grouping_target */
1821 6041 : split_pathtarget_at_srfs(root, sort_input_target, grouping_target,
1822 : &sort_input_targets,
1823 : &sort_input_targets_contain_srfs);
1824 6041 : sort_input_target = linitial_node(PathTarget, sort_input_targets);
1825 : Assert(!linitial_int(sort_input_targets_contain_srfs));
1826 : /* likewise for grouping_target vs. scanjoin_target */
1827 6041 : split_pathtarget_at_srfs(root,
1828 : grouping_target, scanjoin_target,
1829 : &grouping_targets,
1830 : &grouping_targets_contain_srfs);
1831 6041 : grouping_target = linitial_node(PathTarget, grouping_targets);
1832 : Assert(!linitial_int(grouping_targets_contain_srfs));
1833 : /* scanjoin_target will not have any SRFs precomputed for it */
1834 6041 : split_pathtarget_at_srfs(root, scanjoin_target, NULL,
1835 : &scanjoin_targets,
1836 : &scanjoin_targets_contain_srfs);
1837 6041 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
1838 : Assert(!linitial_int(scanjoin_targets_contain_srfs));
1839 : }
1840 : else
1841 : {
1842 : /* initialize lists; for most of these, dummy values are OK */
1843 265595 : final_targets = final_targets_contain_srfs = NIL;
1844 265595 : sort_input_targets = sort_input_targets_contain_srfs = NIL;
1845 265595 : grouping_targets = grouping_targets_contain_srfs = NIL;
1846 265595 : scanjoin_targets = list_make1(scanjoin_target);
1847 265595 : scanjoin_targets_contain_srfs = NIL;
1848 : }
1849 :
1850 : /* Apply scan/join target. */
1851 271636 : scanjoin_target_same_exprs = list_length(scanjoin_targets) == 1
1852 271636 : && equal(scanjoin_target->exprs, current_rel->reltarget->exprs);
1853 271636 : apply_scanjoin_target_to_paths(root, current_rel, scanjoin_targets,
1854 : scanjoin_targets_contain_srfs,
1855 : scanjoin_target_parallel_safe,
1856 : scanjoin_target_same_exprs);
1857 :
1858 : /*
1859 : * Save the various upper-rel PathTargets we just computed into
1860 : * root->upper_targets[]. The core code doesn't use this, but it
1861 : * provides a convenient place for extensions to get at the info. For
1862 : * consistency, we save all the intermediate targets, even though some
1863 : * of the corresponding upperrels might not be needed for this query.
1864 : */
1865 271636 : root->upper_targets[UPPERREL_FINAL] = final_target;
1866 271636 : root->upper_targets[UPPERREL_ORDERED] = final_target;
1867 271636 : root->upper_targets[UPPERREL_DISTINCT] = sort_input_target;
1868 271636 : root->upper_targets[UPPERREL_PARTIAL_DISTINCT] = sort_input_target;
1869 271636 : root->upper_targets[UPPERREL_WINDOW] = sort_input_target;
1870 271636 : root->upper_targets[UPPERREL_GROUP_AGG] = grouping_target;
1871 :
1872 : /*
1873 : * If we have grouping and/or aggregation, consider ways to implement
1874 : * that. We build a new upperrel representing the output of this
1875 : * phase.
1876 : */
1877 271636 : if (have_grouping)
1878 : {
1879 22981 : current_rel = create_grouping_paths(root,
1880 : current_rel,
1881 : grouping_target,
1882 : grouping_target_parallel_safe,
1883 : gset_data);
1884 : /* Fix things up if grouping_target contains SRFs */
1885 22978 : if (parse->hasTargetSRFs)
1886 238 : adjust_paths_for_srfs(root, current_rel,
1887 : grouping_targets,
1888 : grouping_targets_contain_srfs);
1889 : }
1890 :
1891 : /*
1892 : * If we have window functions, consider ways to implement those. We
1893 : * build a new upperrel representing the output of this phase.
1894 : */
1895 271633 : if (activeWindows)
1896 : {
1897 1338 : current_rel = create_window_paths(root,
1898 : current_rel,
1899 : grouping_target,
1900 : sort_input_target,
1901 : sort_input_target_parallel_safe,
1902 : wflists,
1903 : activeWindows);
1904 : /* Fix things up if sort_input_target contains SRFs */
1905 1338 : if (parse->hasTargetSRFs)
1906 6 : adjust_paths_for_srfs(root, current_rel,
1907 : sort_input_targets,
1908 : sort_input_targets_contain_srfs);
1909 : }
1910 :
1911 : /*
1912 : * If there is a DISTINCT clause, consider ways to implement that. We
1913 : * build a new upperrel representing the output of this phase.
1914 : */
1915 271633 : if (parse->distinctClause)
1916 : {
1917 1507 : current_rel = create_distinct_paths(root,
1918 : current_rel,
1919 : sort_input_target);
1920 : }
1921 : } /* end of if (setOperations) */
1922 :
1923 : /*
1924 : * If ORDER BY was given, consider ways to implement that, and generate a
1925 : * new upperrel containing only paths that emit the correct ordering and
1926 : * project the correct final_target. We can apply the original
1927 : * limit_tuples limit in sort costing here, but only if there are no
1928 : * postponed SRFs.
1929 : */
1930 274741 : if (parse->sortClause)
1931 : {
1932 38068 : current_rel = create_ordered_paths(root,
1933 : current_rel,
1934 : final_target,
1935 : final_target_parallel_safe,
1936 : have_postponed_srfs ? -1.0 :
1937 : limit_tuples);
1938 : /* Fix things up if final_target contains SRFs */
1939 38068 : if (parse->hasTargetSRFs)
1940 110 : adjust_paths_for_srfs(root, current_rel,
1941 : final_targets,
1942 : final_targets_contain_srfs);
1943 : }
1944 :
1945 : /*
1946 : * Now we are prepared to build the final-output upperrel.
1947 : */
1948 274741 : final_rel = fetch_upper_rel(root, UPPERREL_FINAL, NULL);
1949 :
1950 : /*
1951 : * If the input rel is marked consider_parallel and there's nothing that's
1952 : * not parallel-safe in the LIMIT clause, then the final_rel can be marked
1953 : * consider_parallel as well. Note that if the query has rowMarks or is
1954 : * not a SELECT, consider_parallel will be false for every relation in the
1955 : * query.
1956 : */
1957 365259 : if (current_rel->consider_parallel &&
1958 181024 : is_parallel_safe(root, parse->limitOffset) &&
1959 90506 : is_parallel_safe(root, parse->limitCount))
1960 90503 : final_rel->consider_parallel = true;
1961 :
1962 : /*
1963 : * If the current_rel belongs to a single FDW, so does the final_rel.
1964 : */
1965 274741 : final_rel->serverid = current_rel->serverid;
1966 274741 : final_rel->userid = current_rel->userid;
1967 274741 : final_rel->useridiscurrent = current_rel->useridiscurrent;
1968 274741 : final_rel->fdwroutine = current_rel->fdwroutine;
1969 :
1970 : /*
1971 : * Generate paths for the final_rel. Insert all surviving paths, with
1972 : * LockRows, Limit, and/or ModifyTable steps added if needed.
1973 : */
1974 560113 : foreach(lc, current_rel->pathlist)
1975 : {
1976 285372 : Path *path = (Path *) lfirst(lc);
1977 :
1978 : /*
1979 : * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
1980 : * (Note: we intentionally test parse->rowMarks not root->rowMarks
1981 : * here. If there are only non-locking rowmarks, they should be
1982 : * handled by the ModifyTable node instead. However, root->rowMarks
1983 : * is what goes into the LockRows node.)
1984 : */
1985 285372 : if (parse->rowMarks)
1986 : {
1987 7030 : path = (Path *) create_lockrows_path(root, final_rel, path,
1988 : root->rowMarks,
1989 : assign_special_exec_param(root));
1990 : }
1991 :
1992 : /*
1993 : * If there is a LIMIT/OFFSET clause, add the LIMIT node.
1994 : */
1995 285372 : if (limit_needed(parse))
1996 : {
1997 3135 : path = (Path *) create_limit_path(root, final_rel, path,
1998 : parse->limitOffset,
1999 : parse->limitCount,
2000 : parse->limitOption,
2001 : offset_est, count_est);
2002 : }
2003 :
2004 : /*
2005 : * If this is an INSERT/UPDATE/DELETE/MERGE, add the ModifyTable node.
2006 : */
2007 285372 : if (parse->commandType != CMD_SELECT)
2008 : {
2009 : Index rootRelation;
2010 46950 : List *resultRelations = NIL;
2011 46950 : List *updateColnosLists = NIL;
2012 46950 : List *withCheckOptionLists = NIL;
2013 46950 : List *returningLists = NIL;
2014 46950 : List *mergeActionLists = NIL;
2015 46950 : List *mergeJoinConditions = NIL;
2016 : List *rowMarks;
2017 :
2018 46950 : if (bms_membership(root->all_result_relids) == BMS_MULTIPLE)
2019 : {
2020 : /* Inherited UPDATE/DELETE/MERGE */
2021 1458 : RelOptInfo *top_result_rel = find_base_rel(root,
2022 : parse->resultRelation);
2023 1458 : int resultRelation = -1;
2024 :
2025 : /* Pass the root result rel forward to the executor. */
2026 1458 : rootRelation = parse->resultRelation;
2027 :
2028 : /* Add only leaf children to ModifyTable. */
2029 4237 : while ((resultRelation = bms_next_member(root->leaf_result_relids,
2030 4237 : resultRelation)) >= 0)
2031 : {
2032 2779 : RelOptInfo *this_result_rel = find_base_rel(root,
2033 : resultRelation);
2034 :
2035 : /*
2036 : * Also exclude any leaf rels that have turned dummy since
2037 : * being added to the list, for example, by being excluded
2038 : * by constraint exclusion.
2039 : */
2040 2779 : if (IS_DUMMY_REL(this_result_rel))
2041 90 : continue;
2042 :
2043 : /* Build per-target-rel lists needed by ModifyTable */
2044 2689 : resultRelations = lappend_int(resultRelations,
2045 : resultRelation);
2046 2689 : if (parse->commandType == CMD_UPDATE)
2047 : {
2048 1848 : List *update_colnos = root->update_colnos;
2049 :
2050 1848 : if (this_result_rel != top_result_rel)
2051 : update_colnos =
2052 1848 : adjust_inherited_attnums_multilevel(root,
2053 : update_colnos,
2054 : this_result_rel->relid,
2055 : top_result_rel->relid);
2056 1848 : updateColnosLists = lappend(updateColnosLists,
2057 : update_colnos);
2058 : }
2059 2689 : if (parse->withCheckOptions)
2060 : {
2061 252 : List *withCheckOptions = parse->withCheckOptions;
2062 :
2063 252 : if (this_result_rel != top_result_rel)
2064 : withCheckOptions = (List *)
2065 252 : adjust_appendrel_attrs_multilevel(root,
2066 : (Node *) withCheckOptions,
2067 : this_result_rel,
2068 : top_result_rel);
2069 252 : withCheckOptionLists = lappend(withCheckOptionLists,
2070 : withCheckOptions);
2071 : }
2072 2689 : if (parse->returningList)
2073 : {
2074 423 : List *returningList = parse->returningList;
2075 :
2076 423 : if (this_result_rel != top_result_rel)
2077 : returningList = (List *)
2078 423 : adjust_appendrel_attrs_multilevel(root,
2079 : (Node *) returningList,
2080 : this_result_rel,
2081 : top_result_rel);
2082 423 : returningLists = lappend(returningLists,
2083 : returningList);
2084 : }
2085 2689 : if (parse->mergeActionList)
2086 : {
2087 : ListCell *l;
2088 271 : List *mergeActionList = NIL;
2089 :
2090 : /*
2091 : * Copy MergeActions and translate stuff that
2092 : * references attribute numbers.
2093 : */
2094 846 : foreach(l, parse->mergeActionList)
2095 : {
2096 575 : MergeAction *action = lfirst(l),
2097 575 : *leaf_action = copyObject(action);
2098 :
2099 575 : leaf_action->qual =
2100 575 : adjust_appendrel_attrs_multilevel(root,
2101 : (Node *) action->qual,
2102 : this_result_rel,
2103 : top_result_rel);
2104 575 : leaf_action->targetList = (List *)
2105 575 : adjust_appendrel_attrs_multilevel(root,
2106 575 : (Node *) action->targetList,
2107 : this_result_rel,
2108 : top_result_rel);
2109 575 : if (leaf_action->commandType == CMD_UPDATE)
2110 321 : leaf_action->updateColnos =
2111 321 : adjust_inherited_attnums_multilevel(root,
2112 : action->updateColnos,
2113 : this_result_rel->relid,
2114 : top_result_rel->relid);
2115 575 : mergeActionList = lappend(mergeActionList,
2116 : leaf_action);
2117 : }
2118 :
2119 271 : mergeActionLists = lappend(mergeActionLists,
2120 : mergeActionList);
2121 : }
2122 2689 : if (parse->commandType == CMD_MERGE)
2123 : {
2124 271 : Node *mergeJoinCondition = parse->mergeJoinCondition;
2125 :
2126 271 : if (this_result_rel != top_result_rel)
2127 : mergeJoinCondition =
2128 271 : adjust_appendrel_attrs_multilevel(root,
2129 : mergeJoinCondition,
2130 : this_result_rel,
2131 : top_result_rel);
2132 271 : mergeJoinConditions = lappend(mergeJoinConditions,
2133 : mergeJoinCondition);
2134 : }
2135 : }
2136 :
2137 1458 : if (resultRelations == NIL)
2138 : {
2139 : /*
2140 : * We managed to exclude every child rel, so generate a
2141 : * dummy one-relation plan using info for the top target
2142 : * rel (even though that may not be a leaf target).
2143 : * Although it's clear that no data will be updated or
2144 : * deleted, we still need to have a ModifyTable node so
2145 : * that any statement triggers will be executed. (This
2146 : * could be cleaner if we fixed nodeModifyTable.c to allow
2147 : * zero target relations, but that probably wouldn't be a
2148 : * net win.)
2149 : */
2150 18 : resultRelations = list_make1_int(parse->resultRelation);
2151 18 : if (parse->commandType == CMD_UPDATE)
2152 16 : updateColnosLists = list_make1(root->update_colnos);
2153 18 : if (parse->withCheckOptions)
2154 0 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2155 18 : if (parse->returningList)
2156 9 : returningLists = list_make1(parse->returningList);
2157 18 : if (parse->mergeActionList)
2158 1 : mergeActionLists = list_make1(parse->mergeActionList);
2159 18 : if (parse->commandType == CMD_MERGE)
2160 1 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2161 : }
2162 : }
2163 : else
2164 : {
2165 : /* Single-relation INSERT/UPDATE/DELETE/MERGE. */
2166 45492 : rootRelation = 0; /* there's no separate root rel */
2167 45492 : resultRelations = list_make1_int(parse->resultRelation);
2168 45492 : if (parse->commandType == CMD_UPDATE)
2169 6360 : updateColnosLists = list_make1(root->update_colnos);
2170 45492 : if (parse->withCheckOptions)
2171 547 : withCheckOptionLists = list_make1(parse->withCheckOptions);
2172 45492 : if (parse->returningList)
2173 1474 : returningLists = list_make1(parse->returningList);
2174 45492 : if (parse->mergeActionList)
2175 801 : mergeActionLists = list_make1(parse->mergeActionList);
2176 45492 : if (parse->commandType == CMD_MERGE)
2177 801 : mergeJoinConditions = list_make1(parse->mergeJoinCondition);
2178 : }
2179 :
2180 : /*
2181 : * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
2182 : * will have dealt with fetching non-locked marked rows, else we
2183 : * need to have ModifyTable do that.
2184 : */
2185 46950 : if (parse->rowMarks)
2186 0 : rowMarks = NIL;
2187 : else
2188 46950 : rowMarks = root->rowMarks;
2189 :
2190 : path = (Path *)
2191 46950 : create_modifytable_path(root, final_rel,
2192 : path,
2193 : parse->commandType,
2194 46950 : parse->canSetTag,
2195 46950 : parse->resultRelation,
2196 : rootRelation,
2197 : resultRelations,
2198 : updateColnosLists,
2199 : withCheckOptionLists,
2200 : returningLists,
2201 : rowMarks,
2202 : parse->onConflict,
2203 : mergeActionLists,
2204 : mergeJoinConditions,
2205 : assign_special_exec_param(root));
2206 : }
2207 :
2208 : /* And shove it into final_rel */
2209 285372 : add_path(final_rel, path);
2210 : }
2211 :
2212 : /*
2213 : * Generate partial paths for final_rel, too, if outer query levels might
2214 : * be able to make use of them.
2215 : */
2216 274741 : if (final_rel->consider_parallel && root->query_level > 1 &&
2217 15528 : !limit_needed(parse))
2218 : {
2219 : Assert(!parse->rowMarks && parse->commandType == CMD_SELECT);
2220 15497 : foreach(lc, current_rel->partial_pathlist)
2221 : {
2222 62 : Path *partial_path = (Path *) lfirst(lc);
2223 :
2224 62 : add_partial_path(final_rel, partial_path);
2225 : }
2226 : }
2227 :
2228 274741 : extra.limit_needed = limit_needed(parse);
2229 274741 : extra.limit_tuples = limit_tuples;
2230 274741 : extra.count_est = count_est;
2231 274741 : extra.offset_est = offset_est;
2232 :
2233 : /*
2234 : * If there is an FDW that's responsible for all baserels of the query,
2235 : * let it consider adding ForeignPaths.
2236 : */
2237 274741 : if (final_rel->fdwroutine &&
2238 632 : final_rel->fdwroutine->GetForeignUpperPaths)
2239 596 : final_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_FINAL,
2240 : current_rel, final_rel,
2241 : &extra);
2242 :
2243 : /* Let extensions possibly add some more paths */
2244 274741 : if (create_upper_paths_hook)
2245 0 : (*create_upper_paths_hook) (root, UPPERREL_FINAL,
2246 : current_rel, final_rel, &extra);
2247 :
2248 : /* Note: currently, we leave it to callers to do set_cheapest() */
2249 274741 : }
2250 :
2251 : /*
2252 : * Do preprocessing for groupingSets clause and related data.
2253 : *
2254 : * We expect that parse->groupingSets has already been expanded into a flat
2255 : * list of grouping sets (that is, just integer Lists of ressortgroupref
2256 : * numbers) by expand_grouping_sets(). This function handles the preliminary
2257 : * steps of organizing the grouping sets into lists of rollups, and preparing
2258 : * annotations which will later be filled in with size estimates.
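 : *
 : * For example, "GROUP BY ROLLUP(a, b)" reaches this point as three
 : * integer lists representing the sets (), (a), and (a, b), smallest
 : * first.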
2259 : */
2260 : static grouping_sets_data *
2261 510 : preprocess_grouping_sets(PlannerInfo *root)
2262 : {
2263 510 : Query *parse = root->parse;
2264 : List *sets;
2265 510 : int maxref = 0;
2266 : ListCell *lc_set;
2267 510 : grouping_sets_data *gd = palloc0_object(grouping_sets_data);
2268 :
2269 : /*
2270 : * We don't currently make any attempt to optimize the groupClause when
2271 : * there are grouping sets, so just duplicate it in processed_groupClause.
2272 : */
2273 510 : root->processed_groupClause = parse->groupClause;
2274 :
2275 : /* Detect unhashable and unsortable grouping expressions */
2276 510 : gd->any_hashable = false;
2277 510 : gd->unhashable_refs = NULL;
2278 510 : gd->unsortable_refs = NULL;
2279 510 : gd->unsortable_sets = NIL;
2280 :
2281 510 : if (parse->groupClause)
2282 : {
2283 : ListCell *lc;
2284 :
2285 1490 : foreach(lc, parse->groupClause)
2286 : {
2287 1019 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2288 1019 : Index ref = gc->tleSortGroupRef;
2289 :
2290 1019 : if (ref > maxref)
2291 995 : maxref = ref;
2292 :
2293 1019 : if (!gc->hashable)
2294 15 : gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
2295 :
2296 1019 : if (!OidIsValid(gc->sortop))
2297 21 : gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
2298 : }
2299 : }
2300 :
2301 : /* Allocate workspace array for remapping */
2302 510 : gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
2303 :
2304 : /*
2305 : * If we have any unsortable sets, we must extract them before trying to
2306 : * prepare rollups. Unsortable sets don't go through
2307 : * reorder_grouping_sets, so we must apply the GroupingSetData annotation
2308 : * here.
2309 : */
2310 510 : if (!bms_is_empty(gd->unsortable_refs))
2311 : {
2312 21 : List *sortable_sets = NIL;
2313 : ListCell *lc;
2314 :
2315 63 : foreach(lc, parse->groupingSets)
2316 : {
2317 45 : List *gset = (List *) lfirst(lc);
2318 :
2319 45 : if (bms_overlap_list(gd->unsortable_refs, gset))
2320 : {
2321 24 : GroupingSetData *gs = makeNode(GroupingSetData);
2322 :
2323 24 : gs->set = gset;
2324 24 : gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
2325 :
2326 : /*
2327 : * We must enforce here that an unsortable set is hashable;
2328 : * later code assumes this. Parse analysis only checks that
2329 : * every individual column is either hashable or sortable.
2330 : *
2331 : * Note that passing this test doesn't guarantee we can
2332 : * generate a plan; there might be other showstoppers.
2333 : */
2334 24 : if (bms_overlap_list(gd->unhashable_refs, gset))
2335 3 : ereport(ERROR,
2336 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2337 : errmsg("could not implement GROUP BY"),
2338 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
2339 : }
2340 : else
2341 21 : sortable_sets = lappend(sortable_sets, gset);
2342 : }
2343 :
2344 18 : if (sortable_sets)
2345 15 : sets = extract_rollup_sets(sortable_sets);
2346 : else
2347 3 : sets = NIL;
2348 : }
2349 : else
2350 489 : sets = extract_rollup_sets(parse->groupingSets);
2351 :
2352 1327 : foreach(lc_set, sets)
2353 : {
2354 820 : List *current_sets = (List *) lfirst(lc_set);
2355 820 : RollupData *rollup = makeNode(RollupData);
2356 : GroupingSetData *gs;
2357 :
2358 : /*
2359 : * Reorder the current list of grouping sets into correct prefix
2360 : * order. If only one aggregation pass is needed, try to make the
2361 : * list match the ORDER BY clause; if more than one pass is needed, we
2362 : * don't bother with that.
2363 : *
2364 : * Note that this reorders the sets from smallest-member-first to
2365 : * largest-member-first, and applies the GroupingSetData annotations,
2366 : * though the data will be filled in later.
2367 : */
2368 820 : current_sets = reorder_grouping_sets(current_sets,
2369 820 : (list_length(sets) == 1
2370 : ? parse->sortClause
2371 : : NIL));
2372 :
2373 : /*
2374 : * Get the initial (and therefore largest) grouping set.
2375 : */
2376 820 : gs = linitial_node(GroupingSetData, current_sets);
2377 :
2378 : /*
2379 : * Order the groupClause appropriately. If the first grouping set is
2380 : * empty, then the groupClause must also be empty; otherwise we have
2381 : * to force the groupClause to match that grouping set's order.
2382 : *
2383 : * (The first grouping set can be empty even though parse->groupClause
2384 : * is not empty only if all non-empty grouping sets are unsortable.
2385 : * The groupClauses for hashed grouping sets are built later on.)
2386 : */
2387 820 : if (gs->set)
2388 781 : rollup->groupClause = preprocess_groupclause(root, gs->set);
2389 : else
2390 39 : rollup->groupClause = NIL;
2391 :
2392 : /*
2393 : * Is it hashable? We pretend empty sets are hashable even though we
2394 : * actually force them not to be hashed later. But don't bother if
2395 : * there's nothing but empty sets (since in that case we can't hash
2396 : * anything).
2397 : */
2398 820 : if (gs->set &&
2399 781 : !bms_overlap_list(gd->unhashable_refs, gs->set))
2400 : {
2401 769 : rollup->hashable = true;
2402 769 : gd->any_hashable = true;
2403 : }
2404 :
2405 : /*
2406 : * Now that we've pinned down an order for the groupClause for this
2407 : * list of grouping sets, we need to remap the entries in the grouping
2408 : * sets from sortgrouprefs to plain indices (0-based) into the
2409 : * groupClause for this collection of grouping sets. We keep the
2410 : * original form for later use, though.
2411 : */
2412 820 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
2413 : current_sets,
2414 : gd->tleref_to_colnum_map);
2415 820 : rollup->gsets_data = current_sets;
2416 :
2417 820 : gd->rollups = lappend(gd->rollups, rollup);
2418 : }
2419 :
2420 507 : if (gd->unsortable_sets)
2421 : {
2422 : /*
2423 : * We have not yet pinned down a groupclause for this, but we will
2424 : * need index-based lists for estimation purposes. Construct
2425 : * hash_sets_idx based on the entire original groupclause for now.
2426 : */
2427 18 : gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
2428 : gd->unsortable_sets,
2429 : gd->tleref_to_colnum_map);
2430 18 : gd->any_hashable = true;
2431 : }
2432 :
2433 507 : return gd;
2434 : }
2435 :
2436 : /*
2437 : * Given a groupclause and a list of GroupingSetData, return equivalent sets
2438 : * (without annotation) mapped to indexes into the given groupclause.
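 : *
 : * For example, if the groupClause entries carry tleSortGroupRefs 4, 2
 : * and 7 (in that order), the set (7, 4) is remapped to (2, 0).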
2439 : */
2440 : static List *
2441 2314 : remap_to_groupclause_idx(List *groupClause,
2442 : List *gsets,
2443 : int *tleref_to_colnum_map)
2444 : {
2445 2314 : int ref = 0;
2446 2314 : List *result = NIL;
2447 : ListCell *lc;
2448 :
2449 5566 : foreach(lc, groupClause)
2450 : {
2451 3252 : SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
2452 :
2453 3252 : tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
2454 : }
2455 :
2456 5320 : foreach(lc, gsets)
2457 : {
2458 3006 : List *set = NIL;
2459 : ListCell *lc2;
2460 3006 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
2461 :
2462 6689 : foreach(lc2, gs->set)
2463 : {
2464 3683 : set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
2465 : }
2466 :
2467 3006 : result = lappend(result, set);
2468 : }
2469 :
2470 2314 : return result;
2471 : }
2472 :
2473 :
2474 : /*
2475 : * preprocess_rowmarks - set up PlanRowMarks if needed
2476 : */
2477 : static void
2478 276767 : preprocess_rowmarks(PlannerInfo *root)
2479 : {
2480 276767 : Query *parse = root->parse;
2481 : Bitmapset *rels;
2482 : List *prowmarks;
2483 : ListCell *l;
2484 : int i;
2485 :
2486 276767 : if (parse->rowMarks)
2487 : {
2488 : /*
2489 : * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
2490 : * grouping, since grouping renders a reference to individual tuple
2491 : * CTIDs invalid. This is also checked at parse time, but that's
2492 : * insufficient because of rule substitution, query pullup, etc.
2493 : */
2494 6786 : CheckSelectLocking(parse, linitial_node(RowMarkClause,
2495 : parse->rowMarks)->strength);
2496 : }
2497 : else
2498 : {
2499 : /*
2500 : * We only need rowmarks for UPDATE, DELETE, MERGE, or FOR [KEY]
2501 : * UPDATE/SHARE.
2502 : */
2503 269981 : if (parse->commandType != CMD_UPDATE &&
2504 262599 : parse->commandType != CMD_DELETE &&
2505 260403 : parse->commandType != CMD_MERGE)
2506 259476 : return;
2507 : }
2508 :
2509 : /*
2510 : * We need to have rowmarks for all base relations except the target. We
2511 : * make a bitmapset of all base rels and then remove the items we don't
2512 : * need or have FOR [KEY] UPDATE/SHARE marks for.
2513 : */
2514 17291 : rels = get_relids_in_jointree((Node *) parse->jointree, false, false);
2515 17291 : if (parse->resultRelation)
2516 10505 : rels = bms_del_member(rels, parse->resultRelation);
2517 :
2518 : /*
2519 : * Convert RowMarkClauses to PlanRowMark representation.
2520 : */
2521 17291 : prowmarks = NIL;
2522 24192 : foreach(l, parse->rowMarks)
2523 : {
2524 6901 : RowMarkClause *rc = lfirst_node(RowMarkClause, l);
2525 6901 : RangeTblEntry *rte = rt_fetch(rc->rti, parse->rtable);
2526 : PlanRowMark *newrc;
2527 :
2528 : /*
2529 : * Currently, it is syntactically impossible to have FOR UPDATE et al
2530 : * applied to an update/delete target rel. If that ever becomes
2531 : * possible, we should drop the target from the PlanRowMark list.
2532 : */
2533 : Assert(rc->rti != parse->resultRelation);
2534 :
2535 : /*
2536 : * Ignore RowMarkClauses for subqueries; they aren't real tables and
2537 : * can't support true locking. Subqueries that got flattened into the
2538 : * main query should be ignored completely. Any that didn't will get
2539 : * ROW_MARK_COPY items in the next loop.
2540 : */
2541 6901 : if (rte->rtekind != RTE_RELATION)
2542 30 : continue;
2543 :
2544 6871 : rels = bms_del_member(rels, rc->rti);
2545 :
2546 6871 : newrc = makeNode(PlanRowMark);
2547 6871 : newrc->rti = newrc->prti = rc->rti;
2548 6871 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2549 6871 : newrc->markType = select_rowmark_type(rte, rc->strength);
2550 6871 : newrc->allMarkTypes = (1 << newrc->markType);
2551 6871 : newrc->strength = rc->strength;
2552 6871 : newrc->waitPolicy = rc->waitPolicy;
2553 6871 : newrc->isParent = false;
2554 :
2555 6871 : prowmarks = lappend(prowmarks, newrc);
2556 : }
2557 :
2558 : /*
2559 : * Now, add rowmarks for any non-target, non-locked base relations.
2560 : */
2561 17291 : i = 0;
2562 40012 : foreach(l, parse->rtable)
2563 : {
2564 22721 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
2565 : PlanRowMark *newrc;
2566 :
2567 22721 : i++;
2568 22721 : if (!bms_is_member(i, rels))
2569 20836 : continue;
2570 :
2571 1885 : newrc = makeNode(PlanRowMark);
2572 1885 : newrc->rti = newrc->prti = i;
2573 1885 : newrc->rowmarkId = ++(root->glob->lastRowMarkId);
2574 1885 : newrc->markType = select_rowmark_type(rte, LCS_NONE);
2575 1885 : newrc->allMarkTypes = (1 << newrc->markType);
2576 1885 : newrc->strength = LCS_NONE;
2577 1885 : newrc->waitPolicy = LockWaitBlock; /* doesn't matter */
2578 1885 : newrc->isParent = false;
2579 :
2580 1885 : prowmarks = lappend(prowmarks, newrc);
2581 : }
2582 :
2583 17291 : root->rowMarks = prowmarks;
2584 : }
2585 :
2586 : /*
2587 : * Select RowMarkType to use for a given table
2588 : */
2589 : RowMarkType
2590 9964 : select_rowmark_type(RangeTblEntry *rte, LockClauseStrength strength)
2591 : {
2592 9964 : if (rte->rtekind != RTE_RELATION)
2593 : {
2594 : /* If it's not a table at all, use ROW_MARK_COPY */
2595 719 : return ROW_MARK_COPY;
2596 : }
2597 9245 : else if (rte->relkind == RELKIND_FOREIGN_TABLE)
2598 : {
2599 : /* Let the FDW select the rowmark type, if it wants to */
2600 114 : FdwRoutine *fdwroutine = GetFdwRoutineByRelId(rte->relid);
2601 :
2602 114 : if (fdwroutine->GetForeignRowMarkType != NULL)
2603 0 : return fdwroutine->GetForeignRowMarkType(rte, strength);
2604 : /* Otherwise, use ROW_MARK_COPY by default */
2605 114 : return ROW_MARK_COPY;
2606 : }
2607 : else
2608 : {
2609 : /* Regular table, apply the appropriate lock type */
2610 9131 : switch (strength)
2611 : {
2612 1273 : case LCS_NONE:
2613 :
2614 : /*
2615 : * We don't need a tuple lock, only the ability to re-fetch
2616 : * the row.
2617 : */
2618 1273 : return ROW_MARK_REFERENCE;
2619 : break;
2620 6892 : case LCS_FORKEYSHARE:
2621 6892 : return ROW_MARK_KEYSHARE;
2622 : break;
2623 153 : case LCS_FORSHARE:
2624 153 : return ROW_MARK_SHARE;
2625 : break;
2626 39 : case LCS_FORNOKEYUPDATE:
2627 39 : return ROW_MARK_NOKEYEXCLUSIVE;
2628 : break;
2629 774 : case LCS_FORUPDATE:
2630 774 : return ROW_MARK_EXCLUSIVE;
2631 : break;
2632 : }
2633 0 : elog(ERROR, "unrecognized LockClauseStrength %d", (int) strength);
2634 : return ROW_MARK_EXCLUSIVE; /* keep compiler quiet */
2635 : }
2636 : }
2637 :
2638 : /*
2639 : * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses
2640 : *
2641 : * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the
2642 : * results back in *count_est and *offset_est. These variables are set to
2643 : * 0 if the corresponding clause is not present, and -1 if it's present
2644 : * but we couldn't estimate the value for it. (The "0" convention is OK
2645 : * for OFFSET but a little bit bogus for LIMIT: effectively we estimate
2646 : * LIMIT 0 as though it were LIMIT 1. But this is in line with the planner's
2647 : * usual practice of never estimating less than one row.) These values will
2648 : * be passed to create_limit_path, which see if you change this code.
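 : *
 : * For example, "LIMIT 10 OFFSET 20" produces *count_est = 10 and
 : * *offset_est = 20 (and, absent caller input, a tuple_fraction of 30.0),
 : * while "LIMIT $1" with no known value for the parameter produces
 : * *count_est = -1 and falls back on the 10% guess below.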
2649 : *
2650 : * The return value is the suitably adjusted tuple_fraction to use for
2651 : * planning the query. This adjustment is not overridable, since it reflects
2652 : * plan actions that grouping_planner() will certainly take, not assumptions
2653 : * about context.
2654 : */
2655 : static double
2656 2625 : preprocess_limit(PlannerInfo *root, double tuple_fraction,
2657 : int64 *offset_est, int64 *count_est)
2658 : {
2659 2625 : Query *parse = root->parse;
2660 : Node *est;
2661 : double limit_fraction;
2662 :
2663 : /* Should not be called unless LIMIT or OFFSET */
2664 : Assert(parse->limitCount || parse->limitOffset);
2665 :
2666 : /*
2667 : * Try to obtain the clause values. We use estimate_expression_value
2668 : * primarily because it can sometimes do something useful with Params.
2669 : */
2670 2625 : if (parse->limitCount)
2671 : {
2672 2359 : est = estimate_expression_value(root, parse->limitCount);
2673 2359 : if (est && IsA(est, Const))
2674 : {
2675 2356 : if (((Const *) est)->constisnull)
2676 : {
2677 : /* NULL indicates LIMIT ALL, ie, no limit */
2678 0 : *count_est = 0; /* treat as not present */
2679 : }
2680 : else
2681 : {
2682 2356 : *count_est = DatumGetInt64(((Const *) est)->constvalue);
2683 2356 : if (*count_est <= 0)
2684 75 : *count_est = 1; /* force to at least 1 */
2685 : }
2686 : }
2687 : else
2688 3 : *count_est = -1; /* can't estimate */
2689 : }
2690 : else
2691 266 : *count_est = 0; /* not present */
2692 :
2693 2625 : if (parse->limitOffset)
2694 : {
2695 452 : est = estimate_expression_value(root, parse->limitOffset);
2696 452 : if (est && IsA(est, Const))
2697 : {
2698 440 : if (((Const *) est)->constisnull)
2699 : {
2700 : /* Treat NULL as no offset; the executor will too */
2701 0 : *offset_est = 0; /* treat as not present */
2702 : }
2703 : else
2704 : {
2705 440 : *offset_est = DatumGetInt64(((Const *) est)->constvalue);
2706 440 : if (*offset_est < 0)
2707 0 : *offset_est = 0; /* treat as not present */
2708 : }
2709 : }
2710 : else
2711 12 : *offset_est = -1; /* can't estimate */
2712 : }
2713 : else
2714 2173 : *offset_est = 0; /* not present */
2715 :
2716 2625 : if (*count_est != 0)
2717 : {
2718 : /*
2719 : * A LIMIT clause limits the absolute number of tuples returned.
2720 : * However, if it's not a constant LIMIT then we have to guess; for
2721 : * lack of a better idea, assume 10% of the plan's result is wanted.
2722 : */
2723 2359 : if (*count_est < 0 || *offset_est < 0)
2724 : {
2725 : /* LIMIT or OFFSET is an expression ... punt ... */
2726 12 : limit_fraction = 0.10;
2727 : }
2728 : else
2729 : {
2730 : /* LIMIT (plus OFFSET, if any) is max number of tuples needed */
2731 2347 : limit_fraction = (double) *count_est + (double) *offset_est;
2732 : }
2733 :
2734 : /*
2735 : * If we have absolute limits from both caller and LIMIT, use the
2736 : * smaller value; likewise if they are both fractional. If one is
2737 : * fractional and the other absolute, we can't easily determine which
2738 : * is smaller, but we use the heuristic that the absolute will usually
2739 : * be smaller.
2740 : */
2741 2359 : if (tuple_fraction >= 1.0)
2742 : {
2743 3 : if (limit_fraction >= 1.0)
2744 : {
2745 : /* both absolute */
2746 3 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2747 : }
2748 : else
2749 : {
2750 : /* caller absolute, limit fractional; use caller's value */
2751 : }
2752 : }
2753 2356 : else if (tuple_fraction > 0.0)
2754 : {
2755 74 : if (limit_fraction >= 1.0)
2756 : {
2757 : /* caller fractional, limit absolute; use limit */
2758 74 : tuple_fraction = limit_fraction;
2759 : }
2760 : else
2761 : {
2762 : /* both fractional */
2763 0 : tuple_fraction = Min(tuple_fraction, limit_fraction);
2764 : }
2765 : }
2766 : else
2767 : {
2768 : /* no info from caller, just use limit */
2769 2282 : tuple_fraction = limit_fraction;
2770 : }
2771 : }
2772 266 : else if (*offset_est != 0 && tuple_fraction > 0.0)
2773 : {
2774 : /*
2775 : * We have an OFFSET but no LIMIT. This acts entirely differently
2776 : * from the LIMIT case: here, we need to increase rather than decrease
2777 : * the caller's tuple_fraction, because the OFFSET acts to cause more
2778 : * tuples to be fetched instead of fewer. This only matters if we got
2779 : * a tuple_fraction > 0, however.
2780 : *
2781 : * As above, use 10% if OFFSET is present but unestimatable.
2782 : */
2783 8 : if (*offset_est < 0)
2784 0 : limit_fraction = 0.10;
2785 : else
2786 8 : limit_fraction = (double) *offset_est;
2787 :
2788 : /*
2789 : * If we have absolute counts from both caller and OFFSET, add them
2790 : * together; likewise if they are both fractional. If one is
2791 : * fractional and the other absolute, we want to take the larger, and
2792 : * we heuristically assume that's the fractional one.
2793 : */
2794 8 : if (tuple_fraction >= 1.0)
2795 : {
2796 0 : if (limit_fraction >= 1.0)
2797 : {
2798 : /* both absolute, so add them together */
2799 0 : tuple_fraction += limit_fraction;
2800 : }
2801 : else
2802 : {
2803 : /* caller absolute, limit fractional; use limit */
2804 0 : tuple_fraction = limit_fraction;
2805 : }
2806 : }
2807 : else
2808 : {
2809 8 : if (limit_fraction >= 1.0)
2810 : {
2811 : /* caller fractional, limit absolute; use caller's value */
2812 : }
2813 : else
2814 : {
2815 : /* both fractional, so add them together */
2816 0 : tuple_fraction += limit_fraction;
2817 0 : if (tuple_fraction >= 1.0)
2818 0 : tuple_fraction = 0.0; /* assume fetch all */
2819 : }
2820 : }
2821 : }
2822 :
2823 2625 : return tuple_fraction;
2824 : }
2825 :
2826 : /*
2827 : * limit_needed - do we actually need a Limit plan node?
2828 : *
2829 : * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
2830 : * a Limit node. This is worth checking for because "OFFSET 0" is a common
2831 : * locution for an optimization fence. (Because other places in the planner
2832 : * merely check whether parse->limitOffset isn't NULL, it will still work as
2833 : * an optimization fence --- we're just suppressing unnecessary run-time
2834 : * overhead.)
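 : *
 : * For example, a subquery written as "(SELECT ... OFFSET 0)" still acts
 : * as a fence against pullup, but we emit no Limit node for it, since
 : * skipping zero rows changes nothing at run time.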
2835 : *
2836 : * This might look like it could be merged into preprocess_limit, but there's
2837 : * a key distinction: here we need hard constants in OFFSET/LIMIT, whereas
2838 : * in preprocess_limit it's good enough to consider estimated values.
2839 : */
2840 : bool
2841 585801 : limit_needed(Query *parse)
2842 : {
2843 : Node *node;
2844 :
2845 585801 : node = parse->limitCount;
2846 585801 : if (node)
2847 : {
2848 5652 : if (IsA(node, Const))
2849 : {
2850 : /* NULL indicates LIMIT ALL, ie, no limit */
2851 5534 : if (!((Const *) node)->constisnull)
2852 5534 : return true; /* LIMIT with a constant value */
2853 : }
2854 : else
2855 118 : return true; /* non-constant LIMIT */
2856 : }
2857 :
2858 580149 : node = parse->limitOffset;
2859 580149 : if (node)
2860 : {
2861 775 : if (IsA(node, Const))
2862 : {
2863 : /* Treat NULL as no offset; the executor would too */
2864 619 : if (!((Const *) node)->constisnull)
2865 : {
2866 619 : int64 offset = DatumGetInt64(((Const *) node)->constvalue);
2867 :
2868 619 : if (offset != 0)
2869 73 : return true; /* OFFSET with a nonzero value */
2870 : }
2871 : }
2872 : else
2873 156 : return true; /* non-constant OFFSET */
2874 : }
2875 :
2876 579920 : return false; /* don't need a Limit plan node */
2877 : }
2878 :
2879 : /*
2880 : * preprocess_groupclause - do preparatory work on GROUP BY clause
2881 : *
2882 : * The idea here is to adjust the ordering of the GROUP BY elements
2883 : * (which in itself is semantically insignificant) to match ORDER BY,
2884 : * thereby allowing a single sort operation to both implement the ORDER BY
2885 : * requirement and set up for a Unique step that implements GROUP BY.
2886 : * We also consider partial match between GROUP BY and ORDER BY elements,
2887 : * which can allow ORDER BY to be implemented using an incremental sort.
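 : *
 : * For example, "GROUP BY b, a ORDER BY a, b" comes out with the group
 : * clause rearranged to (a, b), so that one sort can serve both clauses;
 : * with "ORDER BY a, c" only the prefix (a) matches, and the remaining
 : * GROUP BY item b is simply appended after it.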
2888 : *
2889 : * We also consider other orderings of the GROUP BY elements, which could
2890 : * match the sort ordering of other possible plans (eg an indexscan) and
2891 : * thereby reduce cost. This is implemented during the generation of grouping
2892 : * paths. See get_useful_group_keys_orderings() for details.
2893 : *
2894 : * Note: we need no comparable processing of the distinctClause because
2895 : * the parser already enforced that it matches ORDER BY.
2896 : *
2897 : * Note: we return a fresh List, but its elements are the same
2898 : * SortGroupClauses appearing in parse->groupClause. This is important
2899 : * because later processing may modify the processed_groupClause list.
2900 : *
2901 : * For grouping sets, the order of items is instead forced to agree with that
2902 : * of the grouping set (and items not in the grouping set are skipped). The
2903 : * work of ordering the grouping set elements to match the ORDER BY, when
2904 : * possible, is done elsewhere.
2905 : */
2906 : static List *
2907 4324 : preprocess_groupclause(PlannerInfo *root, List *force)
2908 : {
2909 4324 : Query *parse = root->parse;
2910 4324 : List *new_groupclause = NIL;
2911 : ListCell *sl;
2912 : ListCell *gl;
2913 :
2914 : /* For grouping sets, we need to force the ordering */
2915 4324 : if (force)
2916 : {
2917 5470 : foreach(sl, force)
2918 : {
2919 3213 : Index ref = lfirst_int(sl);
2920 3213 : SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
2921 :
2922 3213 : new_groupclause = lappend(new_groupclause, cl);
2923 : }
2924 :
2925 2257 : return new_groupclause;
2926 : }
2927 :
2928 : /* If no ORDER BY, nothing useful to do here */
2929 2067 : if (parse->sortClause == NIL)
2930 1157 : return list_copy(parse->groupClause);
2931 :
2932 : /*
2933 : * Scan the ORDER BY clause and construct a list of matching GROUP BY
2934 : * items, but only as far as we can make a matching prefix.
2935 : *
2936 : * This code assumes that the sortClause contains no duplicate items.
2937 : */
2938 1773 : foreach(sl, parse->sortClause)
2939 : {
2940 1186 : SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
2941 :
2942 1734 : foreach(gl, parse->groupClause)
2943 : {
2944 1411 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2945 :
2946 1411 : if (equal(gc, sc))
2947 : {
2948 863 : new_groupclause = lappend(new_groupclause, gc);
2949 863 : break;
2950 : }
2951 : }
2952 1186 : if (gl == NULL)
2953 323 : break; /* no match, so stop scanning */
2954 : }
2955 :
2956 :
2957 : /* If no match at all, no point in reordering GROUP BY */
2958 910 : if (new_groupclause == NIL)
2959 149 : return list_copy(parse->groupClause);
2960 :
2961 : /*
2962 : * Add any remaining GROUP BY items to the new list. We don't require a
2963 : * complete match, because even partial match allows ORDER BY to be
2964 : * implemented using incremental sort. Also, give up if there are any
2965 : * non-sortable GROUP BY items, since then there's no hope anyway.
2966 : */
2967 1707 : foreach(gl, parse->groupClause)
2968 : {
2969 946 : SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
2970 :
2971 946 : if (list_member_ptr(new_groupclause, gc))
2972 863 : continue; /* it matched an ORDER BY item */
2973 83 : if (!OidIsValid(gc->sortop)) /* give up, GROUP BY can't be sorted */
2974 0 : return list_copy(parse->groupClause);
2975 83 : new_groupclause = lappend(new_groupclause, gc);
2976 : }
2977 :
2978 : /* Success --- install the rearranged GROUP BY list */
2979 : Assert(list_length(parse->groupClause) == list_length(new_groupclause));
2980 761 : return new_groupclause;
2981 : }
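/*
 * A worked example (hypothetical query): given
 *     SELECT ... GROUP BY b, a ORDER BY a, b
 * the matching loop above rewrites the GROUP BY list as (a, b), so a
 * single sort on (a, b) serves both the ordering and the grouping.  Given
 *     SELECT ... GROUP BY b, a ORDER BY a, c
 * only "a" matches, yielding (a, b); input sorted on (a, b) is still
 * presorted on "a", which an incremental sort can cheaply extend to
 * (a, c) for the ORDER BY.
 */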
2982 :
2983 : /*
2984 : * Extract lists of grouping sets that can be implemented using a single
2985 : * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
2986 : *
2987 : * Input must be sorted with smallest sets first. Result has each sublist
2988 : * sorted with smallest sets first.
2989 : *
2990 : * We want to produce the absolute minimum possible number of lists here to
2991 : * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
2992 : * of finding the minimal partition of a partially-ordered set into chains
2993 : * (which is what we need, taking the list of grouping sets as a poset ordered
2994 : * by set inclusion) can be mapped to the problem of finding the maximum
2995 : * cardinality matching on a bipartite graph, which is solvable in polynomial
2996 : * time, with a worst case of O(n^2.5) and usually much
2997 : * better. Since our N is at most 4096, we don't need to consider fallbacks to
2998 : * heuristic or approximate methods. (Planning time for a 12-d cube is under
2999 : * half a second on my modest system even with optimization off and assertions
3000 : * on.)
3001 : */
3002 : static List *
3003 504 : extract_rollup_sets(List *groupingSets)
3004 : {
3005 504 : int num_sets_raw = list_length(groupingSets);
3006 504 : int num_empty = 0;
3007 504 : int num_sets = 0; /* distinct sets */
3008 504 : int num_chains = 0;
3009 504 : List *result = NIL;
3010 : List **results;
3011 : List **orig_sets;
3012 : Bitmapset **set_masks;
3013 : int *chains;
3014 : short **adjacency;
3015 : short *adjacency_buf;
3016 : BipartiteMatchState *state;
3017 : int i;
3018 : int j;
3019 : int j_size;
3020 504 : ListCell *lc1 = list_head(groupingSets);
3021 : ListCell *lc;
3022 :
3023 : /*
3024 : * Start by stripping out empty sets. The algorithm doesn't require this,
3025 : * but the planner currently needs all empty sets to be returned in the
3026 : * first list, so we strip them here and add them back after.
3027 : */
3028 856 : while (lc1 && lfirst(lc1) == NIL)
3029 : {
3030 352 : ++num_empty;
3031 352 : lc1 = lnext(groupingSets, lc1);
3032 : }
3033 :
3034 : /* bail out now if it turns out that all we had were empty sets. */
3035 504 : if (!lc1)
3036 39 : return list_make1(groupingSets);
3037 :
3038 : /*----------
3039 : * We don't strictly need to remove duplicate sets here, but if we don't,
3040 : * they tend to become scattered through the result, which is a bit
3041 : * confusing (and irritating if we ever decide to optimize them out).
3042 : * So we remove them here and add them back after.
3043 : *
3044 : * For each non-duplicate set, we fill in the following:
3045 : *
3046 : * orig_sets[i] = list of the original set lists
3047 : * set_masks[i] = bitmapset for testing inclusion
3048 : * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
3049 : *
3050 : * chains[i] will be the result group this set is assigned to.
3051 : *
3052 : * We index all of these from 1 rather than 0 because it is convenient
3053 : * to leave 0 free for the NIL node in the graph algorithm.
3054 : *----------
3055 : */
3056 465 : orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
3057 465 : set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
3058 465 : adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
3059 465 : adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
3060 :
3061 465 : j_size = 0;
3062 465 : j = 0;
3063 465 : i = 1;
3064 :
3065 1622 : for_each_cell(lc, groupingSets, lc1)
3066 : {
3067 1157 : List *candidate = (List *) lfirst(lc);
3068 1157 : Bitmapset *candidate_set = NULL;
3069 : ListCell *lc2;
3070 1157 : int dup_of = 0;
3071 :
3072 2773 : foreach(lc2, candidate)
3073 : {
3074 1616 : candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
3075 : }
3076 :
3077 : /* we can only be a dup if we're the same length as a previous set */
3078 1157 : if (j_size == list_length(candidate))
3079 : {
3080 : int k;
3081 :
3082 1036 : for (k = j; k < i; ++k)
3083 : {
3084 672 : if (bms_equal(set_masks[k], candidate_set))
3085 : {
3086 79 : dup_of = k;
3087 79 : break;
3088 : }
3089 : }
3090 : }
3091 714 : else if (j_size < list_length(candidate))
3092 : {
3093 714 : j_size = list_length(candidate);
3094 714 : j = i;
3095 : }
3096 :
3097 1157 : if (dup_of > 0)
3098 : {
3099 79 : orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
3100 79 : bms_free(candidate_set);
3101 : }
3102 : else
3103 : {
3104 : int k;
3105 1078 : int n_adj = 0;
3106 :
3107 1078 : orig_sets[i] = list_make1(candidate);
3108 1078 : set_masks[i] = candidate_set;
3109 :
3110 : /* fill in adjacency list; no need to compare equal-size sets */
3111 :
3112 1726 : for (k = j - 1; k > 0; --k)
3113 : {
3114 648 : if (bms_is_subset(set_masks[k], candidate_set))
3115 567 : adjacency_buf[++n_adj] = k;
3116 : }
3117 :
3118 1078 : if (n_adj > 0)
3119 : {
3120 311 : adjacency_buf[0] = n_adj;
3121 311 : adjacency[i] = palloc((n_adj + 1) * sizeof(short));
3122 311 : memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
3123 : }
3124 : else
3125 767 : adjacency[i] = NULL;
3126 :
3127 1078 : ++i;
3128 : }
3129 : }
3130 :
3131 465 : num_sets = i - 1;
3132 :
3133 : /*
3134 : * Apply the graph matching algorithm to do the work.
3135 : */
3136 465 : state = BipartiteMatch(num_sets, num_sets, adjacency);
3137 :
3138 : /*
3139 : * Now, the state->pair* fields have the info we need to assign sets to
3140 : * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
3141 : * pair_vu[v] = u (both will be true, but we check both so that we can do
3142 : * it in one pass)
3143 : */
3144 465 : chains = palloc0((num_sets + 1) * sizeof(int));
3145 :
3146 1543 : for (i = 1; i <= num_sets; ++i)
3147 : {
3148 1078 : int u = state->pair_vu[i];
3149 1078 : int v = state->pair_uv[i];
3150 :
3151 1078 : if (u > 0 && u < i)
3152 0 : chains[i] = chains[u];
3153 1078 : else if (v > 0 && v < i)
3154 297 : chains[i] = chains[v];
3155 : else
3156 781 : chains[i] = ++num_chains;
3157 : }
3158 :
3159 : /* build result lists. */
3160 465 : results = palloc0((num_chains + 1) * sizeof(List *));
3161 :
3162 1543 : for (i = 1; i <= num_sets; ++i)
3163 : {
3164 1078 : int c = chains[i];
3165 :
3166 : Assert(c > 0);
3167 :
3168 1078 : results[c] = list_concat(results[c], orig_sets[i]);
3169 : }
3170 :
3171 : /* push any empty sets back on the first list. */
3172 748 : while (num_empty-- > 0)
3173 283 : results[1] = lcons(NIL, results[1]);
3174 :
3175 : /* make result list */
3176 1246 : for (i = 1; i <= num_chains; ++i)
3177 781 : result = lappend(result, results[i]);
3178 :
3179 : /*
3180 : * Free all the things.
3181 : *
3182 : * (This is over-fussy for small sets but for large sets we could have
3183 : * tied up a nontrivial amount of memory.)
3184 : */
3185 465 : BipartiteMatchFree(state);
3186 465 : pfree(results);
3187 465 : pfree(chains);
3188 1543 : for (i = 1; i <= num_sets; ++i)
3189 1078 : if (adjacency[i])
3190 311 : pfree(adjacency[i]);
3191 465 : pfree(adjacency);
3192 465 : pfree(adjacency_buf);
3193 465 : pfree(orig_sets);
3194 1543 : for (i = 1; i <= num_sets; ++i)
3195 1078 : bms_free(set_masks[i]);
3196 465 : pfree(set_masks);
3197 :
3198 465 : return result;
3199 : }
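/*
 * A small worked example (editorial, not from the source): for
 *     GROUP BY GROUPING SETS ((a), (c), (a, b), (a, b, c))
 * the poset under set inclusion admits the minimal chain partition
 *     { (a) < (a, b) < (a, b, c) }   and   { (c) }
 * so the matching above yields two lists, each implementable by a single
 * rollup-style aggregate pass over one sorted input.
 */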
3200 :
3201 : /*
3202 : * Reorder the elements of a list of grouping sets such that they have correct
3203 : * prefix relationships. Also inserts the GroupingSetData annotations.
3204 : *
3205 : * The input must be ordered with smallest sets first; the result is returned
3206 : * with largest sets first. Note that the result shares no list substructure
3207 : * with the input, so it's safe for the caller to modify it later.
3208 : *
3209 : * If we're passed in a sortclause, we follow its order of columns to the
3210 : * extent possible, to minimize the chance that we add unnecessary sorts.
3211 : * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
3212 : * gets implemented in one pass.)
3213 : */
3214 : static List *
3215 820 : reorder_grouping_sets(List *groupingSets, List *sortclause)
3216 : {
3217 : ListCell *lc;
3218 820 : List *previous = NIL;
3219 820 : List *result = NIL;
3220 :
3221 2329 : foreach(lc, groupingSets)
3222 : {
3223 1509 : List *candidate = (List *) lfirst(lc);
3224 1509 : List *new_elems = list_difference_int(candidate, previous);
3225 1509 : GroupingSetData *gs = makeNode(GroupingSetData);
3226 :
3227 1597 : while (list_length(sortclause) > list_length(previous) &&
3228 : new_elems != NIL)
3229 : {
3230 148 : SortGroupClause *sc = list_nth(sortclause, list_length(previous));
3231 148 : int ref = sc->tleSortGroupRef;
3232 :
3233 148 : if (list_member_int(new_elems, ref))
3234 : {
3235 88 : previous = lappend_int(previous, ref);
3236 88 : new_elems = list_delete_int(new_elems, ref);
3237 : }
3238 : else
3239 : {
3240 : /* diverged from the sortclause; give up on it */
3241 60 : sortclause = NIL;
3242 60 : break;
3243 : }
3244 : }
3245 :
3246 1509 : previous = list_concat(previous, new_elems);
3247 :
3248 1509 : gs->set = list_copy(previous);
3249 1509 : result = lcons(gs, result);
3250 : }
3251 :
3252 820 : list_free(previous);
3253 :
3254 820 : return result;
3255 : }
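/*
 * Tracing the header comment's example (editorial): with input sets (c)
 * then (a, b, c), smallest first, and sortclause c, b, a, the first
 * iteration consumes "c" from the sortclause, giving prefix (c); the
 * second adds b then a, still following the sortclause, giving (c, b, a).
 * The result, largest first, is (c, b, a), (c), which one sort on
 * (c, b, a) implements in a single pass.
 */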
3256 :
3257 : /*
3258 : * has_volatile_pathkey
3259 : * Returns true if any PathKey in 'keys' has an EquivalenceClass
3260 : * containing a volatile function. Otherwise returns false.
3261 : */
3262 : static bool
3263 1421 : has_volatile_pathkey(List *keys)
3264 : {
3265 : ListCell *lc;
3266 :
3267 2914 : foreach(lc, keys)
3268 : {
3269 1502 : PathKey *pathkey = lfirst_node(PathKey, lc);
3270 :
3271 1502 : if (pathkey->pk_eclass->ec_has_volatile)
3272 9 : return true;
3273 : }
3274 :
3275 1412 : return false;
3276 : }
3277 :
3278 : /*
3279 : * adjust_group_pathkeys_for_groupagg
3280 : * Add pathkeys to root->group_pathkeys to reflect the best set of
3281 : * pre-ordered input for ordered aggregates.
3282 : *
3283 : * We define "best" as the pathkeys that suit the largest number of
3284 : * aggregate functions. We find these by looking at the first ORDER BY /
3285 : * DISTINCT aggregate and take the pathkeys for that before searching for
3286 : * other aggregates that require the same or a more strict variation of the
3287 : * same pathkeys. We then repeat that process for any remaining aggregates
3288 : * with different pathkeys and if we find another set of pathkeys that suits a
3289 : * larger number of aggregates then we select those pathkeys instead.
3290 : *
3291 : * When the best pathkeys are found we also mark each Aggref that can use
3292 : * those pathkeys as aggpresorted = true.
3293 : *
3294 : * Note: When an aggregate function's ORDER BY / DISTINCT clause contains any
3295 : * volatile functions, we never make use of these pathkeys. We want to ensure
3296 : * that sorts using volatile functions are done independently in each Aggref
3297 : * rather than once at the query level. If we were to allow this then Aggrefs
3298 : * with compatible sort orders would all transition their rows in the same
3299 : * order if those pathkeys were deemed to be the best pathkeys to sort on.
3300 : * Whereas, if some other Aggref's pathkeys happened to be deemed
3301 : * better pathkeys to sort on, then the volatile-function Aggrefs would be
3302 : * left to perform their sorts individually. To avoid this inconsistent
3303 : * behavior, which could make Aggref results depend on what other Aggrefs the
3304 : * query contains, we always force Aggrefs with volatile functions to perform
3305 : * their own sorts.
3306 : */
3307 : static void
3308 1223 : adjust_group_pathkeys_for_groupagg(PlannerInfo *root)
3309 : {
3310 1223 : List *grouppathkeys = root->group_pathkeys;
3311 : List *bestpathkeys;
3312 : Bitmapset *bestaggs;
3313 : Bitmapset *unprocessed_aggs;
3314 : ListCell *lc;
3315 : int i;
3316 :
3317 : /* Shouldn't be here if there are grouping sets */
3318 : Assert(root->parse->groupingSets == NIL);
3319 : /* Shouldn't be here unless there are some ordered aggregates */
3320 : Assert(root->numOrderedAggs > 0);
3321 :
3322 : /* Do nothing if disabled */
3323 1223 : if (!enable_presorted_aggregate)
3324 3 : return;
3325 :
3326 : /*
3327 : * Make a first pass over all AggInfos to collect a Bitmapset containing
3328 : * the indexes of all AggInfos to be processed below.
3329 : */
3330 1220 : unprocessed_aggs = NULL;
3331 2782 : foreach(lc, root->agginfos)
3332 : {
3333 1562 : AggInfo *agginfo = lfirst_node(AggInfo, lc);
3334 1562 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3335 :
3336 1562 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3337 132 : continue;
3338 :
3339 : /* Skip unless there's a DISTINCT or ORDER BY clause */
3340 1430 : if (aggref->aggdistinct == NIL && aggref->aggorder == NIL)
3341 150 : continue;
3342 :
3343 : /* Additional safety checks are needed if there's a FILTER clause */
3344 1280 : if (aggref->aggfilter != NULL)
3345 : {
3346 : ListCell *lc2;
3347 27 : bool allow_presort = true;
3348 :
3349 : /*
3350 : * When the Aggref has a FILTER clause, it's possible that the
3351 : * filter removes rows that cannot be sorted because the
3352 : * expression to sort by results in an error during its
3353 : * evaluation. This is a problem for presorting as that happens
3354 : * before the FILTER, whereas without presorting, the Aggregate
3355 : * node will apply the FILTER *before* sorting. So that we never
3356 : * try to sort anything that might error, here we aim to skip over
3357 : * any Aggrefs with arguments with expressions which, when
3358 : * evaluated, could cause an ERROR. Vars and Consts are ok. There
3359 : * may be more cases that should be allowed, but more thought
3360 : * needs to be given. Err on the side of caution.
3361 : */
3362 51 : foreach(lc2, aggref->args)
3363 : {
3364 36 : TargetEntry *tle = (TargetEntry *) lfirst(lc2);
3365 36 : Expr *expr = tle->expr;
3366 :
3367 42 : while (IsA(expr, RelabelType))
3368 6 : expr = (Expr *) (castNode(RelabelType, expr))->arg;
3369 :
3370 : /* Common case, Vars and Consts are ok */
3371 36 : if (IsA(expr, Var) || IsA(expr, Const))
3372 24 : continue;
3373 :
3374 : /* Unsupported. Don't try to presort for this Aggref */
3375 12 : allow_presort = false;
3376 12 : break;
3377 : }
3378 :
3379 : /* Skip unsupported Aggrefs */
3380 27 : if (!allow_presort)
3381 12 : continue;
3382 : }
3383 :
3384 1268 : unprocessed_aggs = bms_add_member(unprocessed_aggs,
3385 : foreach_current_index(lc));
3386 : }
3387 :
3388 : /*
3389 : * Now process all the unprocessed_aggs to find the best set of pathkeys
3390 : * for the given set of aggregates.
3391 : *
3392 : * On the first outer loop here 'bestaggs' will be empty. We'll populate
3393 : * this during the first loop using the pathkeys for the very first
3394 : * AggInfo then taking any stronger pathkeys from any other AggInfos with
3395 : * a more strict set of compatible pathkeys. Once the outer loop is
3396 : * complete, we mark off all the aggregates with compatible pathkeys then
3397 : * remove those from the unprocessed_aggs and repeat the process to try to
3398 : * find another set of pathkeys that are suitable for a larger number of
3399 : * aggregates. The outer loop will stop when there are not enough
3400 : * unprocessed aggregates for it to be possible to find a set of pathkeys
3401 : * to suit a larger number of aggregates.
3402 : */
3403 1220 : bestpathkeys = NIL;
3404 1220 : bestaggs = NULL;
3405 2407 : while (bms_num_members(unprocessed_aggs) > bms_num_members(bestaggs))
3406 : {
3407 1187 : Bitmapset *aggindexes = NULL;
3408 1187 : List *currpathkeys = NIL;
3409 :
3410 1187 : i = -1;
3411 2608 : while ((i = bms_next_member(unprocessed_aggs, i)) >= 0)
3412 : {
3413 1421 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3414 1421 : Aggref *aggref = linitial_node(Aggref, agginfo->aggrefs);
3415 : List *sortlist;
3416 : List *pathkeys;
3417 :
3418 1421 : if (aggref->aggdistinct != NIL)
3419 362 : sortlist = aggref->aggdistinct;
3420 : else
3421 1059 : sortlist = aggref->aggorder;
3422 :
3423 1421 : pathkeys = make_pathkeys_for_sortclauses(root, sortlist,
3424 : aggref->args);
3425 :
3426 : /*
3427 : * Ignore Aggrefs which have volatile functions in their ORDER BY
3428 : * or DISTINCT clause.
3429 : */
3430 1421 : if (has_volatile_pathkey(pathkeys))
3431 : {
3432 9 : unprocessed_aggs = bms_del_member(unprocessed_aggs, i);
3433 9 : continue;
3434 : }
3435 :
3436 : /*
3437 : * When not set yet, take the pathkeys from the first unprocessed
3438 : * aggregate.
3439 : */
3440 1412 : if (currpathkeys == NIL)
3441 : {
3442 1184 : currpathkeys = pathkeys;
3443 :
3444 : /* include the GROUP BY pathkeys, if they exist */
3445 1184 : if (grouppathkeys != NIL)
3446 138 : currpathkeys = append_pathkeys(list_copy(grouppathkeys),
3447 : currpathkeys);
3448 :
3449 : /* record that we found pathkeys for this aggregate */
3450 1184 : aggindexes = bms_add_member(aggindexes, i);
3451 : }
3452 : else
3453 : {
3454 : /* now look for a stronger set of matching pathkeys */
3455 :
3456 : /* include the GROUP BY pathkeys, if they exist */
3457 228 : if (grouppathkeys != NIL)
3458 144 : pathkeys = append_pathkeys(list_copy(grouppathkeys),
3459 : pathkeys);
3460 :
3461 : /* are 'pathkeys' compatible or better than 'currpathkeys'? */
3462 228 : switch (compare_pathkeys(currpathkeys, pathkeys))
3463 : {
3464 6 : case PATHKEYS_BETTER2:
3465 : /* 'pathkeys' are stronger, use these ones instead */
3466 6 : currpathkeys = pathkeys;
3467 : /* FALLTHROUGH */
3468 :
3469 33 : case PATHKEYS_BETTER1:
3470 : /* 'pathkeys' are less strict */
3471 : /* FALLTHROUGH */
3472 :
3473 : case PATHKEYS_EQUAL:
3474 : /* mark this aggregate as covered by 'currpathkeys' */
3475 33 : aggindexes = bms_add_member(aggindexes, i);
3476 33 : break;
3477 :
3478 195 : case PATHKEYS_DIFFERENT:
3479 195 : break;
3480 : }
3481 : }
3482 : }
3483 :
3484 : /* remove the aggregates that we've just processed */
3485 1187 : unprocessed_aggs = bms_del_members(unprocessed_aggs, aggindexes);
3486 :
3487 : /*
3488 : * If this pass included more aggregates than the previous best then
3489 : * use these ones as the best set.
3490 : */
3491 1187 : if (bms_num_members(aggindexes) > bms_num_members(bestaggs))
3492 : {
3493 1133 : bestaggs = aggindexes;
3494 1133 : bestpathkeys = currpathkeys;
3495 : }
3496 : }
3497 :
3498 : /*
3499 : * If we found any ordered aggregates, update root->group_pathkeys to add
3500 : * the best set of aggregate pathkeys. Note that bestpathkeys includes
3501 : * the original GROUP BY pathkeys already.
3502 : */
3503 1220 : if (bestpathkeys != NIL)
3504 1103 : root->group_pathkeys = bestpathkeys;
3505 :
3506 : /*
3507 : * Now that we've found the best set of aggregates we can set the
3508 : * presorted flag to indicate to the executor that it needn't bother
3509 : * performing a sort for these Aggrefs. We're able to do this now as
3510 : * there's no chance of a Hash Aggregate plan as create_grouping_paths
3511 : * will not mark the GROUP BY as GROUPING_CAN_USE_HASH due to the presence
3512 : * of ordered aggregates.
3513 : */
3514 1220 : i = -1;
3515 2371 : while ((i = bms_next_member(bestaggs, i)) >= 0)
3516 : {
3517 1151 : AggInfo *agginfo = list_nth_node(AggInfo, root->agginfos, i);
3518 :
3519 2311 : foreach(lc, agginfo->aggrefs)
3520 : {
3521 1160 : Aggref *aggref = lfirst_node(Aggref, lc);
3522 :
3523 1160 : aggref->aggpresorted = true;
3524 : }
3525 : }
3526 : }
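/*
 * For illustration (hypothetical query):
 *     SELECT string_agg(x, ',' ORDER BY y),
 *            string_agg(x, ',' ORDER BY y, z)
 *     FROM tab;
 * The second Aggref's pathkeys (y, z) are compatible with, and stronger
 * than, the first's (y), so group_pathkeys becomes (y, z), both Aggrefs
 * are marked aggpresorted, and the input is sorted once rather than
 * separately within each aggregate.
 */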
3527 :
3528 : /*
3529 : * Compute query_pathkeys and other pathkeys during plan generation
3530 : */
3531 : static void
3532 271654 : standard_qp_callback(PlannerInfo *root, void *extra)
3533 : {
3534 271654 : Query *parse = root->parse;
3535 271654 : standard_qp_extra *qp_extra = (standard_qp_extra *) extra;
3536 271654 : List *tlist = root->processed_tlist;
3537 271654 : List *activeWindows = qp_extra->activeWindows;
3538 :
3539 : /*
3540 : * Calculate pathkeys that represent grouping/ordering and/or ordered
3541 : * aggregate requirements.
3542 : */
3543 271654 : if (qp_extra->gset_data)
3544 : {
3545 : /*
3546 : * With grouping sets, just use the first RollupData's groupClause. We
3547 : * don't make any effort to optimize grouping clauses when there are
3548 : * grouping sets, nor can we combine aggregate ordering keys with
3549 : * grouping.
3550 : */
3551 507 : List *rollups = qp_extra->gset_data->rollups;
3552 507 : List *groupClause = (rollups ? linitial_node(RollupData, rollups)->groupClause : NIL);
3553 :
3554 507 : if (grouping_is_sortable(groupClause))
3555 : {
3556 : bool sortable;
3557 :
3558 : /*
3559 : * The groupClause is logically below the grouping step. So if
3560 : * there is an RTE entry for the grouping step, we need to remove
3561 : * its RT index from the sort expressions before we make PathKeys
3562 : * for them.
3563 : */
3564 507 : root->group_pathkeys =
3565 507 : make_pathkeys_for_sortclauses_extended(root,
3566 : &groupClause,
3567 : tlist,
3568 : false,
3569 507 : parse->hasGroupRTE,
3570 : &sortable,
3571 : false);
3572 : Assert(sortable);
3573 507 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3574 : }
3575 : else
3576 : {
3577 0 : root->group_pathkeys = NIL;
3578 0 : root->num_groupby_pathkeys = 0;
3579 : }
3580 : }
3581 271147 : else if (parse->groupClause || root->numOrderedAggs > 0)
3582 3168 : {
3583 : /*
3584 : * With a plain GROUP BY list, we can remove any grouping items that
3585 : * are proven redundant by EquivalenceClass processing. For example,
3586 : * we can remove y given "WHERE x = y GROUP BY x, y". These aren't
3587 : * especially common cases, but they're nearly free to detect. Note
3588 : * that we remove redundant items from processed_groupClause but not
3589 : * the original parse->groupClause.
3590 : */
3591 : bool sortable;
3592 :
3593 : /*
3594 : * Convert group clauses into pathkeys. Set the ec_sortref field of
3595 : * EquivalenceClass'es if it's not set yet.
3596 : */
3597 3168 : root->group_pathkeys =
3598 3168 : make_pathkeys_for_sortclauses_extended(root,
3599 : &root->processed_groupClause,
3600 : tlist,
3601 : true,
3602 : false,
3603 : &sortable,
3604 : true);
3605 3168 : if (!sortable)
3606 : {
3607 : /* Can't sort; no point in considering aggregate ordering either */
3608 0 : root->group_pathkeys = NIL;
3609 0 : root->num_groupby_pathkeys = 0;
3610 : }
3611 : else
3612 : {
3613 3168 : root->num_groupby_pathkeys = list_length(root->group_pathkeys);
3614 : /* If we have ordered aggs, consider adding onto group_pathkeys */
3615 3168 : if (root->numOrderedAggs > 0)
3616 1223 : adjust_group_pathkeys_for_groupagg(root);
3617 : }
3618 : }
3619 : else
3620 : {
3621 267979 : root->group_pathkeys = NIL;
3622 267979 : root->num_groupby_pathkeys = 0;
3623 : }
3624 :
3625 : /* We consider only the first (bottom) window in pathkeys logic */
3626 271654 : if (activeWindows != NIL)
3627 : {
3628 1338 : WindowClause *wc = linitial_node(WindowClause, activeWindows);
3629 :
3630 1338 : root->window_pathkeys = make_pathkeys_for_window(root,
3631 : wc,
3632 : tlist);
3633 : }
3634 : else
3635 270316 : root->window_pathkeys = NIL;
3636 :
3637 : /*
3638 : * As with GROUP BY, we can discard any DISTINCT items that are proven
3639 : * redundant by EquivalenceClass processing. The non-redundant list is
3640 : * kept in root->processed_distinctClause, leaving the original
3641 : * parse->distinctClause alone.
3642 : */
3643 271654 : if (parse->distinctClause)
3644 : {
3645 : bool sortable;
3646 :
3647 : /* Make a copy since pathkey processing can modify the list */
3648 1507 : root->processed_distinctClause = list_copy(parse->distinctClause);
3649 1507 : root->distinct_pathkeys =
3650 1507 : make_pathkeys_for_sortclauses_extended(root,
3651 : &root->processed_distinctClause,
3652 : tlist,
3653 : true,
3654 : false,
3655 : &sortable,
3656 : false);
3657 1507 : if (!sortable)
3658 3 : root->distinct_pathkeys = NIL;
3659 : }
3660 : else
3661 270147 : root->distinct_pathkeys = NIL;
3662 :
3663 271654 : root->sort_pathkeys =
3664 271654 : make_pathkeys_for_sortclauses(root,
3665 : parse->sortClause,
3666 : tlist);
3667 :
3668 : /* setting setop_pathkeys might be useful to the union planner */
3669 271654 : if (qp_extra->setop != NULL)
3670 : {
3671 : List *groupClauses;
3672 : bool sortable;
3673 :
3674 6388 : groupClauses = generate_setop_child_grouplist(qp_extra->setop, tlist);
3675 :
3676 6388 : root->setop_pathkeys =
3677 6388 : make_pathkeys_for_sortclauses_extended(root,
3678 : &groupClauses,
3679 : tlist,
3680 : false,
3681 : false,
3682 : &sortable,
3683 : false);
3684 6388 : if (!sortable)
3685 104 : root->setop_pathkeys = NIL;
3686 : }
3687 : else
3688 265266 : root->setop_pathkeys = NIL;
3689 :
3690 : /*
3691 : * Figure out whether we want a sorted result from query_planner.
3692 : *
3693 : * If we have a sortable GROUP BY clause, then we want a result sorted
3694 : * properly for grouping. Otherwise, if we have window functions to
3695 : * evaluate, we try to sort for the first window. Otherwise, if there's a
3696 : * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
3697 : * we try to produce output that's sufficiently well sorted for the
3698 : * DISTINCT. Otherwise, if there is an ORDER BY clause, we want to sort
3699 : * by the ORDER BY clause. Otherwise, if we're a subquery being planned
3700 : * for a set operation which can benefit from presorted results and have a
3701 : * sortable targetlist, we want to sort by the target list.
3702 : *
3703 : * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
3704 : * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
3705 : * that might just leave us failing to exploit an available sort order at
3706 : * all. Needs more thought. The choice for DISTINCT versus ORDER BY is
3707 : * much easier, since we know that the parser ensured that one is a
3708 : * superset of the other.
3709 : */
3710 271654 : if (root->group_pathkeys)
3711 3456 : root->query_pathkeys = root->group_pathkeys;
3712 268198 : else if (root->window_pathkeys)
3713 1055 : root->query_pathkeys = root->window_pathkeys;
3714 534286 : else if (list_length(root->distinct_pathkeys) >
3715 267143 : list_length(root->sort_pathkeys))
3716 1259 : root->query_pathkeys = root->distinct_pathkeys;
3717 265884 : else if (root->sort_pathkeys)
3718 34689 : root->query_pathkeys = root->sort_pathkeys;
3719 231195 : else if (root->setop_pathkeys != NIL)
3720 5688 : root->query_pathkeys = root->setop_pathkeys;
3721 : else
3722 225507 : root->query_pathkeys = NIL;
3723 271654 : }
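/*
 * For example (hypothetical query): in
 *     SELECT DISTINCT a, b FROM t ORDER BY a;
 * distinct_pathkeys (a, b) is longer than sort_pathkeys (a), so
 * query_pathkeys becomes (a, b); output sorted that way satisfies the
 * DISTINCT directly and the ORDER BY via its prefix.
 */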
3724 :
3725 : /*
3726 : * Estimate number of groups produced by grouping clauses (1 if not grouping)
3727 : *
3728 : * path_rows: number of output rows from scan/join step
3729 : * gd: grouping sets data including list of grouping sets and their clauses
3730 : * target_list: target list containing group clause references
3731 : *
3732 : * If doing grouping sets, we also annotate the gsets data with the estimates
3733 : * for each set and each individual rollup list, with a view to later
3734 : * determining whether some combination of them could be hashed instead.
3735 : */
3736 : static double
3737 27179 : get_number_of_groups(PlannerInfo *root,
3738 : double path_rows,
3739 : grouping_sets_data *gd,
3740 : List *target_list)
3741 : {
3742 27179 : Query *parse = root->parse;
3743 : double dNumGroups;
3744 :
3745 27179 : if (parse->groupClause)
3746 : {
3747 : List *groupExprs;
3748 :
3749 5204 : if (parse->groupingSets)
3750 : {
3751 : /* Add up the estimates for each grouping set */
3752 : ListCell *lc;
3753 :
3754 : Assert(gd); /* keep Coverity happy */
3755 :
3756 468 : dNumGroups = 0;
3757 :
3758 1249 : foreach(lc, gd->rollups)
3759 : {
3760 781 : RollupData *rollup = lfirst_node(RollupData, lc);
3761 : ListCell *lc2;
3762 : ListCell *lc3;
3763 :
3764 781 : groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
3765 : target_list);
3766 :
3767 781 : rollup->numGroups = 0.0;
3768 :
3769 2221 : forboth(lc2, rollup->gsets, lc3, rollup->gsets_data)
3770 : {
3771 1440 : List *gset = (List *) lfirst(lc2);
3772 1440 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc3);
3773 1440 : double numGroups = estimate_num_groups(root,
3774 : groupExprs,
3775 : path_rows,
3776 : &gset,
3777 : NULL);
3778 :
3779 1440 : gs->numGroups = numGroups;
3780 1440 : rollup->numGroups += numGroups;
3781 : }
3782 :
3783 781 : dNumGroups += rollup->numGroups;
3784 : }
3785 :
3786 468 : if (gd->hash_sets_idx)
3787 : {
3788 : ListCell *lc2;
3789 :
3790 18 : gd->dNumHashGroups = 0;
3791 :
3792 18 : groupExprs = get_sortgrouplist_exprs(parse->groupClause,
3793 : target_list);
3794 :
3795 39 : forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
3796 : {
3797 21 : List *gset = (List *) lfirst(lc);
3798 21 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
3799 21 : double numGroups = estimate_num_groups(root,
3800 : groupExprs,
3801 : path_rows,
3802 : &gset,
3803 : NULL);
3804 :
3805 21 : gs->numGroups = numGroups;
3806 21 : gd->dNumHashGroups += numGroups;
3807 : }
3808 :
3809 18 : dNumGroups += gd->dNumHashGroups;
3810 : }
3811 : }
3812 : else
3813 : {
3814 : /* Plain GROUP BY -- estimate based on optimized groupClause */
3815 4736 : groupExprs = get_sortgrouplist_exprs(root->processed_groupClause,
3816 : target_list);
3817 :
3818 4736 : dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
3819 : NULL, NULL);
3820 : }
3821 : }
3822 21975 : else if (parse->groupingSets)
3823 : {
3824 : /* Empty grouping sets ... one result row for each one */
3825 30 : dNumGroups = list_length(parse->groupingSets);
3826 : }
3827 21945 : else if (parse->hasAggs || root->hasHavingQual)
3828 : {
3829 : /* Plain aggregation, one result row */
3830 21945 : dNumGroups = 1;
3831 : }
3832 : else
3833 : {
3834 : /* Not grouping */
3835 0 : dNumGroups = 1;
3836 : }
3837 :
3838 27179 : return dNumGroups;
3839 : }
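/*
 * For example (editorial sketch): with ROLLUP(a, b) the only rollup
 * contains the sets (a, b), (a), and (), so its numGroups is the sum of
 * the three per-set estimates (roughly ndistinct(a, b) + ndistinct(a)
 * + 1), and that sum becomes dNumGroups.
 */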
3840 :
3841 : /*
3842 : * create_grouping_paths
3843 : *
3844 : * Build a new upperrel containing Paths for grouping and/or aggregation.
3845 : * Along the way, we also build an upperrel for Paths which are partially
3846 : * grouped and/or aggregated. A partially grouped and/or aggregated path
3847 : * needs a FinalizeAggregate node to complete the aggregation. Currently,
3848 : * the only partially grouped paths we build are also partial paths; that
3849 : * is, they need a Gather and then a FinalizeAggregate.
3850 : *
3851 : * input_rel: contains the source-data Paths
3852 : * target: the pathtarget for the result Paths to compute
3853 : * gd: grouping sets data including list of grouping sets and their clauses
3854 : *
3855 : * Note: all Paths in input_rel are expected to return the target computed
3856 : * by make_group_input_target.
3857 : */
3858 : static RelOptInfo *
3859 22981 : create_grouping_paths(PlannerInfo *root,
3860 : RelOptInfo *input_rel,
3861 : PathTarget *target,
3862 : bool target_parallel_safe,
3863 : grouping_sets_data *gd)
3864 : {
3865 22981 : Query *parse = root->parse;
3866 : RelOptInfo *grouped_rel;
3867 : RelOptInfo *partially_grouped_rel;
3868 : AggClauseCosts agg_costs;
3869 :
3870 137886 : MemSet(&agg_costs, 0, sizeof(AggClauseCosts));
3871 22981 : get_agg_clause_costs(root, AGGSPLIT_SIMPLE, &agg_costs);
3872 :
3873 : /*
3874 : * Create grouping relation to hold fully aggregated grouping and/or
3875 : * aggregation paths.
3876 : */
3877 22981 : grouped_rel = make_grouping_rel(root, input_rel, target,
3878 : target_parallel_safe, parse->havingQual);
3879 :
3880 : /*
3881 : * Create either paths for a degenerate grouping or paths for ordinary
3882 : * grouping, as appropriate.
3883 : */
3884 22981 : if (is_degenerate_grouping(root))
3885 21 : create_degenerate_grouping_paths(root, input_rel, grouped_rel);
3886 : else
3887 : {
3888 22960 : int flags = 0;
3889 : GroupPathExtraData extra;
3890 :
3891 : /*
3892 : * Determine whether it's possible to perform sort-based
3893 : * implementations of grouping. (Note that if processed_groupClause
3894 : * is empty, grouping_is_sortable() is trivially true, and all the
3895 : * pathkeys_contained_in() tests will succeed too, so that we'll
3896 : * consider every surviving input path.)
3897 : *
3898 : * If we have grouping sets, we might be able to sort some but not all
3899 : * of them; in this case, we need can_sort to be true as long as we
3900 : * must consider any sorted-input plan.
3901 : */
3902 22960 : if ((gd && gd->rollups != NIL)
3903 22465 : || grouping_is_sortable(root->processed_groupClause))
3904 22957 : flags |= GROUPING_CAN_USE_SORT;
3905 :
3906 : /*
3907 : * Determine whether we should consider hash-based implementations of
3908 : * grouping.
3909 : *
3910 : * Hashed aggregation only applies if we're grouping. If we have
3911 : * grouping sets, some groups might be hashable but others not; in
3912 : * this case we set can_hash true as long as there is nothing globally
3913 : * preventing us from hashing (and we should therefore consider plans
3914 : * with hashes).
3915 : *
3916 : * Executor doesn't support hashed aggregation with DISTINCT or ORDER
3917 : * BY aggregates. (Doing so would imply storing *all* the input
3918 : * values in the hash table, and/or running many sorts in parallel,
3919 : * either of which seems like a certain loser.) We similarly don't
3920 : * support ordered-set aggregates in hashed aggregation, but that case
3921 : * is also included in the numOrderedAggs count.
3922 : *
3923 : * Note: grouping_is_hashable() is much more expensive to check than
3924 : * the other gating conditions, so we want to do it last.
3925 : */
3926 22960 : if ((parse->groupClause != NIL &&
3927 4480 : root->numOrderedAggs == 0 &&
3928 1945 : (gd ? gd->any_hashable : grouping_is_hashable(root->processed_groupClause))))
3929 2393 : flags |= GROUPING_CAN_USE_HASH;
3930 :
3931 : /*
3932 : * Determine whether partial aggregation is possible.
3933 : */
3934 22960 : if (can_partial_agg(root))
3935 20511 : flags |= GROUPING_CAN_PARTIAL_AGG;
3936 :
3937 22960 : extra.flags = flags;
3938 22960 : extra.target_parallel_safe = target_parallel_safe;
3939 22960 : extra.havingQual = parse->havingQual;
3940 22960 : extra.targetList = parse->targetList;
3941 22960 : extra.partial_costs_set = false;
3942 :
3943 : /*
3944 : * Determine whether partitionwise aggregation is in theory possible.
3945 : * It can be disabled by the user, and for now, we don't try to
3946 : * support grouping sets. create_ordinary_grouping_paths() will check
3947 : * additional conditions, such as whether input_rel is partitioned.
3948 : */
3949 22960 : if (enable_partitionwise_aggregate && !parse->groupingSets)
3950 350 : extra.patype = PARTITIONWISE_AGGREGATE_FULL;
3951 : else
3952 22610 : extra.patype = PARTITIONWISE_AGGREGATE_NONE;
3953 :
3954 22960 : create_ordinary_grouping_paths(root, input_rel, grouped_rel,
3955 : &agg_costs, gd, &extra,
3956 : &partially_grouped_rel);
3957 : }
3958 :
3959 22978 : set_cheapest(grouped_rel);
3960 22978 : return grouped_rel;
3961 : }
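/*
 * For example (hypothetical query):
 *     SELECT a, array_agg(b ORDER BY b) FROM t GROUP BY a;
 * has an ordered aggregate, so numOrderedAggs > 0 and the
 * GROUPING_CAN_USE_HASH flag is not set above; only sort-based grouping
 * paths are generated for this query.
 */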
3962 :
3963 : /*
3964 : * make_grouping_rel
3965 : *
3966 : * Create a new grouping rel and set basic properties.
3967 : *
3968 : * input_rel represents the underlying scan/join relation.
3969 : * target is the output expected from the grouping relation.
3970 : */
3971 : static RelOptInfo *
3972 24064 : make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
3973 : PathTarget *target, bool target_parallel_safe,
3974 : Node *havingQual)
3975 : {
3976 : RelOptInfo *grouped_rel;
3977 :
3978 24064 : if (IS_OTHER_REL(input_rel))
3979 : {
3980 1083 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
3981 : input_rel->relids);
3982 1083 : grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
3983 : }
3984 : else
3985 : {
3986 : /*
3987 : * By tradition, the relids set for the main grouping relation is
3988 : * NULL. (This could be changed, but might require adjustments
3989 : * elsewhere.)
3990 : */
3991 22981 : grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
3992 : }
3993 :
3994 : /* Set target. */
3995 24064 : grouped_rel->reltarget = target;
3996 :
3997 : /*
3998 : * If the input relation is not parallel-safe, then the grouped relation
3999 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
4000 : * target list and HAVING quals are parallel-safe.
4001 : */
4002 38818 : if (input_rel->consider_parallel && target_parallel_safe &&
4003 14754 : is_parallel_safe(root, havingQual))
4004 14742 : grouped_rel->consider_parallel = true;
4005 :
4006 : /* Assume that the same path generation strategies are allowed */
4007 24064 : grouped_rel->pgs_mask = input_rel->pgs_mask;
4008 :
4009 : /*
4010 : * If the input rel belongs to a single FDW, so does the grouped rel.
4011 : */
4012 24064 : grouped_rel->serverid = input_rel->serverid;
4013 24064 : grouped_rel->userid = input_rel->userid;
4014 24064 : grouped_rel->useridiscurrent = input_rel->useridiscurrent;
4015 24064 : grouped_rel->fdwroutine = input_rel->fdwroutine;
4016 :
4017 24064 : return grouped_rel;
4018 : }
4019 :
4020 : /*
4021 : * is_degenerate_grouping
4022 : *
4023 : * A degenerate grouping is one in which the query has a HAVING qual and/or
4024 : * grouping sets, but no aggregates and no GROUP BY (which implies that the
4025 : * grouping sets are all empty).
4026 : */
4027 : static bool
4028 22981 : is_degenerate_grouping(PlannerInfo *root)
4029 : {
4030 22981 : Query *parse = root->parse;
4031 :
4032 22356 : return (root->hasHavingQual || parse->groupingSets) &&
4033 45337 : !parse->hasAggs && parse->groupClause == NIL;
4034 : }
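/*
 * For example (hypothetical query):
 *     SELECT 1 FROM tab GROUP BY GROUPING SETS ((), ());
 * is degenerate: it has grouping sets but no aggregates and an empty
 * GROUP BY list, so it needs no input from tab at all (see below).
 */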
4035 :
4036 : /*
4037 : * create_degenerate_grouping_paths
4038 : *
4039 : * When the grouping is degenerate (see is_degenerate_grouping), we are
4040 : * supposed to emit either zero or one row for each grouping set depending on
4041 : * whether HAVING succeeds. Furthermore, there cannot be any variables in
4042 : * either HAVING or the targetlist, so we actually do not need the FROM table
4043 : * at all! We can just throw away the plan-so-far and generate a Result node.
4044 : * This is a sufficiently unusual corner case that it's not worth contorting
4045 : * the structure of this module to avoid having to generate the earlier paths
4046 : * in the first place.
4047 : */
4048 : static void
4049 21 : create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
4050 : RelOptInfo *grouped_rel)
4051 : {
4052 21 : Query *parse = root->parse;
4053 : int nrows;
4054 : Path *path;
4055 :
4056 21 : nrows = list_length(parse->groupingSets);
4057 21 : if (nrows > 1)
4058 : {
4059 : /*
4060 : * Doesn't seem worthwhile writing code to cons up a generate_series
4061 : * or a values scan to emit multiple rows. Instead just make N clones
4062 : * and append them. (With a volatile HAVING clause, this means you
4063 : * might get between 0 and N output rows. Offhand I think that's
4064 : * desired.)
4065 : */
4066 6 : AppendPathInput append = {0};
4067 :
4068 18 : while (--nrows >= 0)
4069 : {
4070 : path = (Path *)
4071 12 : create_group_result_path(root, grouped_rel,
4072 12 : grouped_rel->reltarget,
4073 12 : (List *) parse->havingQual);
4074 12 : append.subpaths = lappend(append.subpaths, path);
4075 : }
4076 : path = (Path *)
4077 6 : create_append_path(root,
4078 : grouped_rel,
4079 : append,
4080 : NIL,
4081 : NULL,
4082 : 0,
4083 : false,
4084 : -1);
4085 : }
4086 : else
4087 : {
4088 : /* No grouping sets, or just one, so one output row */
4089 : path = (Path *)
4090 15 : create_group_result_path(root, grouped_rel,
4091 15 : grouped_rel->reltarget,
4092 15 : (List *) parse->havingQual);
4093 : }
4094 :
4095 21 : add_path(grouped_rel, path);
4096 21 : }
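/*
 * Continuing the example above (editorial): for GROUPING SETS ((), ())
 * nrows is 2, so two Result paths are built and combined with an Append,
 * emitting one row per empty grouping set (possibly fewer if a volatile
 * HAVING qual discards some).
 */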
4097 :
4098 : /*
4099 : * create_ordinary_grouping_paths
4100 : *
4101 : * Create grouping paths for the ordinary (that is, non-degenerate) case.
4102 : *
4103 : * We need to consider sorted and hashed aggregation in the same function,
4104 : * because otherwise (1) it would be harder to throw an appropriate error
4105 : * message if neither way works, and (2) we should not allow hashtable size
4106 : * considerations to dissuade us from using hashing if sorting is not possible.
4107 : *
4108 : * *partially_grouped_rel_p will be set to the partially grouped rel which this
4109 : * function creates, or to NULL if it doesn't create one.
4110 : */
4111 : static void
4112 24043 : create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
4113 : RelOptInfo *grouped_rel,
4114 : const AggClauseCosts *agg_costs,
4115 : grouping_sets_data *gd,
4116 : GroupPathExtraData *extra,
4117 : RelOptInfo **partially_grouped_rel_p)
4118 : {
4119 24043 : RelOptInfo *partially_grouped_rel = NULL;
4120 24043 : PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
4121 :
4122 : /*
4123 : * If this is the topmost grouping relation or if the parent relation is
4124 : * doing some form of partitionwise aggregation, then we may be able to do
4125 : * it at this level also. However, if the input relation is not
4126 : * partitioned, partitionwise aggregate is impossible.
4127 : */
4128 24043 : if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
4129 1433 : IS_PARTITIONED_REL(input_rel))
4130 : {
4131 : /*
4132 : * If this is the topmost relation or if the parent relation is doing
4133 : * full partitionwise aggregation, then we can do full partitionwise
4134 : * aggregation provided that the GROUP BY clause contains all of the
4135 : * partitioning columns at this level and the collation used by GROUP
4136 : * BY matches the partitioning collation. Otherwise, we can do at
4137 : * most partial partitionwise aggregation. But if partial aggregation
4138 : * is not supported in general then we can't use it for partitionwise
4139 : * aggregation either.
4140 : *
4141 : * Check parse->groupClause not processed_groupClause, because it's
4142 : * okay if some of the partitioning columns were proved redundant.
4143 : */
4144 820 : if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
4145 386 : group_by_has_partkey(input_rel, extra->targetList,
4146 386 : root->parse->groupClause))
4147 244 : patype = PARTITIONWISE_AGGREGATE_FULL;
4148 190 : else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4149 169 : patype = PARTITIONWISE_AGGREGATE_PARTIAL;
4150 : else
4151 21 : patype = PARTITIONWISE_AGGREGATE_NONE;
4152 : }
4153 :
4154 : /*
4155 : * Before generating paths for grouped_rel, we first generate any possible
4156 : * partially grouped paths; that way, later code can easily consider both
4157 : * parallel and non-parallel approaches to grouping.
4158 : */
4159 24043 : if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
4160 : {
4161 : bool force_rel_creation;
4162 :
4163 : /*
4164 : * If we're doing partitionwise aggregation at this level, force
4165 : * creation of a partially_grouped_rel so we can add partitionwise
4166 : * paths to it.
4167 : */
4168 21558 : force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
4169 :
4170 : partially_grouped_rel =
4171 21558 : create_partial_grouping_paths(root,
4172 : grouped_rel,
4173 : input_rel,
4174 : gd,
4175 : extra,
4176 : force_rel_creation);
4177 : }
4178 :
4179 : /* Set out parameter. */
4180 24043 : *partially_grouped_rel_p = partially_grouped_rel;
4181 :
4182 : /* Apply partitionwise aggregation technique, if possible. */
4183 24043 : if (patype != PARTITIONWISE_AGGREGATE_NONE)
4184 413 : create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
4185 : partially_grouped_rel, agg_costs,
4186 : gd, patype, extra);
4187 :
4188 : /* If we are doing partial aggregation only, return. */
4189 24043 : if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
4190 : {
4191 : Assert(partially_grouped_rel);
4192 :
4193 429 : if (partially_grouped_rel->pathlist)
4194 429 : set_cheapest(partially_grouped_rel);
4195 :
4196 429 : return;
4197 : }
4198 :
4199 : /* Gather any partially grouped partial paths. */
4200 23614 : if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
4201 1385 : gather_grouping_paths(root, partially_grouped_rel);
4202 :
4203 : /* Now choose the best path(s) for partially_grouped_rel. */
4204 23614 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
4205 1499 : set_cheapest(partially_grouped_rel);
4206 :
4207 : /* Build final grouping paths */
4208 23614 : add_paths_to_grouping_rel(root, input_rel, grouped_rel,
4209 : partially_grouped_rel, agg_costs, gd,
4210 : extra);
4211 :
4212 : /* Give a helpful error if we failed to find any implementation */
4213 23614 : if (grouped_rel->pathlist == NIL)
4214 3 : ereport(ERROR,
4215 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4216 : errmsg("could not implement GROUP BY"),
4217 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4218 :
4219 : /*
4220 : * If there is an FDW that's responsible for all baserels of the query,
4221 : * let it consider adding ForeignPaths.
4222 : */
4223 23611 : if (grouped_rel->fdwroutine &&
4224 169 : grouped_rel->fdwroutine->GetForeignUpperPaths)
4225 168 : grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
4226 : input_rel, grouped_rel,
4227 : extra);
4228 :
4229 : /* Let extensions possibly add some more paths */
4230 23611 : if (create_upper_paths_hook)
4231 0 : (*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
4232 : input_rel, grouped_rel,
4233 : extra);
4234 : }
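/*
 * For example (hypothetical partitioned table): if ptab is partitioned
 * by (pk), then with enable_partitionwise_aggregate = on,
 *     SELECT pk, count(*) FROM ptab GROUP BY pk;
 * can use PARTITIONWISE_AGGREGATE_FULL (each partition aggregated
 * independently and the results appended), whereas grouping by a
 * non-partitioning column at best allows
 * PARTITIONWISE_AGGREGATE_PARTIAL, with per-partition partial aggregates
 * finalized above the Append.
 */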
4235 :
4236 : /*
4237 : * For a given input path, consider the possible ways of doing grouping sets on
4238 : * it, by combinations of hashing and sorting. This can be called multiple
4239 : * times, so it's important that it not scribble on input. No result is
4240 : * returned, but any generated paths are added to grouped_rel.
4241 : */
4242 : static void
4243 981 : consider_groupingsets_paths(PlannerInfo *root,
4244 : RelOptInfo *grouped_rel,
4245 : Path *path,
4246 : bool is_sorted,
4247 : bool can_hash,
4248 : grouping_sets_data *gd,
4249 : const AggClauseCosts *agg_costs,
4250 : double dNumGroups)
4251 : {
4252 981 : Query *parse = root->parse;
4253 981 : Size hash_mem_limit = get_hash_memory_limit();
4254 :
4255 : /*
4256 : * If we're not being offered sorted input, then only consider plans that
4257 : * can be done entirely by hashing.
4258 : *
4259 : * We can hash everything if it looks like it'll fit in hash_mem. But if
4260 : * the input is actually sorted despite not being advertised as such, we
4261 : * prefer to make use of that in order to use less memory.
4262 : *
4263 : * If none of the grouping sets are sortable, then ignore the hash_mem
4264 : * limit and generate a path anyway, since otherwise we'll just fail.
4265 : */
4266 981 : if (!is_sorted)
4267 : {
4268 450 : List *new_rollups = NIL;
4269 450 : RollupData *unhashed_rollup = NULL;
4270 : List *sets_data;
4271 450 : List *empty_sets_data = NIL;
4272 450 : List *empty_sets = NIL;
4273 : ListCell *lc;
4274 450 : ListCell *l_start = list_head(gd->rollups);
4275 450 : AggStrategy strat = AGG_HASHED;
4276 : double hashsize;
4277 450 : double exclude_groups = 0.0;
4278 :
4279 : Assert(can_hash);
4280 :
4281 : /*
4282 : * If the input is coincidentally sorted usefully (which can happen
4283 : * even if is_sorted is false, since that only means that our caller
4284 : * has set up the sorting for us), then save some hashtable space by
4285 : * making use of that. But we need to watch out for degenerate cases:
4286 : *
4287 : * 1) If there are any empty grouping sets, then group_pathkeys might
4288 : * be NIL if all non-empty grouping sets are unsortable. In this case,
4289 : * there will be a rollup containing only empty groups, and the
4290 : * pathkeys_contained_in test is vacuously true; this is ok.
4291 : *
4292 : * XXX: the above relies on the fact that group_pathkeys is generated
4293 : * from the first rollup. If we add the ability to consider multiple
4294 : * sort orders for grouping input, this assumption might fail.
4295 : *
4296 : * 2) If there are no empty sets and only unsortable sets, then the
4297 : * rollups list will be empty (and thus l_start == NULL), and
4298 : * group_pathkeys will be NIL; we must ensure that the vacuously-true
4299 : * pathkeys_contained_in test doesn't cause us to crash.
4300 : */
4301 897 : if (l_start != NULL &&
4302 447 : pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
4303 : {
4304 18 : unhashed_rollup = lfirst_node(RollupData, l_start);
4305 18 : exclude_groups = unhashed_rollup->numGroups;
4306 18 : l_start = lnext(gd->rollups, l_start);
4307 : }
4308 :
4309 450 : hashsize = estimate_hashagg_tablesize(root,
4310 : path,
4311 : agg_costs,
4312 : dNumGroups - exclude_groups);
4313 :
4314 : /*
4315 : * gd->rollups is empty if we have only unsortable columns to work
4316 : * with. Override hash_mem in that case; otherwise, we'll rely on the
4317 : * sorted-input case to generate usable mixed paths.
4318 : */
4319 450 : if (hashsize > hash_mem_limit && gd->rollups)
4320 9 : return; /* nope, won't fit */
4321 :
4322 : /*
4323 : * We need to burst the existing rollups list into individual grouping
4324 : * sets and recompute a groupClause for each set.
4325 : */
4326 441 : sets_data = list_copy(gd->unsortable_sets);
4327 :
4328 1114 : for_each_cell(lc, gd->rollups, l_start)
4329 : {
4330 685 : RollupData *rollup = lfirst_node(RollupData, lc);
4331 :
4332 : /*
4333 : * If we find an unhashable rollup that's not been skipped by the
4334 : * "actually sorted" check above, we can't cope; we'd need sorted
4335 : * input (with a different sort order) but we can't get that here.
4336 : * So bail out; we'll get a valid path from the is_sorted case
4337 : * instead.
4338 : *
4339 : * The mere presence of empty grouping sets doesn't make a rollup
4340 : * unhashable (see preprocess_grouping_sets), we handle those
4341 : * specially below.
4342 : */
4343 685 : if (!rollup->hashable)
4344 12 : return;
4345 :
4346 673 : sets_data = list_concat(sets_data, rollup->gsets_data);
4347 : }
4348 1740 : foreach(lc, sets_data)
4349 : {
4350 1311 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4351 1311 : List *gset = gs->set;
4352 : RollupData *rollup;
4353 :
4354 1311 : if (gset == NIL)
4355 : {
4356 : /* Empty grouping sets can't be hashed. */
4357 265 : empty_sets_data = lappend(empty_sets_data, gs);
4358 265 : empty_sets = lappend(empty_sets, NIL);
4359 : }
4360 : else
4361 : {
4362 1046 : rollup = makeNode(RollupData);
4363 :
4364 1046 : rollup->groupClause = preprocess_groupclause(root, gset);
4365 1046 : rollup->gsets_data = list_make1(gs);
4366 1046 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4367 : rollup->gsets_data,
4368 : gd->tleref_to_colnum_map);
4369 1046 : rollup->numGroups = gs->numGroups;
4370 1046 : rollup->hashable = true;
4371 1046 : rollup->is_hashed = true;
4372 1046 : new_rollups = lappend(new_rollups, rollup);
4373 : }
4374 : }
4375 :
4376 : /*
4377 : * If we didn't find anything nonempty to hash, then bail. We'll
4378 : * generate a path from the is_sorted case.
4379 : */
4380 429 : if (new_rollups == NIL)
4381 0 : return;
4382 :
4383 : /*
4384 : * If there were empty grouping sets they should have been in the
4385 : * first rollup.
4386 : */
4387 : Assert(!unhashed_rollup || !empty_sets);
4388 :
4389 429 : if (unhashed_rollup)
4390 : {
4391 18 : new_rollups = lappend(new_rollups, unhashed_rollup);
4392 18 : strat = AGG_MIXED;
4393 : }
4394 411 : else if (empty_sets)
4395 : {
4396 241 : RollupData *rollup = makeNode(RollupData);
4397 :
4398 241 : rollup->groupClause = NIL;
4399 241 : rollup->gsets_data = empty_sets_data;
4400 241 : rollup->gsets = empty_sets;
4401 241 : rollup->numGroups = list_length(empty_sets);
4402 241 : rollup->hashable = false;
4403 241 : rollup->is_hashed = false;
4404 241 : new_rollups = lappend(new_rollups, rollup);
4405 241 : strat = AGG_MIXED;
4406 : }
4407 :
4408 429 : add_path(grouped_rel, (Path *)
4409 429 : create_groupingsets_path(root,
4410 : grouped_rel,
4411 : path,
4412 429 : (List *) parse->havingQual,
4413 : strat,
4414 : new_rollups,
4415 : agg_costs));
4416 429 : return;
4417 : }
4418 :
4419 : /*
4420 : * If we have sorted input but nothing we can do with it, bail.
4421 : */
4422 531 : if (gd->rollups == NIL)
4423 0 : return;
4424 :
4425 : /*
4426 : * Given sorted input, we try to make two paths: one sorted and one mixed
4427 : * sort/hash. (We need to try both because hashagg might be disabled, or
4428 : * some columns might not be sortable.)
4429 : *
4430 : * can_hash is passed in as false if some obstacle elsewhere (such as
4431 : * ordered aggs) means that we shouldn't consider hashing at all.
4432 : */
4433 531 : if (can_hash && gd->any_hashable)
4434 : {
4435 483 : List *rollups = NIL;
4436 483 : List *hash_sets = list_copy(gd->unsortable_sets);
4437 483 : double availspace = hash_mem_limit;
4438 : ListCell *lc;
4439 :
4440 : /*
4441 : * Account first for space needed for groups we can't sort at all.
4442 : */
4443 483 : availspace -= estimate_hashagg_tablesize(root,
4444 : path,
4445 : agg_costs,
4446 : gd->dNumHashGroups);
4447 :
4448 483 : if (availspace > 0 && list_length(gd->rollups) > 1)
4449 : {
4450 : double scale;
4451 246 : int num_rollups = list_length(gd->rollups);
4452 : int k_capacity;
4453 246 : int *k_weights = palloc(num_rollups * sizeof(int));
4454 246 : Bitmapset *hash_items = NULL;
4455 : int i;
4456 :
4457 : /*
4458 : * We treat this as a knapsack problem: the knapsack capacity
4459 : * represents hash_mem, the item weights are the estimated memory
4460 : * usage of the hashtables needed to implement a single rollup,
4461 : * and we really ought to use the cost saving as the item value;
4462 : * however, currently the costs assigned to sort nodes don't
4463 : * reflect the comparison costs well, and so we treat all items as
4464 : * of equal value (each rollup we hash instead saves us one sort).
4465 : *
4466 : * To use the discrete knapsack, we need to scale the values to a
4467 : * reasonably small bounded range. We choose to allow a 5% error
4468 : * margin; we have no more than 4096 rollups in the worst possible
4469 : * case, which with a 5% error margin will require a bit over 42MB
4470 : * of workspace. (Anyone wanting to plan queries that complex had
4471 : * better have the memory for it. In more reasonable cases, with
4472 : * no more than a couple of dozen rollups, the memory usage will
4473 : * be negligible.)
4474 : *
4475 : * k_capacity is naturally bounded, but we clamp the values for
4476 : * scale and weight (below) to avoid overflows or underflows (or
4477 : * uselessly trying to use a scale factor less than 1 byte).
4478 : */
4479 246 : scale = Max(availspace / (20.0 * num_rollups), 1.0);
4480 246 : k_capacity = (int) floor(availspace / scale);
4481 :
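 : /*
 : * Worked example (editor's illustration, not part of the source):
 : * with availspace = 3,000,000 bytes and num_rollups = 3, scale =
 : * Max(3000000 / 60.0, 1.0) = 50000 and k_capacity = 60, i.e. each
 : * rollup's hashtable size is quantized into 50kB units, giving the
 : * ~5% (1/20th per rollup) error margin promised above.
 : */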
4482 : /*
4483 : * We leave the first rollup out of consideration since it's the
4484 : * one that matches the input sort order. We assign indexes "i"
4485 : * to only those entries considered for hashing; the second loop,
4486 : * below, must use the same condition.
4487 : */
4488 246 : i = 0;
4489 624 : for_each_from(lc, gd->rollups, 1)
4490 : {
4491 378 : RollupData *rollup = lfirst_node(RollupData, lc);
4492 :
4493 378 : if (rollup->hashable)
4494 : {
4495 378 : double sz = estimate_hashagg_tablesize(root,
4496 : path,
4497 : agg_costs,
4498 : rollup->numGroups);
4499 :
4500 : /*
4501 : * If sz is enormous, but hash_mem (and hence scale) is
4502 : * small, avoid integer overflow here.
4503 : */
4504 378 : k_weights[i] = (int) Min(floor(sz / scale),
4505 : k_capacity + 1.0);
4506 378 : ++i;
4507 : }
4508 : }
4509 :
4510 : /*
4511 : * Apply knapsack algorithm; compute the set of items which
4512 : * maximizes the value stored (in this case the number of sorts
4513 : * saved) while keeping the total size (approximately) within
4514 : * capacity.
4515 : */
4516 246 : if (i > 0)
4517 246 : hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
4518 :
4519 246 : if (!bms_is_empty(hash_items))
4520 : {
4521 246 : rollups = list_make1(linitial(gd->rollups));
4522 :
4523 246 : i = 0;
4524 624 : for_each_from(lc, gd->rollups, 1)
4525 : {
4526 378 : RollupData *rollup = lfirst_node(RollupData, lc);
4527 :
4528 378 : if (rollup->hashable)
4529 : {
4530 378 : if (bms_is_member(i, hash_items))
4531 360 : hash_sets = list_concat(hash_sets,
4532 360 : rollup->gsets_data);
4533 : else
4534 18 : rollups = lappend(rollups, rollup);
4535 378 : ++i;
4536 : }
4537 : else
4538 0 : rollups = lappend(rollups, rollup);
4539 : }
4540 : }
4541 : }
4542 :
4543 483 : if (!rollups && hash_sets)
4544 12 : rollups = list_copy(gd->rollups);
4545 :
4546 913 : foreach(lc, hash_sets)
4547 : {
4548 430 : GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
4549 430 : RollupData *rollup = makeNode(RollupData);
4550 :
4551 : Assert(gs->set != NIL);
4552 :
4553 430 : rollup->groupClause = preprocess_groupclause(root, gs->set);
4554 430 : rollup->gsets_data = list_make1(gs);
4555 430 : rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
4556 : rollup->gsets_data,
4557 : gd->tleref_to_colnum_map);
4558 430 : rollup->numGroups = gs->numGroups;
4559 430 : rollup->hashable = true;
4560 430 : rollup->is_hashed = true;
4561 430 : rollups = lcons(rollup, rollups);
4562 : }
4563 :
4564 483 : if (rollups)
4565 : {
4566 258 : add_path(grouped_rel, (Path *)
4567 258 : create_groupingsets_path(root,
4568 : grouped_rel,
4569 : path,
4570 258 : (List *) parse->havingQual,
4571 : AGG_MIXED,
4572 : rollups,
4573 : agg_costs));
4574 : }
4575 : }
4576 :
4577 : /*
4578 : * Now try the simple sorted case.
4579 : */
4580 531 : if (!gd->unsortable_sets)
4581 516 : add_path(grouped_rel, (Path *)
4582 516 : create_groupingsets_path(root,
4583 : grouped_rel,
4584 : path,
4585 516 : (List *) parse->havingQual,
4586 : AGG_SORTED,
4587 : gd->rollups,
4588 : agg_costs));
4589 : }
4590 :
4591 : /*
4592 : * create_window_paths
4593 : *
4594 : * Build a new upperrel containing Paths for window-function evaluation.
4595 : *
4596 : * input_rel: contains the source-data Paths
4597 : * input_target: result of make_window_input_target
4598 : * output_target: what the topmost WindowAggPath should return
4599 : * wflists: result of find_window_functions
4600 : * activeWindows: result of select_active_windows
4601 : *
4602 : * Note: all Paths in input_rel are expected to return input_target.
4603 : */
4604 : static RelOptInfo *
4605 1338 : create_window_paths(PlannerInfo *root,
4606 : RelOptInfo *input_rel,
4607 : PathTarget *input_target,
4608 : PathTarget *output_target,
4609 : bool output_target_parallel_safe,
4610 : WindowFuncLists *wflists,
4611 : List *activeWindows)
4612 : {
4613 : RelOptInfo *window_rel;
4614 : ListCell *lc;
4615 :
4616 : /* For now, do all work in the (WINDOW, NULL) upperrel */
4617 1338 : window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
4618 :
4619 : /*
4620 : * If the input relation is not parallel-safe, then the window relation
4621 : * can't be parallel-safe, either. Otherwise, we need to examine the
4622 : * target list and active windows for non-parallel-safe constructs.
4623 : */
4624 1338 : if (input_rel->consider_parallel && output_target_parallel_safe &&
4625 0 : is_parallel_safe(root, (Node *) activeWindows))
4626 0 : window_rel->consider_parallel = true;
4627 :
4628 : /*
4629 : * If the input rel belongs to a single FDW, so does the window rel.
4630 : */
4631 1338 : window_rel->serverid = input_rel->serverid;
4632 1338 : window_rel->userid = input_rel->userid;
4633 1338 : window_rel->useridiscurrent = input_rel->useridiscurrent;
4634 1338 : window_rel->fdwroutine = input_rel->fdwroutine;
4635 :
4636 : /*
4637 : * Consider computing window functions starting from the existing
4638 : * cheapest-total path (which will likely require a sort) as well as any
4639 : * existing paths that satisfy or partially satisfy root->window_pathkeys.
4640 : */
4641 2846 : foreach(lc, input_rel->pathlist)
4642 : {
4643 1508 : Path *path = (Path *) lfirst(lc);
4644 : int presorted_keys;
4645 :
4646 1678 : if (path == input_rel->cheapest_total_path ||
4647 170 : pathkeys_count_contained_in(root->window_pathkeys, path->pathkeys,
4648 76 : &presorted_keys) ||
4649 76 : presorted_keys > 0)
4650 1445 : create_one_window_path(root,
4651 : window_rel,
4652 : path,
4653 : input_target,
4654 : output_target,
4655 : wflists,
4656 : activeWindows);
4657 : }
4658 :
4659 : /*
4660 : * If there is an FDW that's responsible for all baserels of the query,
4661 : * let it consider adding ForeignPaths.
4662 : */
4663 1338 : if (window_rel->fdwroutine &&
4664 6 : window_rel->fdwroutine->GetForeignUpperPaths)
4665 6 : window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
4666 : input_rel, window_rel,
4667 : NULL);
4668 :
4669 : /* Let extensions possibly add some more paths */
4670 1338 : if (create_upper_paths_hook)
4671 0 : (*create_upper_paths_hook) (root, UPPERREL_WINDOW,
4672 : input_rel, window_rel, NULL);
4673 :
4674 : /* Now choose the best path(s) */
4675 1338 : set_cheapest(window_rel);
4676 :
4677 1338 : return window_rel;
4678 : }
4679 :
4680 : /*
4681 : * Stack window-function implementation steps atop the given Path, and
4682 : * add the result to window_rel.
4683 : *
4684 : * window_rel: upperrel to contain result
4685 : * path: input Path to use (must return input_target)
4686 : * input_target: result of make_window_input_target
4687 : * output_target: what the topmost WindowAggPath should return
4688 : * wflists: result of find_window_functions
4689 : * activeWindows: result of select_active_windows
4690 : */
4691 : static void
4692 1445 : create_one_window_path(PlannerInfo *root,
4693 : RelOptInfo *window_rel,
4694 : Path *path,
4695 : PathTarget *input_target,
4696 : PathTarget *output_target,
4697 : WindowFuncLists *wflists,
4698 : List *activeWindows)
4699 : {
4700 : PathTarget *window_target;
4701 : ListCell *l;
4702 1445 : List *topqual = NIL;
4703 :
4704 : /*
4705 : * Since each window clause could require a different sort order, we stack
4706 : * up a WindowAgg node for each clause, with sort steps between them as
4707 : * needed. (We assume that select_active_windows chose a good order for
4708 : * executing the clauses in.)
4709 : *
4710 : * input_target should contain all Vars and Aggs needed for the result.
4711 : * (In some cases we wouldn't need to propagate all of these all the way
4712 : * to the top, since they might only be needed as inputs to WindowFuncs.
4713 : * It's probably not worth trying to optimize that though.) It must also
4714 : * contain all window partitioning and sorting expressions, to ensure
4715 : * they're computed only once at the bottom of the stack (that's critical
4716 : * for volatile functions). As we climb up the stack, we'll add outputs
4717 : * for the WindowFuncs computed at each level.
4718 : */
4719 1445 : window_target = input_target;
4720 :
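 : /*
 : * Editor's illustration (not part of the source): with two active
 : * windows w1 (PARTITION BY a ORDER BY b) and w2 (ORDER BY c), the
 : * loop below builds, bottom to top:
 : * input -> Sort(a,b) -> WindowAgg(w1) -> Sort(c) -> WindowAgg(w2)
 : * and only the topmost WindowAgg emits output_target.
 : */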
4721 2983 : foreach(l, activeWindows)
4722 : {
4723 1538 : WindowClause *wc = lfirst_node(WindowClause, l);
4724 : List *window_pathkeys;
4725 1538 : List *runcondition = NIL;
4726 : int presorted_keys;
4727 : bool is_sorted;
4728 : bool topwindow;
4729 : ListCell *lc2;
4730 :
4731 1538 : window_pathkeys = make_pathkeys_for_window(root,
4732 : wc,
4733 : root->processed_tlist);
4734 :
4735 1538 : is_sorted = pathkeys_count_contained_in(window_pathkeys,
4736 : path->pathkeys,
4737 : &presorted_keys);
4738 :
4739 : /* Sort if necessary */
4740 1538 : if (!is_sorted)
4741 : {
4742 : /*
4743 : * No presorted keys or incremental sort disabled, just perform a
4744 : * complete sort.
4745 : */
4746 1092 : if (presorted_keys == 0 || !enable_incremental_sort)
4747 1061 : path = (Path *) create_sort_path(root, window_rel,
4748 : path,
4749 : window_pathkeys,
4750 : -1.0);
4751 : else
4752 : {
4753 : /*
4754 : * Since we have presorted keys and incremental sort is
4755 : * enabled, just use incremental sort.
4756 : */
4757 31 : path = (Path *) create_incremental_sort_path(root,
4758 : window_rel,
4759 : path,
4760 : window_pathkeys,
4761 : presorted_keys,
4762 : -1.0);
4763 : }
4764 : }
4765 :
4766 1538 : if (lnext(activeWindows, l))
4767 : {
4768 : /*
4769 : * Add the current WindowFuncs to the output target for this
4770 : * intermediate WindowAggPath. We must copy window_target to
4771 : * avoid changing the previous path's target.
4772 : *
4773 : * Note: a WindowFunc adds nothing to the target's eval costs; but
4774 : * we do need to account for the increase in tlist width.
4775 : */
4776 93 : int64 tuple_width = window_target->width;
4777 :
4778 93 : window_target = copy_pathtarget(window_target);
4779 222 : foreach(lc2, wflists->windowFuncs[wc->winref])
4780 : {
4781 129 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4782 :
4783 129 : add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
4784 129 : tuple_width += get_typavgwidth(wfunc->wintype, -1);
4785 : }
4786 93 : window_target->width = clamp_width_est(tuple_width);
4787 : }
4788 : else
4789 : {
4790 : /* Install the goal target in the topmost WindowAgg */
4791 1445 : window_target = output_target;
4792 : }
4793 :
4794 : /* mark the final item in the list as the top-level window */
4795 1538 : topwindow = foreach_current_index(l) == list_length(activeWindows) - 1;
4796 :
4797 : /*
4798 : * Collect the WindowFuncRunConditions from each WindowFunc and
4799 : * convert them into OpExprs
4800 : */
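 : /*
 : * Editor's illustration (not part of the source): an upper-level
 : * qual such as "rn <= 10", where rn = row_number() OVER (...), may
 : * have been recorded in wfunc->runCondition; below it becomes the
 : * OpExpr "row_number() OVER (...) <= 10", letting the WindowAgg
 : * stop producing rows once a monotonic window function's value
 : * makes the condition permanently false.
 : */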
4801 3517 : foreach(lc2, wflists->windowFuncs[wc->winref])
4802 : {
4803 : ListCell *lc3;
4804 1979 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
4805 :
4806 2069 : foreach(lc3, wfunc->runCondition)
4807 : {
4808 90 : WindowFuncRunCondition *wfuncrc =
4809 : lfirst_node(WindowFuncRunCondition, lc3);
4810 : Expr *opexpr;
4811 : Expr *leftop;
4812 : Expr *rightop;
4813 :
4814 90 : if (wfuncrc->wfunc_left)
4815 : {
4816 81 : leftop = (Expr *) copyObject(wfunc);
4817 81 : rightop = copyObject(wfuncrc->arg);
4818 : }
4819 : else
4820 : {
4821 9 : leftop = copyObject(wfuncrc->arg);
4822 9 : rightop = (Expr *) copyObject(wfunc);
4823 : }
4824 :
4825 90 : opexpr = make_opclause(wfuncrc->opno,
4826 : BOOLOID,
4827 : false,
4828 : leftop,
4829 : rightop,
4830 : InvalidOid,
4831 : wfuncrc->inputcollid);
4832 :
4833 90 : runcondition = lappend(runcondition, opexpr);
4834 :
4835 90 : if (!topwindow)
4836 12 : topqual = lappend(topqual, opexpr);
4837 : }
4838 : }
4839 :
4840 : path = (Path *)
4841 1538 : create_windowagg_path(root, window_rel, path, window_target,
4842 1538 : wflists->windowFuncs[wc->winref],
4843 : runcondition, wc,
4844 : topwindow ? topqual : NIL, topwindow);
4845 : }
4846 :
4847 1445 : add_path(window_rel, path);
4848 1445 : }
4849 :
4850 : /*
4851 : * create_distinct_paths
4852 : *
4853 : * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
4854 : *
4855 : * input_rel: contains the source-data Paths
4856 : * target: the pathtarget for the result Paths to compute
4857 : *
4858 : * Note: input paths should already compute the desired pathtarget, since
4859 : * Sort/Unique won't project anything.
4860 : */
4861 : static RelOptInfo *
4862 1507 : create_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4863 : PathTarget *target)
4864 : {
4865 : RelOptInfo *distinct_rel;
4866 :
4867 : /* For now, do all work in the (DISTINCT, NULL) upperrel */
4868 1507 : distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
4869 :
4870 : /*
4871 : * We don't compute anything at this level, so distinct_rel will be
4872 : * parallel-safe if the input rel is parallel-safe. In particular, if
4873 : * there is a DISTINCT ON (...) clause, any path for the input_rel will
4874 : * output those expressions, and will not be parallel-safe unless those
4875 : * expressions are parallel-safe.
4876 : */
4877 1507 : distinct_rel->consider_parallel = input_rel->consider_parallel;
4878 :
4879 : /*
4880 : * If the input rel belongs to a single FDW, so does the distinct_rel.
4881 : */
4882 1507 : distinct_rel->serverid = input_rel->serverid;
4883 1507 : distinct_rel->userid = input_rel->userid;
4884 1507 : distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4885 1507 : distinct_rel->fdwroutine = input_rel->fdwroutine;
4886 :
4887 : /* build distinct paths based on input_rel's pathlist */
4888 1507 : create_final_distinct_paths(root, input_rel, distinct_rel);
4889 :
4890 : /* now build distinct paths based on input_rel's partial_pathlist */
4891 1507 : create_partial_distinct_paths(root, input_rel, distinct_rel, target);
4892 :
4893 : /* Give a helpful error if we failed to create any paths */
4894 1507 : if (distinct_rel->pathlist == NIL)
4895 0 : ereport(ERROR,
4896 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
4897 : errmsg("could not implement DISTINCT"),
4898 : errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
4899 :
4900 : /*
4901 : * If there is an FDW that's responsible for all baserels of the query,
4902 : * let it consider adding ForeignPaths.
4903 : */
4904 1507 : if (distinct_rel->fdwroutine &&
4905 8 : distinct_rel->fdwroutine->GetForeignUpperPaths)
4906 8 : distinct_rel->fdwroutine->GetForeignUpperPaths(root,
4907 : UPPERREL_DISTINCT,
4908 : input_rel,
4909 : distinct_rel,
4910 : NULL);
4911 :
4912 : /* Let extensions possibly add some more paths */
4913 1507 : if (create_upper_paths_hook)
4914 0 : (*create_upper_paths_hook) (root, UPPERREL_DISTINCT, input_rel,
4915 : distinct_rel, NULL);
4916 :
4917 : /* Now choose the best path(s) */
4918 1507 : set_cheapest(distinct_rel);
4919 :
4920 1507 : return distinct_rel;
4921 : }
4922 :
4923 : /*
4924 : * create_partial_distinct_paths
4925 : *
4926 : * Process 'input_rel' partial paths and add unique/aggregate paths to the
4927 : * UPPERREL_PARTIAL_DISTINCT rel. For paths created, add Gather/GatherMerge
4928 : * paths on top and add a final unique/aggregate path to remove any
4929 : * duplicates produced by combining rows from parallel workers.
4930 : */
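 : /*
 : * Editor's illustration (not part of the source): a typical plan
 : * shape here is HashAggregate <- Gather <- partial HashAggregate;
 : * the final aggregate is required because two workers can each
 : * emit the same distinct value.
 : */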
4931 : static void
4932 1507 : create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
4933 : RelOptInfo *final_distinct_rel,
4934 : PathTarget *target)
4935 : {
4936 : RelOptInfo *partial_distinct_rel;
4937 : Query *parse;
4938 : List *distinctExprs;
4939 : double numDistinctRows;
4940 : Path *cheapest_partial_path;
4941 : ListCell *lc;
4942 :
4943 : /* nothing to do when there are no partial paths in the input rel */
4944 1507 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
4945 1453 : return;
4946 :
4947 54 : parse = root->parse;
4948 :
4949 : /* can't do parallel DISTINCT ON */
4950 54 : if (parse->hasDistinctOn)
4951 0 : return;
4952 :
4953 54 : partial_distinct_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_DISTINCT,
4954 : NULL);
4955 54 : partial_distinct_rel->reltarget = target;
4956 54 : partial_distinct_rel->consider_parallel = input_rel->consider_parallel;
4957 :
4958 : /*
4959 : * If input_rel belongs to a single FDW, so does the partial_distinct_rel.
4960 : */
4961 54 : partial_distinct_rel->serverid = input_rel->serverid;
4962 54 : partial_distinct_rel->userid = input_rel->userid;
4963 54 : partial_distinct_rel->useridiscurrent = input_rel->useridiscurrent;
4964 54 : partial_distinct_rel->fdwroutine = input_rel->fdwroutine;
4965 :
4966 54 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
4967 :
4968 54 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
4969 : parse->targetList);
4970 :
4971 : /* estimate how many distinct rows we'll get from each worker */
4972 54 : numDistinctRows = estimate_num_groups(root, distinctExprs,
4973 : cheapest_partial_path->rows,
4974 : NULL, NULL);
4975 :
4976 : /*
4977 : * Try sorting the cheapest path and incrementally sorting any paths with
4978 : * presorted keys, then put unique paths atop those. We'll also
4979 : * attempt to reorder the required pathkeys to match the input path's
4980 : * pathkeys as much as possible, in hopes of avoiding a possible need to
4981 : * re-sort.
4982 : */
4983 54 : if (grouping_is_sortable(root->processed_distinctClause))
4984 : {
4985 117 : foreach(lc, input_rel->partial_pathlist)
4986 : {
4987 63 : Path *input_path = (Path *) lfirst(lc);
4988 : Path *sorted_path;
4989 63 : List *useful_pathkeys_list = NIL;
4990 :
4991 : useful_pathkeys_list =
4992 63 : get_useful_pathkeys_for_distinct(root,
4993 : root->distinct_pathkeys,
4994 : input_path->pathkeys);
4995 : Assert(list_length(useful_pathkeys_list) > 0);
4996 :
4997 195 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
4998 : {
4999 69 : sorted_path = make_ordered_path(root,
5000 : partial_distinct_rel,
5001 : input_path,
5002 : cheapest_partial_path,
5003 : useful_pathkeys,
5004 : -1.0);
5005 :
5006 69 : if (sorted_path == NULL)
5007 6 : continue;
5008 :
5009 : /*
5010 : * An empty distinct_pathkeys means all tuples have the same
5011 : * value for the DISTINCT clause. See
5012 : * create_final_distinct_paths()
5013 : */
5014 63 : if (root->distinct_pathkeys == NIL)
5015 : {
5016 : Node *limitCount;
5017 :
5018 3 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5019 : sizeof(int64),
5020 : Int64GetDatum(1), false,
5021 : true);
5022 :
5023 : /*
5024 : * Apply a LimitPath onto the partial path to restrict the
5025 : * tuples from each worker to 1.
5026 : * create_final_distinct_paths will need to apply an
5027 : * additional LimitPath to restrict this to a single row
5028 : * after the Gather node. If the query already has a
5029 : * LIMIT clause, then we could end up with three Limit
5030 : * nodes in the final plan. Consolidating the top two of
5031 : * these could be done, but does not seem worth troubling
5032 : * over.
5033 : */
5034 3 : add_partial_path(partial_distinct_rel, (Path *)
5035 3 : create_limit_path(root, partial_distinct_rel,
5036 : sorted_path,
5037 : NULL,
5038 : limitCount,
5039 : LIMIT_OPTION_COUNT,
5040 : 0, 1));
5041 : }
5042 : else
5043 : {
5044 60 : add_partial_path(partial_distinct_rel, (Path *)
5045 60 : create_unique_path(root, partial_distinct_rel,
5046 : sorted_path,
5047 60 : list_length(root->distinct_pathkeys),
5048 : numDistinctRows));
5049 : }
5050 : }
5051 : }
5052 : }
5053 :
5054 : /*
5055 : * Now try hash aggregate paths, if enabled and hashing is possible. Since
5056 : * we're not on the hook to ensure we do our best to create at least one
5057 : * path here, we treat enable_hashagg as a hard off-switch rather than the
5058 : * slightly softer variant in create_final_distinct_paths.
5059 : */
5060 54 : if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
5061 : {
5062 39 : add_partial_path(partial_distinct_rel, (Path *)
5063 39 : create_agg_path(root,
5064 : partial_distinct_rel,
5065 : cheapest_partial_path,
5066 : cheapest_partial_path->pathtarget,
5067 : AGG_HASHED,
5068 : AGGSPLIT_SIMPLE,
5069 : root->processed_distinctClause,
5070 : NIL,
5071 : NULL,
5072 : numDistinctRows));
5073 : }
5074 :
5075 : /*
5076 : * If there is an FDW that's responsible for all baserels of the query,
5077 : * let it consider adding ForeignPaths.
5078 : */
5079 54 : if (partial_distinct_rel->fdwroutine &&
5080 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths)
5081 0 : partial_distinct_rel->fdwroutine->GetForeignUpperPaths(root,
5082 : UPPERREL_PARTIAL_DISTINCT,
5083 : input_rel,
5084 : partial_distinct_rel,
5085 : NULL);
5086 :
5087 : /* Let extensions possibly add some more partial paths */
5088 54 : if (create_upper_paths_hook)
5089 0 : (*create_upper_paths_hook) (root, UPPERREL_PARTIAL_DISTINCT,
5090 : input_rel, partial_distinct_rel, NULL);
5091 :
5092 54 : if (partial_distinct_rel->partial_pathlist != NIL)
5093 : {
5094 54 : generate_useful_gather_paths(root, partial_distinct_rel, true);
5095 54 : set_cheapest(partial_distinct_rel);
5096 :
5097 : /*
5098 : * Finally, create paths to distinctify the final result. This step
5099 : * is needed to remove any duplicates due to combining rows from
5100 : * parallel workers.
5101 : */
5102 54 : create_final_distinct_paths(root, partial_distinct_rel,
5103 : final_distinct_rel);
5104 : }
5105 : }
5106 :
5107 : /*
5108 : * create_final_distinct_paths
5109 : * Create distinct paths in 'distinct_rel' based on 'input_rel' pathlist
5110 : *
5111 : * input_rel: contains the source-data paths
5112 : * distinct_rel: destination relation for storing created paths
5113 : */
5114 : static RelOptInfo *
5115 1561 : create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
5116 : RelOptInfo *distinct_rel)
5117 : {
5118 1561 : Query *parse = root->parse;
5119 1561 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5120 : double numDistinctRows;
5121 : bool allow_hash;
5122 :
5123 : /* Estimate number of distinct rows there will be */
5124 1561 : if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
5125 1524 : root->hasHavingQual)
5126 : {
5127 : /*
5128 : * If there was grouping or aggregation, use the number of input rows
5129 : * as the estimated number of DISTINCT rows (ie, assume the input is
5130 : * already mostly unique).
5131 : */
5132 37 : numDistinctRows = cheapest_input_path->rows;
5133 : }
5134 : else
5135 : {
5136 : /*
5137 : * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
5138 : */
5139 : List *distinctExprs;
5140 :
5141 1524 : distinctExprs = get_sortgrouplist_exprs(root->processed_distinctClause,
5142 : parse->targetList);
5143 1524 : numDistinctRows = estimate_num_groups(root, distinctExprs,
5144 : cheapest_input_path->rows,
5145 : NULL, NULL);
5146 : }
5147 :
5148 : /*
5149 : * Consider sort-based implementations of DISTINCT, if possible.
5150 : */
5151 1561 : if (grouping_is_sortable(root->processed_distinctClause))
5152 : {
5153 : /*
5154 : * Firstly, if we have any adequately-presorted paths, just stick a
5155 : * Unique node on those. We also consider doing an explicit sort of
5156 : * the cheapest input path and Unique'ing that. If any paths have
5157 : * presorted keys then we'll create an incremental sort atop of those
5158 : * before adding a unique node on the top. We'll also attempt to
5159 : * reorder the required pathkeys to match the input path's pathkeys as
5160 : * much as possible, in hopes of avoiding a possible need to re-sort.
5161 : *
5162 : * When we have DISTINCT ON, we must sort by the more rigorous of
5163 : * DISTINCT and ORDER BY, else it won't have the desired behavior.
5164 : * Also, if we do have to do an explicit sort, we might as well use
5165 : * the more rigorous ordering to avoid a second sort later. (Note
5166 : * that the parser will have ensured that one clause is a prefix of
5167 : * the other.)
5168 : */
5169 : List *needed_pathkeys;
5170 : ListCell *lc;
5171 1558 : double limittuples = root->distinct_pathkeys == NIL ? 1.0 : -1.0;
5172 :
5173 1682 : if (parse->hasDistinctOn &&
5174 124 : list_length(root->distinct_pathkeys) <
5175 124 : list_length(root->sort_pathkeys))
5176 27 : needed_pathkeys = root->sort_pathkeys;
5177 : else
5178 1531 : needed_pathkeys = root->distinct_pathkeys;
5179 :
5180 4205 : foreach(lc, input_rel->pathlist)
5181 : {
5182 2647 : Path *input_path = (Path *) lfirst(lc);
5183 : Path *sorted_path;
5184 2647 : List *useful_pathkeys_list = NIL;
5185 :
5186 : useful_pathkeys_list =
5187 2647 : get_useful_pathkeys_for_distinct(root,
5188 : needed_pathkeys,
5189 : input_path->pathkeys);
5190 : Assert(list_length(useful_pathkeys_list) > 0);
5191 :
5192 8351 : foreach_node(List, useful_pathkeys, useful_pathkeys_list)
5193 : {
5194 3057 : sorted_path = make_ordered_path(root,
5195 : distinct_rel,
5196 : input_path,
5197 : cheapest_input_path,
5198 : useful_pathkeys,
5199 : limittuples);
5200 :
5201 3057 : if (sorted_path == NULL)
5202 439 : continue;
5203 :
5204 : /*
5205 : * distinct_pathkeys may have become empty if all of the
5206 : * pathkeys were determined to be redundant. If all of the
5207 : * pathkeys are redundant then each DISTINCT target must only
5208 : * allow a single value, therefore all resulting tuples must
5209 : * be identical (or at least indistinguishable by an equality
5210 : * check). We can uniquify these tuples simply by just taking
5211 : * check). We can uniquify these tuples simply by taking
5212 : * 1" atop of 'sorted_path'. When doing a DISTINCT ON we may
5213 : * still have a non-NIL sort_pathkeys list, so we must still
5214 : * only do this with paths which are correctly sorted by
5215 : * sort_pathkeys.
5216 : */
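 : /*
 : * Editor's illustration (not part of the source): in SELECT
 : * DISTINCT a FROM t WHERE a = 1, the pathkey on "a" is redundant
 : * (every row has a = 1), so distinct_pathkeys is NIL and a
 : * "LIMIT 1" on the sorted path suffices.
 : */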
5217 2618 : if (root->distinct_pathkeys == NIL)
5218 : {
5219 : Node *limitCount;
5220 :
5221 69 : limitCount = (Node *) makeConst(INT8OID, -1, InvalidOid,
5222 : sizeof(int64),
5223 : Int64GetDatum(1), false,
5224 : true);
5225 :
5226 : /*
5227 : * If the query already has a LIMIT clause, then we could
5228 : * end up with a duplicate LimitPath in the final plan.
5229 : * That does not seem worth troubling over too much.
5230 : */
5231 69 : add_path(distinct_rel, (Path *)
5232 69 : create_limit_path(root, distinct_rel, sorted_path,
5233 : NULL, limitCount,
5234 : LIMIT_OPTION_COUNT, 0, 1));
5235 : }
5236 : else
5237 : {
5238 2549 : add_path(distinct_rel, (Path *)
5239 2549 : create_unique_path(root, distinct_rel,
5240 : sorted_path,
5241 2549 : list_length(root->distinct_pathkeys),
5242 : numDistinctRows));
5243 : }
5244 : }
5245 : }
5246 : }
5247 :
5248 : /*
5249 : * Consider hash-based implementations of DISTINCT, if possible.
5250 : *
5251 : * If we were not able to make any other types of path, we *must* hash or
5252 : * die trying. If we do have other choices, there are two things that
5253 : * should prevent selection of hashing: if the query uses DISTINCT ON
5254 : * (because it won't really have the expected behavior if we hash), or if
5255 : * enable_hashagg is off.
5256 : *
5257 : * Note: grouping_is_hashable() is much more expensive to check than the
5258 : * other gating conditions, so we want to do it last.
5259 : */
5260 1561 : if (distinct_rel->pathlist == NIL)
5261 3 : allow_hash = true; /* we have no alternatives */
5262 1558 : else if (parse->hasDistinctOn || !enable_hashagg)
5263 199 : allow_hash = false; /* policy-based decision not to hash */
5264 : else
5265 1359 : allow_hash = true; /* default */
5266 :
5267 1561 : if (allow_hash && grouping_is_hashable(root->processed_distinctClause))
5268 : {
5269 : /* Generate hashed aggregate path --- no sort needed */
5270 1362 : add_path(distinct_rel, (Path *)
5271 1362 : create_agg_path(root,
5272 : distinct_rel,
5273 : cheapest_input_path,
5274 : cheapest_input_path->pathtarget,
5275 : AGG_HASHED,
5276 : AGGSPLIT_SIMPLE,
5277 : root->processed_distinctClause,
5278 : NIL,
5279 : NULL,
5280 : numDistinctRows));
5281 : }
5282 :
5283 1561 : return distinct_rel;
5284 : }
5285 :
5286 : /*
5287 : * get_useful_pathkeys_for_distinct
5288 : * Get useful orderings of pathkeys for distinctClause by reordering
5289 : * 'needed_pathkeys' to match the given 'path_pathkeys' as much as possible.
5290 : *
5291 : * This returns a list of pathkeys that can be useful for DISTINCT or DISTINCT
5292 : * ON clause. For convenience, it always includes the given 'needed_pathkeys'.
5293 : */
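 : /*
 : * Editor's illustration (not part of the source): for SELECT
 : * DISTINCT a, b with needed_pathkeys (a, b) and an input path
 : * sorted by (b), this returns both (a, b) and the reordering
 : * (b, a), so the presorted path may satisfy DISTINCT with at most
 : * an incremental sort.
 : */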
5294 : static List *
5295 2710 : get_useful_pathkeys_for_distinct(PlannerInfo *root, List *needed_pathkeys,
5296 : List *path_pathkeys)
5297 : {
5298 2710 : List *useful_pathkeys_list = NIL;
5299 2710 : List *useful_pathkeys = NIL;
5300 :
5301 : /* always include the given 'needed_pathkeys' */
5302 2710 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5303 : needed_pathkeys);
5304 :
5305 2710 : if (!enable_distinct_reordering)
5306 0 : return useful_pathkeys_list;
5307 :
5308 : /*
5309 : * Scan the given 'path_pathkeys' and construct a list of PathKey nodes
5310 : * that match 'needed_pathkeys', but only up to the longest matching
5311 : * prefix.
5312 : *
5313 : * When we have DISTINCT ON, we must ensure that the resulting pathkey
5314 : * list matches initial distinctClause pathkeys; otherwise, it won't have
5315 : * the desired behavior.
5316 : */
5317 6711 : foreach_node(PathKey, pathkey, path_pathkeys)
5318 : {
5319 : /*
5320 : * The PathKey nodes are canonical, so they can be checked for
5321 : * equality by simple pointer comparison.
5322 : */
5323 1305 : if (!list_member_ptr(needed_pathkeys, pathkey))
5324 5 : break;
5325 1300 : if (root->parse->hasDistinctOn &&
5326 100 : !list_member_ptr(root->distinct_pathkeys, pathkey))
5327 9 : break;
5328 :
5329 1291 : useful_pathkeys = lappend(useful_pathkeys, pathkey);
5330 : }
5331 :
5332 : /* If no match at all, no point in reordering needed_pathkeys */
5333 2710 : if (useful_pathkeys == NIL)
5334 1551 : return useful_pathkeys_list;
5335 :
5336 : /*
5337 : * If not full match, the resulting pathkey list is not useful without
5338 : * incremental sort.
5339 : */
5340 1159 : if (list_length(useful_pathkeys) < list_length(needed_pathkeys) &&
5341 780 : !enable_incremental_sort)
5342 30 : return useful_pathkeys_list;
5343 :
5344 : /* Append the remaining PathKey nodes in needed_pathkeys */
5345 1129 : useful_pathkeys = list_concat_unique_ptr(useful_pathkeys,
5346 : needed_pathkeys);
5347 :
5348 : /*
5349 : * If the resulting pathkey list is the same as the 'needed_pathkeys',
5350 : * just drop it.
5351 : */
5352 1129 : if (compare_pathkeys(needed_pathkeys,
5353 : useful_pathkeys) == PATHKEYS_EQUAL)
5354 713 : return useful_pathkeys_list;
5355 :
5356 416 : useful_pathkeys_list = lappend(useful_pathkeys_list,
5357 : useful_pathkeys);
5358 :
5359 416 : return useful_pathkeys_list;
5360 : }
5361 :
5362 : /*
5363 : * create_ordered_paths
5364 : *
5365 : * Build a new upperrel containing Paths for ORDER BY evaluation.
5366 : *
5367 : * All paths in the result must satisfy the ORDER BY ordering.
5368 : * The only new paths we need to consider are an explicit full sort
5369 : * and incremental sort on the cheapest-total existing path.
5370 : *
5371 : * input_rel: contains the source-data Paths
5372 : * target: the output tlist the result Paths must emit
5373 : * limit_tuples: estimated bound on the number of output tuples,
5374 : * or -1 if no LIMIT or couldn't estimate
5375 : *
5376 : * XXX This only looks at sort_pathkeys. I wonder if it needs to look at the
5377 : * other pathkeys (grouping, ...) like generate_useful_gather_paths.
5378 : */
5379 : static RelOptInfo *
5380 38068 : create_ordered_paths(PlannerInfo *root,
5381 : RelOptInfo *input_rel,
5382 : PathTarget *target,
5383 : bool target_parallel_safe,
5384 : double limit_tuples)
5385 : {
5386 38068 : Path *cheapest_input_path = input_rel->cheapest_total_path;
5387 : RelOptInfo *ordered_rel;
5388 : ListCell *lc;
5389 :
5390 : /* For now, do all work in the (ORDERED, NULL) upperrel */
5391 38068 : ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
5392 :
5393 : /*
5394 : * If the input relation is not parallel-safe, then the ordered relation
5395 : * can't be parallel-safe, either. Otherwise, it's parallel-safe if the
5396 : * target list is parallel-safe.
5397 : */
5398 38068 : if (input_rel->consider_parallel && target_parallel_safe)
5399 26544 : ordered_rel->consider_parallel = true;
5400 :
5401 : /* Assume that the same path generation strategies are allowed. */
5402 38068 : ordered_rel->pgs_mask = input_rel->pgs_mask;
5403 :
5404 : /*
5405 : * If the input rel belongs to a single FDW, so does the ordered_rel.
5406 : */
5407 38068 : ordered_rel->serverid = input_rel->serverid;
5408 38068 : ordered_rel->userid = input_rel->userid;
5409 38068 : ordered_rel->useridiscurrent = input_rel->useridiscurrent;
5410 38068 : ordered_rel->fdwroutine = input_rel->fdwroutine;
5411 :
5412 96276 : foreach(lc, input_rel->pathlist)
5413 : {
5414 58208 : Path *input_path = (Path *) lfirst(lc);
5415 : Path *sorted_path;
5416 : bool is_sorted;
5417 : int presorted_keys;
5418 :
5419 58208 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5420 : input_path->pathkeys, &presorted_keys);
5421 :
5422 58208 : if (is_sorted)
5423 21790 : sorted_path = input_path;
5424 : else
5425 : {
5426 : /*
5427 : * Try at least sorting the cheapest path and also try
5428 : * incrementally sorting any path which is partially sorted
5429 : * already (no need to deal with paths which have presorted keys
5430 : * when incremental sort is disabled unless it's the cheapest
5431 : * input path).
5432 : */
5433 36418 : if (input_path != cheapest_input_path &&
5434 3150 : (presorted_keys == 0 || !enable_incremental_sort))
5435 1117 : continue;
5436 :
5437 : /*
5438 : * We've no need to consider both a sort and incremental sort.
5439 : * We'll just do a sort if there are no presorted keys and an
5440 : * incremental sort when there are presorted keys.
5441 : */
5442 35301 : if (presorted_keys == 0 || !enable_incremental_sort)
5443 32931 : sorted_path = (Path *) create_sort_path(root,
5444 : ordered_rel,
5445 : input_path,
5446 : root->sort_pathkeys,
5447 : limit_tuples);
5448 : else
5449 2370 : sorted_path = (Path *) create_incremental_sort_path(root,
5450 : ordered_rel,
5451 : input_path,
5452 : root->sort_pathkeys,
5453 : presorted_keys,
5454 : limit_tuples);
5455 : }
5456 :
5457 : /*
5458 : * If the pathtarget of the result path has different expressions from
5459 : * the target to be applied, a projection step is needed.
5460 : */
5461 57091 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5462 197 : sorted_path = apply_projection_to_path(root, ordered_rel,
5463 : sorted_path, target);
5464 :
5465 57091 : add_path(ordered_rel, sorted_path);
5466 : }
5467 :
5468 : /*
5469 : * generate_gather_paths() will have already generated a simple Gather
5470 : * path for the best parallel path, if any, and the loop above will have
5471 : * considered sorting it. Similarly, generate_gather_paths() will also
5472 : * have generated order-preserving Gather Merge plans which can be used
5473 : * without sorting if they happen to match the sort_pathkeys, and the loop
5474 : * above will have handled those as well. However, there's one more
5475 : * possibility: it may make sense to sort the cheapest partial path or
5476 : * incrementally sort any partial path that is partially sorted according
5477 : * to the required output order and then use Gather Merge.
5478 : */
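 : /*
 : * Editor's illustration (not part of the source): the loop below
 : * yields plans shaped like
 : * Gather Merge <- Sort (or Incremental Sort) <- partial path
 : * which can beat a full sort applied above a plain Gather.
 : */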
5479 38068 : if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
5480 26442 : input_rel->partial_pathlist != NIL)
5481 : {
5482 : Path *cheapest_partial_path;
5483 :
5484 1437 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
5485 :
5486 3271 : foreach(lc, input_rel->partial_pathlist)
5487 : {
5488 1834 : Path *input_path = (Path *) lfirst(lc);
5489 : Path *sorted_path;
5490 : bool is_sorted;
5491 : int presorted_keys;
5492 : double total_groups;
5493 :
5494 1834 : is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
5495 : input_path->pathkeys,
5496 : &presorted_keys);
5497 :
5498 1834 : if (is_sorted)
5499 337 : continue;
5500 :
5501 : /*
5502 : * Try at least sorting the cheapest path and also try
5503 : * incrementally sorting any path which is partially sorted
5504 : * already (no need to deal with paths which have presorted keys
5505 : * when incremental sort is disabled unless it's the cheapest
5506 : * partial path).
5507 : */
5508 1497 : if (input_path != cheapest_partial_path &&
5509 75 : (presorted_keys == 0 || !enable_incremental_sort))
5510 0 : continue;
5511 :
5512 : /*
5513 : * We've no need to consider both a sort and incremental sort.
5514 : * We'll just do a sort if there are no presorted keys and an
5515 : * incremental sort when there are presorted keys.
5516 : */
5517 1497 : if (presorted_keys == 0 || !enable_incremental_sort)
5518 1413 : sorted_path = (Path *) create_sort_path(root,
5519 : ordered_rel,
5520 : input_path,
5521 : root->sort_pathkeys,
5522 : limit_tuples);
5523 : else
5524 84 : sorted_path = (Path *) create_incremental_sort_path(root,
5525 : ordered_rel,
5526 : input_path,
5527 : root->sort_pathkeys,
5528 : presorted_keys,
5529 : limit_tuples);
5530 1497 : total_groups = compute_gather_rows(sorted_path);
5531 : sorted_path = (Path *)
5532 1497 : create_gather_merge_path(root, ordered_rel,
5533 : sorted_path,
5534 : sorted_path->pathtarget,
5535 : root->sort_pathkeys, NULL,
5536 : &total_groups);
5537 :
5538 : /*
5539 : * If the pathtarget of the result path has different expressions
5540 : * from the target to be applied, a projection step is needed.
5541 : */
5542 1497 : if (!equal(sorted_path->pathtarget->exprs, target->exprs))
5543 3 : sorted_path = apply_projection_to_path(root, ordered_rel,
5544 : sorted_path, target);
5545 :
5546 1497 : add_path(ordered_rel, sorted_path);
5547 : }
5548 : }
5549 :
5550 : /*
5551 : * If there is an FDW that's responsible for all baserels of the query,
5552 : * let it consider adding ForeignPaths.
5553 : */
5554 38068 : if (ordered_rel->fdwroutine &&
5555 193 : ordered_rel->fdwroutine->GetForeignUpperPaths)
5556 185 : ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
5557 : input_rel, ordered_rel,
5558 : NULL);
5559 :
5560 : /* Let extensions possibly add some more paths */
5561 38068 : if (create_upper_paths_hook)
5562 0 : (*create_upper_paths_hook) (root, UPPERREL_ORDERED,
5563 : input_rel, ordered_rel, NULL);
5564 :
5565 : /*
5566 : * No need to bother with set_cheapest here; grouping_planner does not
5567 : * need us to do it.
5568 : */
5569 : Assert(ordered_rel->pathlist != NIL);
5570 :
5571 38068 : return ordered_rel;
5572 : }
5573 :
5574 :
5575 : /*
5576 : * make_group_input_target
5577 : * Generate appropriate PathTarget for initial input to grouping nodes.
5578 : *
5579 : * If there is grouping or aggregation, the scan/join subplan cannot emit
5580 : * the query's final targetlist; for example, it certainly can't emit any
5581 : * aggregate function calls. This routine generates the correct target
5582 : * for the scan/join subplan.
5583 : *
5584 : * The query target list passed from the parser already contains entries
5585 : * for all ORDER BY and GROUP BY expressions, but it will not have entries
5586 : * for variables used only in HAVING clauses; so we need to add those
5587 : * variables to the subplan target list. Also, we flatten all expressions
5588 : * except GROUP BY items into their component variables; other expressions
5589 : * will be computed by the upper plan nodes rather than by the subplan.
5590 : * For example, given a query like
5591 : * SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
5592 : * we want to pass this targetlist to the subplan:
5593 : * a+b,c,d
5594 : * where the a+b target will be used by the Sort/Group steps, and the
5595 : * other targets will be used for computing the final results.
5596 : *
5597 : * 'final_target' is the query's final target list (in PathTarget form)
5598 : *
5599 : * The result is the PathTarget to be computed by the Paths returned from
5600 : * query_planner().
5601 : */
5602 : static PathTarget *
5603 22981 : make_group_input_target(PlannerInfo *root, PathTarget *final_target)
5604 : {
5605 22981 : Query *parse = root->parse;
5606 : PathTarget *input_target;
5607 : List *non_group_cols;
5608 : List *non_group_vars;
5609 : int i;
5610 : ListCell *lc;
5611 :
5612 : /*
5613 : * We must build a target containing all grouping columns, plus any other
5614 : * Vars mentioned in the query's targetlist and HAVING qual.
5615 : */
5616 22981 : input_target = create_empty_pathtarget();
5617 22981 : non_group_cols = NIL;
5618 :
5619 22981 : i = 0;
5620 57008 : foreach(lc, final_target->exprs)
5621 : {
5622 34027 : Expr *expr = (Expr *) lfirst(lc);
5623 34027 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
5624 :
5625 38867 : if (sgref && root->processed_groupClause &&
5626 4840 : get_sortgroupref_clause_noerr(sgref,
5627 : root->processed_groupClause) != NULL)
5628 : {
5629 : /*
5630 : * It's a grouping column, so add it to the input target as-is.
5631 : *
5632 : * Note that the target is logically below the grouping step. So
5633 : * with grouping sets we need to remove the RT index of the
5634 : * grouping step if there is any from the target expression.
5635 : */
5636 3905 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5637 : {
5638 : Assert(root->group_rtindex > 0);
5639 : expr = (Expr *)
5640 1013 : remove_nulling_relids((Node *) expr,
5641 1013 : bms_make_singleton(root->group_rtindex),
5642 : NULL);
5643 : }
5644 3905 : add_column_to_pathtarget(input_target, expr, sgref);
5645 : }
5646 : else
5647 : {
5648 : /*
5649 : * Non-grouping column, so just remember the expression for later
5650 : * call to pull_var_clause.
5651 : */
5652 30122 : non_group_cols = lappend(non_group_cols, expr);
5653 : }
5654 :
5655 34027 : i++;
5656 : }
5657 :
5658 : /*
5659 : * If there's a HAVING clause, we'll need the Vars it uses, too.
5660 : */
5661 22981 : if (parse->havingQual)
5662 481 : non_group_cols = lappend(non_group_cols, parse->havingQual);
5663 :
5664 : /*
5665 : * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
5666 : * add them to the input target if not already present. (A Var used
5667 : * directly as a GROUP BY item will be present already.) Note this
5668 : * includes Vars used in resjunk items, so we are covering the needs of
5669 : * ORDER BY and window specifications. Vars used within Aggrefs and
5670 : * WindowFuncs will be pulled out here, too.
5671 : *
5672 : * Note that the target is logically below the grouping step. So with
5673 : * grouping sets we need to remove the RT index of the grouping step if
5674 : * there is any from the non-group Vars.
5675 : */
5676 22981 : non_group_vars = pull_var_clause((Node *) non_group_cols,
5677 : PVC_RECURSE_AGGREGATES |
5678 : PVC_RECURSE_WINDOWFUNCS |
5679 : PVC_INCLUDE_PLACEHOLDERS);
5680 22981 : if (parse->hasGroupRTE && parse->groupingSets != NIL)
5681 : {
5682 : Assert(root->group_rtindex > 0);
5683 : non_group_vars = (List *)
5684 468 : remove_nulling_relids((Node *) non_group_vars,
5685 468 : bms_make_singleton(root->group_rtindex),
5686 : NULL);
5687 : }
5688 22981 : add_new_columns_to_pathtarget(input_target, non_group_vars);
5689 :
5690 : /* clean up cruft */
5691 22981 : list_free(non_group_vars);
5692 22981 : list_free(non_group_cols);
5693 :
5694 : /* XXX this causes some redundant cost calculation ... */
5695 22981 : return set_pathtarget_cost_width(root, input_target);
5696 : }
5697 :
5698 : /*
5699 : * make_partial_grouping_target
5700 : * Generate appropriate PathTarget for output of partial aggregate
5701 : * (or partial grouping, if there are no aggregates) nodes.
5702 : *
5703 : * A partial aggregation node needs to emit all the same aggregates that
5704 : * a regular aggregation node would, plus any aggregates used in HAVING;
5705 : * except that the Aggref nodes should be marked as partial aggregates.
5706 : *
5707 : * In addition, we'd better emit any Vars and PlaceHolderVars that are
5708 : * used outside of Aggrefs in the aggregation tlist and HAVING. (Presumably,
5709 : * these would be Vars that are grouped by or used in grouping expressions.)
5710 : *
5711 : * grouping_target is the tlist to be emitted by the topmost aggregation step.
5712 : * havingQual represents the HAVING clause.
5713 : */
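 : /*
 : * Editor's illustration (not part of the source): for SELECT a,
 : * sum(b) FROM t GROUP BY a HAVING max(c) > 0, the partial target
 : * is {a, PARTIAL sum(b), PARTIAL max(c)}, each Aggref being marked
 : * AGGSPLIT_INITIAL_SERIAL below.
 : */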
5714 : static PathTarget *
5715 1928 : make_partial_grouping_target(PlannerInfo *root,
5716 : PathTarget *grouping_target,
5717 : Node *havingQual)
5718 : {
5719 : PathTarget *partial_target;
5720 : List *non_group_cols;
5721 : List *non_group_exprs;
5722 : int i;
5723 : ListCell *lc;
5724 :
5725 1928 : partial_target = create_empty_pathtarget();
5726 1928 : non_group_cols = NIL;
5727 :
5728 1928 : i = 0;
5729 6377 : foreach(lc, grouping_target->exprs)
5730 : {
5731 4449 : Expr *expr = (Expr *) lfirst(lc);
5732 4449 : Index sgref = get_pathtarget_sortgroupref(grouping_target, i);
5733 :
5734 6837 : if (sgref && root->processed_groupClause &&
5735 2388 : get_sortgroupref_clause_noerr(sgref,
5736 : root->processed_groupClause) != NULL)
5737 : {
5738 : /*
5739 : * It's a grouping column, so add it to the partial_target as-is.
5740 : * (This allows the upper agg step to repeat the grouping calcs.)
5741 : */
5742 1414 : add_column_to_pathtarget(partial_target, expr, sgref);
5743 : }
5744 : else
5745 : {
5746 : /*
5747 : * Non-grouping column, so just remember the expression for later
5748 : * call to pull_var_clause.
5749 : */
5750 3035 : non_group_cols = lappend(non_group_cols, expr);
5751 : }
5752 :
5753 4449 : i++;
5754 : }
5755 :
5756 : /*
5757 : * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
5758 : */
5759 1928 : if (havingQual)
5760 439 : non_group_cols = lappend(non_group_cols, havingQual);
5761 :
5762 : /*
5763 : * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
5764 : * non-group cols (plus HAVING), and add them to the partial_target if not
5765 : * already present. (An expression used directly as a GROUP BY item will
5766 : * be present already.) Note this includes Vars used in resjunk items, so
5767 : * we are covering the needs of ORDER BY and window specifications.
5768 : */
5769 1928 : non_group_exprs = pull_var_clause((Node *) non_group_cols,
5770 : PVC_INCLUDE_AGGREGATES |
5771 : PVC_RECURSE_WINDOWFUNCS |
5772 : PVC_INCLUDE_PLACEHOLDERS);
5773 :
5774 1928 : add_new_columns_to_pathtarget(partial_target, non_group_exprs);
5775 :
5776 : /*
5777 : * Adjust Aggrefs to put them in partial mode. At this point all Aggrefs
5778 : * are at the top level of the target list, so we can just scan the list
5779 : * rather than recursing through the expression trees.
5780 : */
5781 6675 : foreach(lc, partial_target->exprs)
5782 : {
5783 4747 : Aggref *aggref = (Aggref *) lfirst(lc);
5784 :
5785 4747 : if (IsA(aggref, Aggref))
5786 : {
5787 : Aggref *newaggref;
5788 :
5789 : /*
5790 : * We shouldn't need to copy the substructure of the Aggref node,
5791 : * but flat-copy the node itself to avoid damaging other trees.
5792 : */
5793 3318 : newaggref = makeNode(Aggref);
5794 3318 : memcpy(newaggref, aggref, sizeof(Aggref));
5795 :
5796 : /* For now, assume serialization is required */
5797 3318 : mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
5798 :
5799 3318 : lfirst(lc) = newaggref;
5800 : }
5801 : }
5802 :
5803 : /* clean up cruft */
5804 1928 : list_free(non_group_exprs);
5805 1928 : list_free(non_group_cols);
5806 :
5807 : /* XXX this causes some redundant cost calculation ... */
5808 1928 : return set_pathtarget_cost_width(root, partial_target);
5809 : }
5810 :
5811 : /*
5812 : * mark_partial_aggref
5813 : * Adjust an Aggref to make it represent a partial-aggregation step.
5814 : *
5815 : * The Aggref node is modified in-place; caller must do any copying required.
5816 : */
5817 : void
5818 9290 : mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
5819 : {
5820 : /* aggtranstype should be computed by this point */
5821 : Assert(OidIsValid(agg->aggtranstype));
5822 : /* ... but aggsplit should still be as the parser left it */
5823 : Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
5824 :
5825 : /* Mark the Aggref with the intended partial-aggregation mode */
5826 9290 : agg->aggsplit = aggsplit;
5827 :
5828 : /*
5829 : * Adjust result type if needed. Normally, a partial aggregate returns
5830 : * the aggregate's transition type; but if that's INTERNAL and we're
5831 : * serializing, it returns BYTEA instead.
5832 : */
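 : /*
 : * Editor's illustration (not part of the source): avg(numeric) has
 : * an INTERNAL transition type, so its serialized partial Aggref is
 : * marked as returning BYTEA, while count(*) has an int8 transition
 : * type and its partial Aggref simply returns int8.
 : */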
5833 9290 : if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
5834 : {
5835 8137 : if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
5836 157 : agg->aggtype = BYTEAOID;
5837 : else
5838 7980 : agg->aggtype = agg->aggtranstype;
5839 : }
5840 9290 : }
5841 :
5842 : /*
5843 : * postprocess_setop_tlist
5844 : * Fix up targetlist returned by plan_set_operations().
5845 : *
5846 : * We need to transpose sort key info from the orig_tlist into new_tlist.
5847 : * NOTE: this would not be good enough if we supported resjunk sort keys
5848 : * for results of set operations --- then, we'd need to project a whole
5849 : * new tlist to evaluate the resjunk columns. For now, just elog an error if we
5850 : * find any resjunk columns in orig_tlist.
5851 : */
5852 : static List *
5853 3108 : postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
5854 : {
5855 : ListCell *l;
5856 3108 : ListCell *orig_tlist_item = list_head(orig_tlist);
5857 :
5858 11927 : foreach(l, new_tlist)
5859 : {
5860 8819 : TargetEntry *new_tle = lfirst_node(TargetEntry, l);
5861 : TargetEntry *orig_tle;
5862 :
5863 : /* ignore resjunk columns in setop result */
5864 8819 : if (new_tle->resjunk)
5865 0 : continue;
5866 :
5867 : Assert(orig_tlist_item != NULL);
5868 8819 : orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
5869 8819 : orig_tlist_item = lnext(orig_tlist, orig_tlist_item);
5870 8819 : if (orig_tle->resjunk) /* should not happen */
5871 0 : elog(ERROR, "resjunk output columns are not implemented");
5872 : Assert(new_tle->resno == orig_tle->resno);
5873 8819 : new_tle->ressortgroupref = orig_tle->ressortgroupref;
5874 : }
5875 3108 : if (orig_tlist_item != NULL)
5876 0 : elog(ERROR, "resjunk output columns are not implemented");
5877 3108 : return new_tlist;
5878 : }
5879 :
5880 : /*
5881 : * optimize_window_clauses
5882 : * Call each WindowFunc's prosupport function to see if we're able to
5883 : * make any adjustments to any of the WindowClauses so that the executor
5884 : * can execute the window functions in a more optimal way.
5885 : *
5886 : * Currently we only allow adjustments to the WindowClause's frameOptions. We
5887 : * may allow more things to be done here in the future.
5888 : */
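 : /*
 : * Editor's note (an example, not part of the source): the support
 : * function for row_number() can replace the default RANGE frame
 : * with a cheaper ROWS frame, since row_number() is insensitive to
 : * the frame; see window_row_number_support() in windowfuncs.c.
 : */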
5889 : static void
5890 1338 : optimize_window_clauses(PlannerInfo *root, WindowFuncLists *wflists)
5891 : {
5892 1338 : List *windowClause = root->parse->windowClause;
5893 : ListCell *lc;
5894 :
5895 2799 : foreach(lc, windowClause)
5896 : {
5897 1461 : WindowClause *wc = lfirst_node(WindowClause, lc);
5898 : ListCell *lc2;
5899 1461 : int optimizedFrameOptions = 0;
5900 :
5901 : Assert(wc->winref <= wflists->maxWinRef);
5902 :
5903 : /* skip any WindowClauses that have no WindowFuncs */
5904 1461 : if (wflists->windowFuncs[wc->winref] == NIL)
5905 12 : continue;
5906 :
5907 1776 : foreach(lc2, wflists->windowFuncs[wc->winref])
5908 : {
5909 : SupportRequestOptimizeWindowClause req;
5910 : SupportRequestOptimizeWindowClause *res;
5911 1470 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
5912 : Oid prosupport;
5913 :
5914 1470 : prosupport = get_func_support(wfunc->winfnoid);
5915 :
5916 : /* Check if there's a support function for 'wfunc' */
5917 1470 : if (!OidIsValid(prosupport))
5918 1143 : break; /* can't optimize this WindowClause */
5919 :
5920 440 : req.type = T_SupportRequestOptimizeWindowClause;
5921 440 : req.window_clause = wc;
5922 440 : req.window_func = wfunc;
5923 440 : req.frameOptions = wc->frameOptions;
5924 :
5925 : /* call the support function */
5926 : res = (SupportRequestOptimizeWindowClause *)
5927 440 : DatumGetPointer(OidFunctionCall1(prosupport,
5928 : PointerGetDatum(&req)));
5929 :
5930 : /*
5931 : * Skip to next WindowClause if the support function does not
5932 : * support this request type.
5933 : */
5934 440 : if (res == NULL)
5935 113 : break;
5936 :
5937 : /*
5938 : * Save these frameOptions for the first WindowFunc for this
5939 : * WindowClause.
5940 : */
5941 327 : if (foreach_current_index(lc2) == 0)
5942 315 : optimizedFrameOptions = res->frameOptions;
5943 :
5944 : /*
5945 : * On subsequent WindowFuncs, if the frameOptions are not the same
5946 : * then we're unable to optimize the frameOptions for this
5947 : * WindowClause.
5948 : */
5949 12 : else if (optimizedFrameOptions != res->frameOptions)
5950 0 : break; /* skip to the next WindowClause, if any */
5951 : }
5952 :
5953 : /* adjust the frameOptions if all WindowFuncs agree that it's ok */
5954 1449 : if (lc2 == NULL && wc->frameOptions != optimizedFrameOptions)
5955 : {
5956 : ListCell *lc3;
5957 :
5958 : /* apply the new frame options */
5959 306 : wc->frameOptions = optimizedFrameOptions;
5960 :
5961 : /*
5962 : * We now check to see if changing the frameOptions has caused
5963 : * this WindowClause to be a duplicate of some other WindowClause.
5964 : * This can only happen if we have multiple WindowClauses, so
5965 : * don't bother if there's only 1.
5966 : */
5967 306 : if (list_length(windowClause) == 1)
5968 261 : continue;
5969 :
5970 : /*
5971 : * Do the duplicate check and reuse the existing WindowClause if
5972 : * we find a duplicate.
5973 : */
5974 114 : foreach(lc3, windowClause)
5975 : {
5976 87 : WindowClause *existing_wc = lfirst_node(WindowClause, lc3);
5977 :
5978 : /* skip over the WindowClause we're currently editing */
5979 87 : if (existing_wc == wc)
5980 27 : continue;
5981 :
5982 : /*
5983 : * Perform the same duplicate check that is done in
5984 : * transformWindowFuncCall.
5985 : */
5986 120 : if (equal(wc->partitionClause, existing_wc->partitionClause) &&
5987 60 : equal(wc->orderClause, existing_wc->orderClause) &&
5988 60 : wc->frameOptions == existing_wc->frameOptions &&
5989 36 : equal(wc->startOffset, existing_wc->startOffset) &&
5990 18 : equal(wc->endOffset, existing_wc->endOffset))
5991 : {
5992 : ListCell *lc4;
5993 :
5994 : /*
5995 : * Now move each WindowFunc in 'wc' into 'existing_wc'.
5996 : * This requires adjusting each WindowFunc's winref and
5997 : * moving the WindowFuncs in 'wc' to the list of
5998 : * WindowFuncs in 'existing_wc'.
5999 : */
6000 39 : foreach(lc4, wflists->windowFuncs[wc->winref])
6001 : {
6002 21 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc4);
6003 :
6004 21 : wfunc->winref = existing_wc->winref;
6005 : }
6006 :
6007 : /* move list items */
6008 36 : wflists->windowFuncs[existing_wc->winref] = list_concat(wflists->windowFuncs[existing_wc->winref],
6009 18 : wflists->windowFuncs[wc->winref]);
6010 18 : wflists->windowFuncs[wc->winref] = NIL;
6011 :
6012 : /*
6013 : * transformWindowFuncCall() should have made sure there
6014 : * are no other duplicates, so we needn't bother looking
6015 : * any further.
6016 : */
6017 18 : break;
6018 : }
6019 : }
6020 : }
6021 : }
6022 1338 : }
6023 :
6024 : /*
6025 : * select_active_windows
6026 : * Create a list of the "active" window clauses (ie, those referenced
6027 : * by non-deleted WindowFuncs) in the order they are to be executed.
6028 : */
6029 : static List *
6030 1338 : select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
6031 : {
6032 1338 : List *windowClause = root->parse->windowClause;
6033 1338 : List *result = NIL;
6034 : ListCell *lc;
6035 1338 : int nActive = 0;
6036 1338 : WindowClauseSortData *actives = palloc_array(WindowClauseSortData,
6037 : list_length(windowClause));
6038 :
6039 : /* First, construct an array of the active windows */
6040 2799 : foreach(lc, windowClause)
6041 : {
6042 1461 : WindowClause *wc = lfirst_node(WindowClause, lc);
6043 :
6044 : /* It's only active if wflists shows some related WindowFuncs */
6045 : Assert(wc->winref <= wflists->maxWinRef);
6046 1461 : if (wflists->windowFuncs[wc->winref] == NIL)
6047 30 : continue;
6048 :
6049 1431 : actives[nActive].wc = wc; /* original clause */
6050 :
6051 : /*
6052 : * For sorting, we want the list of partition keys followed by the
6053 : * list of sort keys. But pathkeys construction will remove duplicates
6054 : * between the two, so we can as well (even though we can't detect all
6055 : * of the duplicates, since some may come from ECs - that might mean
6056 : * we miss optimization chances here). We must, however, ensure that
6057 : * the order of entries is preserved with respect to the ones we do
6058 : * keep.
6059 : *
6060 : * partitionClause and orderClause had their own duplicates removed in
6061 : * parse analysis, so we're only concerned here with removing
6062 : * orderClause entries that also appear in partitionClause.
6063 : */
6064 2862 : actives[nActive].uniqueOrder =
6065 1431 : list_concat_unique(list_copy(wc->partitionClause),
6066 1431 : wc->orderClause);
6067 1431 : nActive++;
6068 : }
6069 :
6070 : /*
6071 : * Sort active windows by their partitioning/ordering clauses, ignoring
6072 : * any framing clauses, so that the windows that need the same sorting are
6073 : * adjacent in the list. When we come to generate paths, this will avoid
6074 : * inserting additional Sort nodes.
6075 : *
6076 : * This is how we implement a specific requirement from the SQL standard,
6077 : * which says that when two or more windows are order-equivalent (i.e.
6078 : * have matching partition and order clauses, even if their names or
6079 : * framing clauses differ), then all peer rows must be presented in the
6080 : * same order in all of them. If we allowed multiple sort nodes for such
6081 : * cases, we'd risk having the peer rows end up in different orders in
6082 : * equivalent windows due to sort instability. (See General Rule 4 of
6083 : * <window clause> in SQL2008 - SQL2016.)
6084 : *
6085 : * Additionally, if the entire list of clauses of one window is a prefix
6086 : * of another, put the window with the stronger sorting requirements
6087 : * first. That way we sort for the stronger window first and need not
6088 : * sort again for the weaker one.
6089 : */
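: /*
:  * For instance (a hypothetical illustration): with w1 AS (PARTITION BY a
:  * ORDER BY b) and w2 AS (PARTITION BY a), w2's clause list is a prefix of
:  * w1's, so w1 is placed first; the (a, b) sort done for w1 then also
:  * satisfies w2.
:  */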
6090 1338 : qsort(actives, nActive, sizeof(WindowClauseSortData), common_prefix_cmp);
6091 :
6092 : /* build ordered list of the original WindowClause nodes */
6093 2769 : for (int i = 0; i < nActive; i++)
6094 1431 : result = lappend(result, actives[i].wc);
6095 :
6096 1338 : pfree(actives);
6097 :
6098 1338 : return result;
6099 : }
6100 :
6101 : /*
6102 : * name_active_windows
6103 : * Ensure all active windows have unique names.
6104 : *
6105 : * The parser will have checked that user-assigned window names are unique
6106 : * within the Query. Here we assign made-up names to any unnamed
6107 : * WindowClauses for the benefit of EXPLAIN. (We don't want to do this
6108 : * at parse time, because it'd mess up decompilation of views.)
6109 : *
6110 : * activeWindows: result of select_active_windows
6111 : */
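: /*
:  * E.g. (illustrative): with windows (<unnamed>, "w2", <unnamed>), the
:  * unnamed clauses become "w1" and "w3"; "w2" is skipped because that
:  * name is already taken.
:  */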
6112 : static void
6113 1338 : name_active_windows(List *activeWindows)
6114 : {
6115 1338 : int next_n = 1;
6116 : char newname[16];
6117 : ListCell *lc;
6118 :
6119 2769 : foreach(lc, activeWindows)
6120 : {
6121 1431 : WindowClause *wc = lfirst_node(WindowClause, lc);
6122 :
6123 : /* Nothing to do if it has a name already. */
6124 1431 : if (wc->name)
6125 288 : continue;
6126 :
6127 : /* Select a name not currently present in the list. */
6128 : for (;;)
6129 3 : {
6130 : ListCell *lc2;
6131 :
6132 1146 : snprintf(newname, sizeof(newname), "w%d", next_n++);
6133 2466 : foreach(lc2, activeWindows)
6134 : {
6135 1323 : WindowClause *wc2 = lfirst_node(WindowClause, lc2);
6136 :
6137 1323 : if (wc2->name && strcmp(wc2->name, newname) == 0)
6138 3 : break; /* matched */
6139 : }
6140 1146 : if (lc2 == NULL)
6141 1143 : break; /* reached the end with no match */
6142 : }
6143 1143 : wc->name = pstrdup(newname);
6144 : }
6145 1338 : }
6146 :
6147 : /*
6148 : * common_prefix_cmp
6149 : * QSort comparison function for WindowClauseSortData
6150 : *
6151 : * Sort the windows by the required sorting clauses. First, compare the sort
6152 : * clauses themselves. Second, if one window's clauses are a prefix of another
6153 : * one's clauses, put the window with more sort clauses first.
6154 : *
6155 : * We purposefully sort by the highest tleSortGroupRef first. Since
6156 : * tleSortGroupRefs are assigned to the query's DISTINCT and ORDER BY
6157 : * clauses first, and since we sort the lowest tleSortGroupRefs last, a
6158 : * WindowClause sharing a tleSortGroupRef with the query's DISTINCT or
6159 : * ORDER BY clause tends to sort last. That makes it more likely that the
6160 : * final WindowAgg will provide presorted input for the query's DISTINCT
6161 : * or ORDER BY clause, reducing the total number of sorts required.
6162 : */
6163 : static int
6164 102 : common_prefix_cmp(const void *a, const void *b)
6165 : {
6166 102 : const WindowClauseSortData *wcsa = a;
6167 102 : const WindowClauseSortData *wcsb = b;
6168 : ListCell *item_a;
6169 : ListCell *item_b;
6170 :
6171 183 : forboth(item_a, wcsa->uniqueOrder, item_b, wcsb->uniqueOrder)
6172 : {
6173 132 : SortGroupClause *sca = lfirst_node(SortGroupClause, item_a);
6174 132 : SortGroupClause *scb = lfirst_node(SortGroupClause, item_b);
6175 :
6176 132 : if (sca->tleSortGroupRef > scb->tleSortGroupRef)
6177 51 : return -1;
6178 126 : else if (sca->tleSortGroupRef < scb->tleSortGroupRef)
6179 33 : return 1;
6180 93 : else if (sca->sortop > scb->sortop)
6181 0 : return -1;
6182 93 : else if (sca->sortop < scb->sortop)
6183 12 : return 1;
6184 81 : else if (sca->nulls_first && !scb->nulls_first)
6185 0 : return -1;
6186 81 : else if (!sca->nulls_first && scb->nulls_first)
6187 0 : return 1;
6188 : /* no need to compare eqop, since it is fully determined by sortop */
6189 : }
6190 :
6191 51 : if (list_length(wcsa->uniqueOrder) > list_length(wcsb->uniqueOrder))
6192 3 : return -1;
6193 48 : else if (list_length(wcsa->uniqueOrder) < list_length(wcsb->uniqueOrder))
6194 15 : return 1;
6195 :
6196 33 : return 0;
6197 : }
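: /*
:  * Worked example for the comparator above (hypothetical sortgrouprefs):
:  * if wcsa->uniqueOrder carries tleSortGroupRefs (3, 1) and wcsb's carries
:  * just (3), the forboth loop finds the first pair equal and then runs off
:  * the end of the shorter list; the length comparison returns -1, so wcsa
:  * sorts first and its (3, 1) ordering also covers wcsb.
:  */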
6198 :
6199 : /*
6200 : * make_window_input_target
6201 : * Generate appropriate PathTarget for initial input to WindowAgg nodes.
6202 : *
6203 : * When the query has window functions, this function computes the desired
6204 : * target to be computed by the node just below the first WindowAgg.
6205 : * This tlist must contain all values needed to evaluate the window functions,
6206 : * compute the final target list, and perform any required final sort step.
6207 : * If multiple WindowAggs are needed, each intermediate one adds its window
6208 : * function results onto this base tlist; only the topmost WindowAgg computes
6209 : * the actual desired target list.
6210 : *
6211 : * This function is much like make_group_input_target, though not quite enough
6212 : * like it to share code. As in that function, we flatten most expressions
6213 : * into their component variables. But we do not want to flatten window
6214 : * PARTITION BY/ORDER BY clauses, since that might result in multiple
6215 : * evaluations of them, which would be bad (possibly even resulting in
6216 : * inconsistent answers, if they contain volatile functions).
6217 : * Also, we must not flatten GROUP BY clauses that were left unflattened by
6218 : * make_group_input_target, because we may no longer have access to the
6219 : * individual Vars in them.
6220 : *
6221 : * Another key difference from make_group_input_target is that we don't
6222 : * flatten Aggref expressions, since those are to be computed below the
6223 : * window functions and just referenced like Vars above that.
6224 : *
6225 : * 'final_target' is the query's final target list (in PathTarget form)
6226 : * 'activeWindows' is the list of active windows previously identified by
6227 : * select_active_windows.
6228 : *
6229 : * The result is the PathTarget to be computed by the plan node immediately
6230 : * below the first WindowAgg node.
6231 : */
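: /*
:  * A hypothetical illustration (not part of the original comments): given
:  *		SELECT f(x), sum(y) OVER (PARTITION BY g(z)) FROM tab;
:  * f(x) is flattened to the Var x, since f can run above the WindowAgg,
:  * but g(z) is kept whole in the input target: as a PARTITION BY
:  * expression it must be evaluated exactly once, below the WindowAgg.
:  */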
6232 : static PathTarget *
6233 1338 : make_window_input_target(PlannerInfo *root,
6234 : PathTarget *final_target,
6235 : List *activeWindows)
6236 : {
6237 : PathTarget *input_target;
6238 : Bitmapset *sgrefs;
6239 : List *flattenable_cols;
6240 : List *flattenable_vars;
6241 : int i;
6242 : ListCell *lc;
6243 :
6244 : Assert(root->parse->hasWindowFuncs);
6245 :
6246 : /*
6247 : * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
6248 : * into a bitmapset for convenient reference below.
6249 : */
6250 1338 : sgrefs = NULL;
6251 2769 : foreach(lc, activeWindows)
6252 : {
6253 1431 : WindowClause *wc = lfirst_node(WindowClause, lc);
6254 : ListCell *lc2;
6255 :
6256 1809 : foreach(lc2, wc->partitionClause)
6257 : {
6258 378 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6259 :
6260 378 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6261 : }
6262 2567 : foreach(lc2, wc->orderClause)
6263 : {
6264 1136 : SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
6265 :
6266 1136 : sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
6267 : }
6268 : }
6269 :
6270 : /* Add in sortgroupref numbers of GROUP BY clauses, too */
6271 1434 : foreach(lc, root->processed_groupClause)
6272 : {
6273 96 : SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
6274 :
6275 96 : sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
6276 : }
6277 :
6278 : /*
6279 : * Construct a target containing all the non-flattenable targetlist items,
6280 : * and save aside the others for a moment.
6281 : */
6282 1338 : input_target = create_empty_pathtarget();
6283 1338 : flattenable_cols = NIL;
6284 :
6285 1338 : i = 0;
6286 5614 : foreach(lc, final_target->exprs)
6287 : {
6288 4276 : Expr *expr = (Expr *) lfirst(lc);
6289 4276 : Index sgref = get_pathtarget_sortgroupref(final_target, i);
6290 :
6291 : /*
6292 : * Don't want to deconstruct window clauses or GROUP BY items. (Note
6293 : * that such items can't contain window functions, so it's okay to
6294 : * compute them below the WindowAgg nodes.)
6295 : */
6296 4276 : if (sgref != 0 && bms_is_member(sgref, sgrefs))
6297 : {
6298 : /*
6299 : * Don't want to deconstruct this value, so add it to the input
6300 : * target as-is.
6301 : */
6302 1432 : add_column_to_pathtarget(input_target, expr, sgref);
6303 : }
6304 : else
6305 : {
6306 : /*
6307 : * Column is to be flattened, so just remember the expression for
6308 : * later call to pull_var_clause.
6309 : */
6310 2844 : flattenable_cols = lappend(flattenable_cols, expr);
6311 : }
6312 :
6313 4276 : i++;
6314 : }
6315 :
6316 : /*
6317 : * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
6318 : * add them to the input target if not already present. (Some might be
6319 : * there already because they're used directly as window/group clauses.)
6320 : *
6321 : * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
6322 : * Aggrefs are placed in the Agg node's tlist and not left to be computed
6323 : * at higher levels. On the other hand, we should recurse into
6324 : * WindowFuncs to make sure their input expressions are available.
6325 : */
6326 1338 : flattenable_vars = pull_var_clause((Node *) flattenable_cols,
6327 : PVC_INCLUDE_AGGREGATES |
6328 : PVC_RECURSE_WINDOWFUNCS |
6329 : PVC_INCLUDE_PLACEHOLDERS);
6330 1338 : add_new_columns_to_pathtarget(input_target, flattenable_vars);
6331 :
6332 : /* clean up cruft */
6333 1338 : list_free(flattenable_vars);
6334 1338 : list_free(flattenable_cols);
6335 :
6336 : /* XXX this causes some redundant cost calculation ... */
6337 1338 : return set_pathtarget_cost_width(root, input_target);
6338 : }
6339 :
6340 : /*
6341 : * make_pathkeys_for_window
6342 : * Create a pathkeys list describing the required input ordering
6343 : * for the given WindowClause.
6344 : *
6345 : * Modifies wc's partitionClause to remove any clauses which are deemed
6346 : * redundant by the pathkey logic.
6347 : *
6348 : * The required ordering is first the PARTITION keys, then the ORDER keys.
6349 : * In the future we might try to implement windowing using hashing, in which
6350 : * case the ordering could be relaxed, but for now we always sort.
6351 : */
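: /*
:  * For example (a sketch): a window declared OVER (PARTITION BY a ORDER BY
:  * b) yields pathkeys requesting a sort on (a, b).
:  */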
6352 : static List *
6353 2876 : make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
6354 : List *tlist)
6355 : {
6356 2876 : List *window_pathkeys = NIL;
6357 :
6358 : /* Throw error if can't sort */
6359 2876 : if (!grouping_is_sortable(wc->partitionClause))
6360 0 : ereport(ERROR,
6361 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6362 : errmsg("could not implement window PARTITION BY"),
6363 : errdetail("Window partitioning columns must be of sortable datatypes.")));
6364 2876 : if (!grouping_is_sortable(wc->orderClause))
6365 0 : ereport(ERROR,
6366 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
6367 : errmsg("could not implement window ORDER BY"),
6368 : errdetail("Window ordering columns must be of sortable datatypes.")));
6369 :
6370 : /*
6371 : * First fetch the pathkeys for the PARTITION BY clause. We can safely
6372 : * remove any clauses from the wc->partitionClause for redundant pathkeys.
6373 : */
6374 2876 : if (wc->partitionClause != NIL)
6375 : {
6376 : bool sortable;
6377 :
6378 660 : window_pathkeys = make_pathkeys_for_sortclauses_extended(root,
6379 : &wc->partitionClause,
6380 : tlist,
6381 : true,
6382 : false,
6383 : &sortable,
6384 : false);
6385 :
6386 : Assert(sortable);
6387 : }
6388 :
6389 : /*
6390 : * In principle, we could also consider removing redundant ORDER BY items
6391 : * too as doing so does not alter the result of peer row checks done by
6392 : * the executor. However, we must *not* remove the ordering column for
6393 : * RANGE OFFSET cases, as the executor needs that for in_range tests even
6394 : * if it's known to be equal to some partitioning column.
6395 : */
6396 2876 : if (wc->orderClause != NIL)
6397 : {
6398 : List *orderby_pathkeys;
6399 :
6400 2225 : orderby_pathkeys = make_pathkeys_for_sortclauses(root,
6401 : wc->orderClause,
6402 : tlist);
6403 :
6404 : /* Okay, make the combined pathkeys */
6405 2225 : if (window_pathkeys != NIL)
6406 473 : window_pathkeys = append_pathkeys(window_pathkeys, orderby_pathkeys);
6407 : else
6408 1752 : window_pathkeys = orderby_pathkeys;
6409 : }
6410 :
6411 2876 : return window_pathkeys;
6412 : }
6413 :
6414 : /*
6415 : * make_sort_input_target
6416 : * Generate appropriate PathTarget for initial input to Sort step.
6417 : *
6418 : * If the query has ORDER BY, this function chooses the target to be computed
6419 : * by the node just below the Sort (and DISTINCT, if any, since Unique can't
6420 : * project) steps. This might or might not be identical to the query's final
6421 : * output target.
6422 : *
6423 : * The main argument for keeping the sort-input tlist the same as the final tlist
6424 : * is that we avoid a separate projection node (which will be needed if
6425 : * they're different, because Sort can't project). However, there are also
6426 : * advantages to postponing tlist evaluation till after the Sort: it ensures
6427 : * a consistent order of evaluation for any volatile functions in the tlist,
6428 : * and if there's also a LIMIT, we can stop the query without ever computing
6429 : * tlist functions for later rows, which is beneficial for both volatile and
6430 : * expensive functions.
6431 : *
6432 : * Our current policy is to postpone volatile expressions till after the sort
6433 : * unconditionally (assuming that that's possible, ie they are in plain tlist
6434 : * columns and not ORDER BY/GROUP BY/DISTINCT columns). We also prefer to
6435 : * postpone set-returning expressions, because running them beforehand would
6436 : * bloat the sort dataset, and because it might cause unexpected output order
6437 : * if the sort isn't stable. However there's a constraint on that: all SRFs
6438 : * in the tlist should be evaluated at the same plan step, so that they can
6439 : * run in sync in nodeProjectSet. So if any SRFs are in sort columns, we
6440 : * mustn't postpone any SRFs. (Note that in principle that policy should
6441 : * probably get applied to the group/window input targetlists too, but we
6442 : * have not done that historically.) Lastly, expensive expressions are
6443 : * postponed if there is a LIMIT, or if root->tuple_fraction shows that
6444 : * partial evaluation of the query is possible (if neither is true, we expect
6445 : * to have to evaluate the expressions for every row anyway), or if there are
6446 : * any volatile or set-returning expressions (since once we've put in a
6447 : * projection at all, it won't cost any more to postpone more stuff).
6448 : *
6449 : * Another issue that could potentially be considered here is that
6450 : * evaluating tlist expressions could result in data that's either wider
6451 : * or narrower than the input Vars, thus changing the volume of data that
6452 : * has to go through the Sort. However, we usually have only a very bad
6453 : * idea of the output width of any expression more complex than a Var,
6454 : * so for now it seems too risky to try to optimize on that basis.
6455 : *
6456 : * Note that if we do produce a modified sort-input target, and then the
6457 : * query ends up not using an explicit Sort, no particular harm is done:
6458 : * we'll initially use the modified target for the preceding path nodes,
6459 : * but then change them to the final target with apply_projection_to_path.
6460 : * Moreover, in such a case the guarantees about evaluation order of
6461 : * volatile functions still hold, since the rows are sorted already.
6462 : *
6463 : * This function has some things in common with make_group_input_target and
6464 : * make_window_input_target, though the detailed rules for what to do are
6465 : * different. We never flatten/postpone any grouping or ordering columns;
6466 : * those are needed before the sort. If we do flatten a particular
6467 : * expression, we leave Aggref and WindowFunc nodes alone, since those were
6468 : * computed earlier.
6469 : *
6470 : * 'final_target' is the query's final target list (in PathTarget form)
6471 : * 'have_postponed_srfs' is an output argument, see below
6472 : *
6473 : * The result is the PathTarget to be computed by the plan node immediately
6474 : * below the Sort step (and the Distinct step, if any). This will be
6475 : * exactly final_target if we decide a projection step wouldn't be helpful.
6476 : *
6477 : * In addition, *have_postponed_srfs is set to true if we choose to postpone
6478 : * any set-returning functions to after the Sort.
6479 : */
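: /*
:  * A hypothetical illustration of the policy above (not from the original
:  * comments): in
:  *		SELECT x, random(), slow_func(x) FROM tab ORDER BY x LIMIT 10;
:  * (slow_func being some hypothetical expensive function) x must be
:  * computed below the Sort since it is the sort key, while volatile
:  * random() and expensive slow_func(x) are postponed to a projection above
:  * the Sort; thanks to the LIMIT they are then evaluated for only the ten
:  * returned rows.
:  */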
6480 : static PathTarget *
6481 36082 : make_sort_input_target(PlannerInfo *root,
6482 : PathTarget *final_target,
6483 : bool *have_postponed_srfs)
6484 : {
6485 36082 : Query *parse = root->parse;
6486 : PathTarget *input_target;
6487 : int ncols;
6488 : bool *col_is_srf;
6489 : bool *postpone_col;
6490 : bool have_srf;
6491 : bool have_volatile;
6492 : bool have_expensive;
6493 : bool have_srf_sortcols;
6494 : bool postpone_srfs;
6495 : List *postponable_cols;
6496 : List *postponable_vars;
6497 : int i;
6498 : ListCell *lc;
6499 :
6500 : /* Shouldn't get here unless query has ORDER BY */
6501 : Assert(parse->sortClause);
6502 :
6503 36082 : *have_postponed_srfs = false; /* default result */
6504 :
6505 : /* Inspect tlist and collect per-column information */
6506 36082 : ncols = list_length(final_target->exprs);
6507 36082 : col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
6508 36082 : postpone_col = (bool *) palloc0(ncols * sizeof(bool));
6509 36082 : have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
6510 :
6511 36082 : i = 0;
6512 217803 : foreach(lc, final_target->exprs)
6513 : {
6514 181721 : Expr *expr = (Expr *) lfirst(lc);
6515 :
6516 : /*
6517 : * If the column has a sortgroupref, assume it has to be evaluated
6518 : * before sorting. Generally such columns would be ORDER BY, GROUP
6519 : * BY, etc targets. One exception is columns that were removed from
6520 : * GROUP BY by remove_useless_groupby_columns() ... but those would
6521 : * only be Vars anyway. There don't seem to be any cases where it
6522 : * would be worth the trouble to double-check.
6523 : */
6524 181721 : if (get_pathtarget_sortgroupref(final_target, i) == 0)
6525 : {
6526 : /*
6527 : * Check for SRF or volatile functions. Check the SRF case first
6528 : * because we must know whether we have any postponed SRFs.
6529 : */
6530 130979 : if (parse->hasTargetSRFs &&
6531 108 : expression_returns_set((Node *) expr))
6532 : {
6533 : /* We'll decide below whether these are postponable */
6534 48 : col_is_srf[i] = true;
6535 48 : have_srf = true;
6536 : }
6537 130823 : else if (contain_volatile_functions((Node *) expr))
6538 : {
6539 : /* Unconditionally postpone */
6540 124 : postpone_col[i] = true;
6541 124 : have_volatile = true;
6542 : }
6543 : else
6544 : {
6545 : /*
6546 : * Else check the cost. XXX it's annoying to have to do this
6547 : * when set_pathtarget_cost_width() just did it. Refactor to
6548 : * allow sharing the work?
6549 : */
6550 : QualCost cost;
6551 :
6552 130699 : cost_qual_eval_node(&cost, (Node *) expr, root);
6553 :
6554 : /*
6555 : * We arbitrarily define "expensive" as "more than 10X
6556 : * cpu_operator_cost". Note this will take in any PL function
6557 : * with default cost.
6558 : */
6559 130699 : if (cost.per_tuple > 10 * cpu_operator_cost)
6560 : {
6561 8448 : postpone_col[i] = true;
6562 8448 : have_expensive = true;
6563 : }
6564 : }
6565 : }
6566 : else
6567 : {
6568 : /* For sortgroupref cols, just check if any contain SRFs */
6569 50850 : if (!have_srf_sortcols &&
6570 51005 : parse->hasTargetSRFs &&
6571 167 : expression_returns_set((Node *) expr))
6572 74 : have_srf_sortcols = true;
6573 : }
6574 :
6575 181721 : i++;
6576 : }
6577 :
6578 : /*
6579 : * We can postpone SRFs if we have some but none are in sortgroupref cols.
6580 : */
6581 36082 : postpone_srfs = (have_srf && !have_srf_sortcols);
6582 :
6583 : /*
6584 : * If we don't need a post-sort projection, just return final_target.
6585 : */
6586 36082 : if (!(postpone_srfs || have_volatile ||
6587 35930 : (have_expensive &&
6588 4965 : (parse->limitCount || root->tuple_fraction > 0))))
6589 35912 : return final_target;
6590 :
6591 : /*
6592 : * Report whether the post-sort projection will contain set-returning
6593 : * functions. This is important because it affects whether the Sort can
6594 : * rely on the query's LIMIT (if any) to bound the number of rows it needs
6595 : * to return.
6596 : */
6597 170 : *have_postponed_srfs = postpone_srfs;
6598 :
6599 : /*
6600 : * Construct the sort-input target, taking all non-postponable columns and
6601 : * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
6602 : * the postponable ones.
6603 : */
6604 170 : input_target = create_empty_pathtarget();
6605 170 : postponable_cols = NIL;
6606 :
6607 170 : i = 0;
6608 1145 : foreach(lc, final_target->exprs)
6609 : {
6610 975 : Expr *expr = (Expr *) lfirst(lc);
6611 :
6612 975 : if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
6613 199 : postponable_cols = lappend(postponable_cols, expr);
6614 : else
6615 776 : add_column_to_pathtarget(input_target, expr,
6616 776 : get_pathtarget_sortgroupref(final_target, i));
6617 :
6618 975 : i++;
6619 : }
6620 :
6621 : /*
6622 : * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
6623 : * postponable columns, and add them to the sort-input target if not
6624 : * already present. (Some might be there already.) We mustn't
6625 : * deconstruct Aggrefs or WindowFuncs here, since the projection node
6626 : * would be unable to recompute them.
6627 : */
6628 170 : postponable_vars = pull_var_clause((Node *) postponable_cols,
6629 : PVC_INCLUDE_AGGREGATES |
6630 : PVC_INCLUDE_WINDOWFUNCS |
6631 : PVC_INCLUDE_PLACEHOLDERS);
6632 170 : add_new_columns_to_pathtarget(input_target, postponable_vars);
6633 :
6634 : /* clean up cruft */
6635 170 : list_free(postponable_vars);
6636 170 : list_free(postponable_cols);
6637 :
6638 : /* XXX this represents even more redundant cost calculation ... */
6639 170 : return set_pathtarget_cost_width(root, input_target);
6640 : }
6641 :
6642 : /*
6643 : * get_cheapest_fractional_path
6644 : * Find the cheapest path for retrieving a specified fraction of all
6645 : * the tuples expected to be returned by the given relation.
6646 : *
6647 : * Do not consider parameterized paths. If the caller needs a path for an
6648 : * upper rel, parameterized paths are not usable there. If the caller
6649 : * needs an append subpath, it would be constrained by the requirement
6650 : * that all the subpaths have compatible parameterization.
6651 : *
6652 : * We interpret tuple_fraction the same way as grouping_planner.
6653 : *
6654 : * We assume set_cheapest() has been run on the given rel.
6655 : */
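: /*
:  * To illustrate the convention (a sketch): tuple_fraction = 0.1 asks for
:  * the cheapest path for fetching 10% of the rows, while tuple_fraction =
:  * 100 means an absolute row count and is converted below into a fraction
:  * of the path's estimated row count.
:  */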
6656 : Path *
6657 253196 : get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
6658 : {
6659 253196 : Path *best_path = rel->cheapest_total_path;
6660 : ListCell *l;
6661 :
6662 : /* If all tuples will be retrieved, just return the cheapest-total path */
6663 253196 : if (tuple_fraction <= 0.0)
6664 248415 : return best_path;
6665 :
6666 : /* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
6667 4781 : if (tuple_fraction >= 1.0 && best_path->rows > 0)
6668 1982 : tuple_fraction /= best_path->rows;
6669 :
6670 12467 : foreach(l, rel->pathlist)
6671 : {
6672 7686 : Path *path = (Path *) lfirst(l);
6673 :
6674 7686 : if (path->param_info)
6675 100 : continue;
6676 :
6677 10391 : if (path == rel->cheapest_total_path ||
6678 2805 : compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
6679 7324 : continue;
6680 :
6681 262 : best_path = path;
6682 : }
6683 :
6684 4781 : return best_path;
6685 : }
6686 :
6687 : /*
6688 : * adjust_paths_for_srfs
6689 : * Fix up the Paths of the given upperrel to handle tSRFs properly.
6690 : *
6691 : * The executor can only handle set-returning functions that appear at the
6692 : * top level of the targetlist of a ProjectSet plan node. If we have any SRFs
6693 : * that are not at top level, we need to split up the evaluation into multiple
6694 : * plan levels in which each level satisfies this constraint. This function
6695 : * modifies each Path of an upperrel that (might) compute any SRFs in its
6696 : * output tlist to insert appropriate projection steps.
6697 : *
6698 : * The given targets and targets_contain_srfs lists are from
6699 : * split_pathtarget_at_srfs(). We assume the existing Paths emit the first
6700 : * target in targets.
6701 : */
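: /*
:  * A hypothetical illustration (not from the original comments): given
:  *		SELECT generate_series(1, generate_series(1, 3)) FROM tab;
:  * the inner SRF is not at top level, so split_pathtarget_at_srfs() splits
:  * the target into one level computing generate_series(1, 3) and another
:  * computing the outer call, and a ProjectSet node is stacked for each
:  * SRF-bearing level.
:  */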
6702 : static void
6703 6395 : adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
6704 : List *targets, List *targets_contain_srfs)
6705 : {
6706 : ListCell *lc;
6707 :
6708 : Assert(list_length(targets) == list_length(targets_contain_srfs));
6709 : Assert(!linitial_int(targets_contain_srfs));
6710 :
6711 : /* If no SRFs appear at this plan level, nothing to do */
6712 6395 : if (list_length(targets) == 1)
6713 354 : return;
6714 :
6715 : /*
6716 : * Stack SRF-evaluation nodes atop each path for the rel.
6717 : *
6718 : * In principle we should re-run set_cheapest() here to identify the
6719 : * cheapest path, but it seems unlikely that adding the same tlist eval
6720 : * costs to all the paths would change that, so we don't bother. Instead,
6721 : * just assume that the cheapest-startup and cheapest-total paths remain
6722 : * so. (There should be no parameterized paths anymore, so we needn't
6723 : * worry about updating cheapest_parameterized_paths.)
6724 : */
6725 12100 : foreach(lc, rel->pathlist)
6726 : {
6727 6059 : Path *subpath = (Path *) lfirst(lc);
6728 6059 : Path *newpath = subpath;
6729 : ListCell *lc1,
6730 : *lc2;
6731 :
6732 : Assert(subpath->param_info == NULL);
6733 18775 : forboth(lc1, targets, lc2, targets_contain_srfs)
6734 : {
6735 12716 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6736 12716 : bool contains_srfs = (bool) lfirst_int(lc2);
6737 :
6738 : /* If this level doesn't contain SRFs, do regular projection */
6739 12716 : if (contains_srfs)
6740 6089 : newpath = (Path *) create_set_projection_path(root,
6741 : rel,
6742 : newpath,
6743 : thistarget);
6744 : else
6745 6627 : newpath = (Path *) apply_projection_to_path(root,
6746 : rel,
6747 : newpath,
6748 : thistarget);
6749 : }
6750 6059 : lfirst(lc) = newpath;
6751 6059 : if (subpath == rel->cheapest_startup_path)
6752 202 : rel->cheapest_startup_path = newpath;
6753 6059 : if (subpath == rel->cheapest_total_path)
6754 202 : rel->cheapest_total_path = newpath;
6755 : }
6756 :
6757 : /* Likewise for partial paths, if any */
6758 6050 : foreach(lc, rel->partial_pathlist)
6759 : {
6760 9 : Path *subpath = (Path *) lfirst(lc);
6761 9 : Path *newpath = subpath;
6762 : ListCell *lc1,
6763 : *lc2;
6764 :
6765 : Assert(subpath->param_info == NULL);
6766 36 : forboth(lc1, targets, lc2, targets_contain_srfs)
6767 : {
6768 27 : PathTarget *thistarget = lfirst_node(PathTarget, lc1);
6769 27 : bool contains_srfs = (bool) lfirst_int(lc2);
6770 :
6771 : /* If this level doesn't contain SRFs, do regular projection */
6772 27 : if (contains_srfs)
6773 9 : newpath = (Path *) create_set_projection_path(root,
6774 : rel,
6775 : newpath,
6776 : thistarget);
6777 : else
6778 : {
6779 : /* avoid apply_projection_to_path, in case of multiple refs */
6780 18 : newpath = (Path *) create_projection_path(root,
6781 : rel,
6782 : newpath,
6783 : thistarget);
6784 : }
6785 : }
6786 9 : lfirst(lc) = newpath;
6787 : }
6788 : }
6789 :
6790 : /*
6791 : * expression_planner
6792 : * Perform planner's transformations on a standalone expression.
6793 : *
6794 : * Various utility commands need to evaluate expressions that are not part
6795 : * of a plannable query. They can do so using the executor's regular
6796 : * expression-execution machinery, but first the expression has to be fed
6797 : * through here to transform it from parser output to something executable.
6798 : *
6799 : * Currently, we disallow sublinks in standalone expressions, so there's no
6800 : * real "planning" involved here. (That might not always be true though.)
6801 : * What we must do is run eval_const_expressions to ensure that any function
6802 : * calls are converted to positional notation and function default arguments
6803 : * get inserted. The fact that constant subexpressions get simplified is a
6804 : * side-effect that is useful when the expression will get evaluated more than
6805 : * once. Also, we must fix operator function IDs.
6806 : *
6807 : * This does not return any information about dependencies of the expression.
6808 : * Hence callers should use the results only for the duration of the current
6809 : * query. Callers that would like to cache the results for longer should use
6810 : * expression_planner_with_deps, probably via the plancache.
6811 : *
6812 : * Note: this must not make any damaging changes to the passed-in expression
6813 : * tree. (It would actually be okay to apply fix_opfuncids to it, but since
6814 : * we first do an expression_tree_mutator-based walk, what is returned will
6815 : * be a new node tree.) The result is constructed in the current memory
6816 : * context; beware that this can leak a lot of additional stuff there, too.
6817 : */
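: /*
:  * One typical call pattern (a sketch under assumed context; see actual
:  * callers for authoritative usage):
:  *
:  *		expr = expression_planner(expr);
:  *		exprstate = ExecPrepareExpr(expr, estate);
:  *		value = ExecEvalExpr(exprstate, econtext, &isnull);
:  */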
6818 : Expr *
6819 122499 : expression_planner(Expr *expr)
6820 : {
6821 : Node *result;
6822 :
6823 : /*
6824 : * Convert named-argument function calls, insert default arguments and
6825 : * simplify constant subexprs
6826 : */
6827 122499 : result = eval_const_expressions(NULL, (Node *) expr);
6828 :
6829 : /* Fill in opfuncid values if missing */
6830 122490 : fix_opfuncids(result);
6831 :
6832 122490 : return (Expr *) result;
6833 : }
6834 :
6835 : /*
6836 : * expression_planner_with_deps
6837 : * Perform planner's transformations on a standalone expression,
6838 : * returning expression dependency information along with the result.
6839 : *
6840 : * This is identical to expression_planner() except that it also returns
6841 : * information about possible dependencies of the expression, ie identities of
6842 : * objects whose definitions affect the result. As in a PlannedStmt, these
6843 : * are expressed as a list of relation Oids and a list of PlanInvalItems.
6844 : */
6845 : Expr *
6846 189 : expression_planner_with_deps(Expr *expr,
6847 : List **relationOids,
6848 : List **invalItems)
6849 : {
6850 : Node *result;
6851 : PlannerGlobal glob;
6852 : PlannerInfo root;
6853 :
6854 : /* Make up dummy planner state so we can use setrefs machinery */
6855 5481 : MemSet(&glob, 0, sizeof(glob));
6856 189 : glob.type = T_PlannerGlobal;
6857 189 : glob.relationOids = NIL;
6858 189 : glob.invalItems = NIL;
6859 :
6860 17577 : MemSet(&root, 0, sizeof(root));
6861 189 : root.type = T_PlannerInfo;
6862 189 : root.glob = &glob;
6863 :
6864 : /*
6865 : * Convert named-argument function calls, insert default arguments and
6866 : * simplify constant subexprs. Collect identities of inlined functions
6867 : * and elided domains, too.
6868 : */
6869 189 : result = eval_const_expressions(&root, (Node *) expr);
6870 :
6871 : /* Fill in opfuncid values if missing */
6872 189 : fix_opfuncids(result);
6873 :
6874 : /*
6875 : * Now walk the finished expression to find anything else we ought to
6876 : * record as an expression dependency.
6877 : */
6878 189 : (void) extract_query_dependencies_walker(result, &root);
6879 :
6880 189 : *relationOids = glob.relationOids;
6881 189 : *invalItems = glob.invalItems;
6882 :
6883 189 : return (Expr *) result;
6884 : }
6885 :
6886 :
6887 : /*
6888 : * plan_cluster_use_sort
6889 : * Use the planner to decide how CLUSTER should implement sorting
6890 : *
6891 : * tableOid is the OID of a table to be clustered on its index indexOid
6892 : * (which is already known to be a btree index). Decide whether it's
6893 : * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
6894 : * Return true to use sorting, false to use an indexscan.
6895 : *
6896 : * Note: caller had better already hold some type of lock on the table.
6897 : */
6898 : bool
6899 97 : plan_cluster_use_sort(Oid tableOid, Oid indexOid)
6900 : {
6901 : PlannerInfo *root;
6902 : Query *query;
6903 : PlannerGlobal *glob;
6904 : RangeTblEntry *rte;
6905 : RelOptInfo *rel;
6906 : IndexOptInfo *indexInfo;
6907 : QualCost indexExprCost;
6908 : Cost comparisonCost;
6909 : Path *seqScanPath;
6910 : Path seqScanAndSortPath;
6911 : IndexPath *indexScanPath;
6912 : ListCell *lc;
6913 :
6914 : /* We can short-circuit the cost comparison if indexscans are disabled */
6915 97 : if (!enable_indexscan)
6916 15 : return true; /* use sort */
6917 :
6918 : /* Set up mostly-dummy planner state */
6919 82 : query = makeNode(Query);
6920 82 : query->commandType = CMD_SELECT;
6921 :
6922 82 : glob = makeNode(PlannerGlobal);
6923 :
6924 82 : root = makeNode(PlannerInfo);
6925 82 : root->parse = query;
6926 82 : root->glob = glob;
6927 82 : root->query_level = 1;
6928 82 : root->planner_cxt = CurrentMemoryContext;
6929 82 : root->wt_param_id = -1;
6930 82 : root->join_domains = list_make1(makeNode(JoinDomain));
6931 :
6932 : /* Build a minimal RTE for the rel */
6933 82 : rte = makeNode(RangeTblEntry);
6934 82 : rte->rtekind = RTE_RELATION;
6935 82 : rte->relid = tableOid;
6936 82 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
6937 82 : rte->rellockmode = AccessShareLock;
6938 82 : rte->lateral = false;
6939 82 : rte->inh = false;
6940 82 : rte->inFromCl = true;
6941 82 : query->rtable = list_make1(rte);
6942 82 : addRTEPermissionInfo(&query->rteperminfos, rte);
6943 :
6944 : /* Set up RTE/RelOptInfo arrays */
6945 82 : setup_simple_rel_arrays(root);
6946 :
6947 : /* Build RelOptInfo */
6948 82 : rel = build_simple_rel(root, 1, NULL);
6949 :
6950 : /* Locate IndexOptInfo for the target index */
6951 82 : indexInfo = NULL;
6952 101 : foreach(lc, rel->indexlist)
6953 : {
6954 101 : indexInfo = lfirst_node(IndexOptInfo, lc);
6955 101 : if (indexInfo->indexoid == indexOid)
6956 82 : break;
6957 : }
6958 :
6959 : /*
6960 : * It's possible that get_relation_info did not generate an IndexOptInfo
6961 : * for the desired index; this could happen if it hasn't yet reached its
6962 : * indcheckxmin usability horizon, or if it's a system index and we're
6963 : * ignoring system indexes. In such cases we should tell CLUSTER to not
6964 : * trust the index contents but use seqscan-and-sort.
6965 : */
6966 82 : if (lc == NULL) /* not in the list? */
6967 0 : return true; /* use sort */
6968 :
6969 : /*
6970 : * Rather than doing all the pushups that would be needed to use
6971 : * set_baserel_size_estimates, just do a quick hack for rows and width.
6972 : */
6973 82 : rel->rows = rel->tuples;
6974 82 : rel->reltarget->width = get_relation_data_width(tableOid, NULL);
6975 :
6976 82 : root->total_table_pages = rel->pages;
6977 :
6978 : /*
6979 : * Determine eval cost of the index expressions, if any. We need to
6980 : * charge twice that amount for each tuple comparison that happens during
6981 : * the sort, since tuplesort.c will have to re-evaluate the index
6982 : * expressions each time. (XXX that's pretty inefficient...)
6983 : */
6984 82 : cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
6985 82 : comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
6986 :
6987 : /* Estimate the cost of seq scan + sort */
6988 82 : seqScanPath = create_seqscan_path(root, rel, NULL, 0);
6989 82 : cost_sort(&seqScanAndSortPath, root, NIL,
6990 : seqScanPath->disabled_nodes,
6991 82 : seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
6992 : comparisonCost, maintenance_work_mem, -1.0);
6993 :
6994 : /* Estimate the cost of index scan */
6995 82 : indexScanPath = create_index_path(root, indexInfo,
6996 : NIL, NIL, NIL, NIL,
6997 : ForwardScanDirection, false,
6998 : NULL, 1.0, false);
6999 :
7000 82 : return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
7001 : }
7002 :
7003 : /*
7004 : * plan_create_index_workers
7005 : * Use the planner to decide how many parallel worker processes
7006 : * CREATE INDEX should request for use
7007 : *
7008 : * tableOid is the table on which the index is to be built. indexOid is the
7009 : * OID of an index to be created or reindexed (which must be an index with
7010 : * support for parallel builds - currently btree, GIN, or BRIN).
7011 : *
7012 : * Return value is the number of parallel worker processes to request. It
7013 : * may be unsafe to proceed if this is 0. Note that this does not include the
7014 : * leader participating as a worker (value is always a number of parallel
7015 : * worker processes).
7016 : *
7017 : * Note: caller had better already hold some type of lock on the table and
7018 : * index.
7019 : */
7020 : int
7021 18308 : plan_create_index_workers(Oid tableOid, Oid indexOid)
7022 : {
7023 : PlannerInfo *root;
7024 : Query *query;
7025 : PlannerGlobal *glob;
7026 : RangeTblEntry *rte;
7027 : Relation heap;
7028 : Relation index;
7029 : RelOptInfo *rel;
7030 : int parallel_workers;
7031 : BlockNumber heap_blocks;
7032 : double reltuples;
7033 : double allvisfrac;
7034 :
7035 : /*
7036 : * We don't allow performing parallel operation in standalone backend or
7037 : * when parallelism is disabled.
7038 : */
7039 18308 : if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
7040 257 : return 0;
7041 :
7042 : /* Set up largely-dummy planner state */
7043 18051 : query = makeNode(Query);
7044 18051 : query->commandType = CMD_SELECT;
7045 :
7046 18051 : glob = makeNode(PlannerGlobal);
7047 :
7048 18051 : root = makeNode(PlannerInfo);
7049 18051 : root->parse = query;
7050 18051 : root->glob = glob;
7051 18051 : root->query_level = 1;
7052 18051 : root->planner_cxt = CurrentMemoryContext;
7053 18051 : root->wt_param_id = -1;
7054 18051 : root->join_domains = list_make1(makeNode(JoinDomain));
7055 :
7056 : /*
7057 : * Build a minimal RTE.
7058 : *
7059 : * Mark the RTE with inh = true. This is a kludge to prevent
7060 : * get_relation_info() from fetching index info; suppressing that is
7061 : * necessary because get_relation_info() does not expect any IndexOptInfo
7062 : * to be currently undergoing REINDEX.
7063 : */
7064 18051 : rte = makeNode(RangeTblEntry);
7065 18051 : rte->rtekind = RTE_RELATION;
7066 18051 : rte->relid = tableOid;
7067 18051 : rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
7068 18051 : rte->rellockmode = AccessShareLock;
7069 18051 : rte->lateral = false;
7070 18051 : rte->inh = true;
7071 18051 : rte->inFromCl = true;
7072 18051 : query->rtable = list_make1(rte);
7073 18051 : addRTEPermissionInfo(&query->rteperminfos, rte);
7074 :
7075 : /* Set up RTE/RelOptInfo arrays */
7076 18051 : setup_simple_rel_arrays(root);
7077 :
7078 : /* Build RelOptInfo */
7079 18051 : rel = build_simple_rel(root, 1, NULL);
7080 :
7081 : /* Rels are assumed already locked by the caller */
7082 18051 : heap = table_open(tableOid, NoLock);
7083 18051 : index = index_open(indexOid, NoLock);
7084 :
7085 : /*
7086 : * Determine if it's safe to proceed.
7087 : *
7088 : * Currently, parallel workers can't access the leader's temporary tables.
7089 : * Furthermore, any index predicate or index expressions must be parallel
7090 : * safe.
7091 : */
7092 18051 : if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
7093 17011 : !is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
7094 16941 : !is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
7095 : {
7096 1110 : parallel_workers = 0;
7097 1110 : goto done;
7098 : }
7099 :
7100 : /*
7101 : * If parallel_workers storage parameter is set for the table, accept that
7102 : * as the number of parallel worker processes to launch (though still cap
7103 : * at max_parallel_maintenance_workers). Note that we deliberately do not
7104 : * consider any other factor (e.g., memory use by workers) when
7105 : * parallel_workers is set.
7106 : */
7107 16941 : if (rel->rel_parallel_workers != -1)
7108 : {
7109 50 : parallel_workers = Min(rel->rel_parallel_workers,
7110 : max_parallel_maintenance_workers);
7111 50 : goto done;
7112 : }
7113 :
7114 : /*
7115 : * Estimate heap relation size ourselves, since rel->pages cannot be
7116 : * trusted (heap RTE was marked as inheritance parent)
7117 : */
7118 16891 : estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
7119 :
7120 : /*
7121 : * Determine number of workers to scan the heap relation using generic
7122 : * model
7123 : */
7124 16891 : parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
7125 : max_parallel_maintenance_workers);
7126 :
7127 : /*
7128 : * Cap workers based on available maintenance_work_mem as needed.
7129 : *
7130 : * Note that each tuplesort participant receives an even share of the
7131 : * total maintenance_work_mem budget. Aim to leave participants
7132 : * (including the leader as a participant) with no less than 32MB of
7133 : * memory. This puts a maintenance_work_mem setting of 64MB just past
7134 : * the threshold at which launching a single parallel worker to sort
7135 : * becomes possible.
7136 : */
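: /*
:  * Worked example (illustrative only): with maintenance_work_mem = 64MB
:  * and one worker, the two participants (worker plus leader) get
:  * 64MB / 2 = 32MB apiece, which meets the floor; a second worker would
:  * leave 64MB / 3 < 32MB apiece, so the loop below would cap workers at
:  * one.
:  */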
7137 16972 : while (parallel_workers > 0 &&
7138 163 : maintenance_work_mem / (parallel_workers + 1) < 32 * 1024)
7139 81 : parallel_workers--;
7140 :
7141 16891 : done:
7142 18051 : index_close(index, NoLock);
7143 18051 : table_close(heap, NoLock);
7144 :
7145 18051 : return parallel_workers;
7146 : }
7147 :
7148 : /*
7149 : * add_paths_to_grouping_rel
7150 : *
7151 : * Add non-partial paths to the grouping relation.
7152 : */
7153 : static void
7154 23614 : add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
7155 : RelOptInfo *grouped_rel,
7156 : RelOptInfo *partially_grouped_rel,
7157 : const AggClauseCosts *agg_costs,
7158 : grouping_sets_data *gd,
7159 : GroupPathExtraData *extra)
7160 : {
7161 23614 : Query *parse = root->parse;
7162 23614 : Path *cheapest_path = input_rel->cheapest_total_path;
7163 23614 : Path *cheapest_partially_grouped_path = NULL;
7164 : ListCell *lc;
7165 23614 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7166 23614 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7167 23614 : List *havingQual = (List *) extra->havingQual;
7168 23614 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7169 23614 : double dNumGroups = 0;
7170 23614 : double dNumFinalGroups = 0;
7171 :
7172 : /*
7173 : * Estimate number of groups for non-split aggregation.
7174 : */
7175 23614 : dNumGroups = get_number_of_groups(root,
7176 : cheapest_path->rows,
7177 : gd,
7178 : extra->targetList);
7179 :
7180 23614 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7181 : {
7182 1499 : cheapest_partially_grouped_path =
7183 : partially_grouped_rel->cheapest_total_path;
7184 :
7185 : /*
7186 : * Estimate number of groups for final phase of partial aggregation.
7187 : */
7188 : dNumFinalGroups =
7189 1499 : get_number_of_groups(root,
7190 : cheapest_partially_grouped_path->rows,
7191 : gd,
7192 : extra->targetList);
7193 : }
7194 :
7195 23614 : if (can_sort)
7196 : {
7197 : /*
7198 : * Use any available suitably-sorted path as input, and also consider
7199 : * sorting the cheapest-total path and incremental sort on any paths
7200 : * with presorted keys.
7201 : */
7202 48936 : foreach(lc, input_rel->pathlist)
7203 : {
7204 : ListCell *lc2;
7205 25325 : Path *path = (Path *) lfirst(lc);
7206 25325 : Path *path_save = path;
7207 25325 : List *pathkey_orderings = NIL;
7208 :
7209 : /* generate alternative group orderings that might be useful */
7210 25325 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7211 :
7212 : Assert(list_length(pathkey_orderings) > 0);
7213 :
7214 50722 : foreach(lc2, pathkey_orderings)
7215 : {
7216 25397 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7217 :
7218 : /* restore the path (we replace it in the loop) */
7219 25397 : path = path_save;
7220 :
7221 25397 : path = make_ordered_path(root,
7222 : grouped_rel,
7223 : path,
7224 : cheapest_path,
7225 : info->pathkeys,
7226 : -1.0);
7227 25397 : if (path == NULL)
7228 193 : continue;
7229 :
7230 : /* Now decide what to stick atop it */
7231 25204 : if (parse->groupingSets)
7232 : {
7233 531 : consider_groupingsets_paths(root, grouped_rel,
7234 : path, true, can_hash,
7235 : gd, agg_costs, dNumGroups);
7236 : }
7237 24673 : else if (parse->hasAggs)
7238 : {
7239 : /*
7240 : * We have aggregation, possibly with plain GROUP BY. Make
7241 : * an AggPath.
7242 : */
7243 24281 : add_path(grouped_rel, (Path *)
7244 24281 : create_agg_path(root,
7245 : grouped_rel,
7246 : path,
7247 24281 : grouped_rel->reltarget,
7248 24281 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7249 : AGGSPLIT_SIMPLE,
7250 : info->clauses,
7251 : havingQual,
7252 : agg_costs,
7253 : dNumGroups));
7254 : }
7255 392 : else if (parse->groupClause)
7256 : {
7257 : /*
7258 : * We have GROUP BY without aggregation or grouping sets.
7259 : * Make a GroupPath.
7260 : */
7261 392 : add_path(grouped_rel, (Path *)
7262 392 : create_group_path(root,
7263 : grouped_rel,
7264 : path,
7265 : info->clauses,
7266 : havingQual,
7267 : dNumGroups));
7268 : }
7269 : else
7270 : {
7271 : /* Other cases should have been handled above */
7272 : Assert(false);
7273 : }
7274 : }
7275 : }
7276 :
7277 : /*
7278 : * Instead of operating directly on the input relation, we can
7279 : * consider finalizing a partially aggregated path.
7280 : */
7281 23611 : if (partially_grouped_rel != NULL)
7282 : {
7283 3839 : foreach(lc, partially_grouped_rel->pathlist)
7284 : {
7285 : ListCell *lc2;
7286 2340 : Path *path = (Path *) lfirst(lc);
7287 2340 : Path *path_save = path;
7288 2340 : List *pathkey_orderings = NIL;
7289 :
7290 : /* generate alternative group orderings that might be useful */
7291 2340 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7292 :
7293 : Assert(list_length(pathkey_orderings) > 0);
7294 :
7295 : /* process all potentially interesting grouping reorderings */
7296 4680 : foreach(lc2, pathkey_orderings)
7297 : {
7298 2340 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7299 :
7300 : /* restore the path (we replace it in the loop) */
7301 2340 : path = path_save;
7302 :
7303 2340 : path = make_ordered_path(root,
7304 : grouped_rel,
7305 : path,
7306 : cheapest_partially_grouped_path,
7307 : info->pathkeys,
7308 : -1.0);
7309 :
7310 2340 : if (path == NULL)
7311 102 : continue;
7312 :
7313 2238 : if (parse->hasAggs)
7314 2114 : add_path(grouped_rel, (Path *)
7315 2114 : create_agg_path(root,
7316 : grouped_rel,
7317 : path,
7318 2114 : grouped_rel->reltarget,
7319 2114 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7320 : AGGSPLIT_FINAL_DESERIAL,
7321 : info->clauses,
7322 : havingQual,
7323 : agg_final_costs,
7324 : dNumFinalGroups));
7325 : else
7326 124 : add_path(grouped_rel, (Path *)
7327 124 : create_group_path(root,
7328 : grouped_rel,
7329 : path,
7330 : info->clauses,
7331 : havingQual,
7332 : dNumFinalGroups));
7333 :
7334 : }
7335 : }
7336 : }
7337 : }
7338 :
7339 23614 : if (can_hash)
7340 : {
7341 3011 : if (parse->groupingSets)
7342 : {
7343 : /*
7344 : * Try for a hash-only groupingsets path over unsorted input.
7345 : */
7346 450 : consider_groupingsets_paths(root, grouped_rel,
7347 : cheapest_path, false, true,
7348 : gd, agg_costs, dNumGroups);
7349 : }
7350 : else
7351 : {
7352 : /*
7353 : * Generate a HashAgg Path. We just need an Agg over the
7354 : * cheapest-total input path, since input order won't matter.
7355 : */
7356 2561 : add_path(grouped_rel, (Path *)
7357 2561 : create_agg_path(root, grouped_rel,
7358 : cheapest_path,
7359 2561 : grouped_rel->reltarget,
7360 : AGG_HASHED,
7361 : AGGSPLIT_SIMPLE,
7362 : root->processed_groupClause,
7363 : havingQual,
7364 : agg_costs,
7365 : dNumGroups));
7366 : }
7367 :
7368 : /*
7369 : * Generate a Finalize HashAgg Path atop of the cheapest partially
7370 : * grouped path, assuming there is one
7371 : */
7372 3011 : if (partially_grouped_rel && partially_grouped_rel->pathlist)
7373 : {
7374 724 : add_path(grouped_rel, (Path *)
7375 724 : create_agg_path(root,
7376 : grouped_rel,
7377 : cheapest_partially_grouped_path,
7378 724 : grouped_rel->reltarget,
7379 : AGG_HASHED,
7380 : AGGSPLIT_FINAL_DESERIAL,
7381 : root->processed_groupClause,
7382 : havingQual,
7383 : agg_final_costs,
7384 : dNumFinalGroups));
7385 : }
7386 : }
7387 :
7388 : /*
7389 : * When partitionwise aggregate is used, we might have fully aggregated
7390 : * paths in the partial pathlist, because add_paths_to_append_rel() will
7391 : * consider a path for grouped_rel consisting of a Parallel Append of
7392 : * non-partial paths from each child.
7393 : */
7394 23614 : if (grouped_rel->partial_pathlist != NIL)
7395 159 : gather_grouping_paths(root, grouped_rel);
7396 23614 : }
7397 :
7398 : /*
7399 : * create_partial_grouping_paths
7400 : *
7401 : * Create a new upper relation representing the result of partial aggregation
7402 : * and populate it with appropriate paths. Note that we don't finalize the
7403 : * lists of paths here, so the caller can add additional partial or non-partial
7404 : * paths and must afterward call gather_grouping_paths and set_cheapest on
7405 : * the returned upper relation.
7406 : *
7407 : * All paths for this new upper relation -- both partial and non-partial --
7408 : * have been partially aggregated but require a subsequent FinalizeAggregate
7409 : * step.
7410 : *
7411 : * NB: This function is allowed to return NULL if it determines that there is
7412 : * no real need to create a new RelOptInfo.
7413 : */
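: /*
:  * A plan shape this enables (illustrative only):
:  *
:  *		Finalize Aggregate  (AGGSPLIT_FINAL_DESERIAL)
:  *		  -> Gather
:  *		       -> Partial Aggregate  (AGGSPLIT_INITIAL_SERIAL)
:  *		            -> Parallel Seq Scan
:  *
:  * where each worker emits one partially aggregated row per group and the
:  * finalize step combines the serialized transition states.
:  */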
7414 : static RelOptInfo *
7415 21558 : create_partial_grouping_paths(PlannerInfo *root,
7416 : RelOptInfo *grouped_rel,
7417 : RelOptInfo *input_rel,
7418 : grouping_sets_data *gd,
7419 : GroupPathExtraData *extra,
7420 : bool force_rel_creation)
7421 : {
7422 21558 : Query *parse = root->parse;
7423 : RelOptInfo *partially_grouped_rel;
7424 21558 : RelOptInfo *eager_agg_rel = NULL;
7425 21558 : AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
7426 21558 : AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
7427 21558 : Path *cheapest_partial_path = NULL;
7428 21558 : Path *cheapest_total_path = NULL;
7429 21558 : double dNumPartialGroups = 0;
7430 21558 : double dNumPartialPartialGroups = 0;
7431 : ListCell *lc;
7432 21558 : bool can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
7433 21558 : bool can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
7434 :
7435 : /*
7436 : * Check whether any partially aggregated paths have been generated
7437 : * through eager aggregation.
7438 : */
7439 21558 : if (input_rel->grouped_rel &&
7440 479 : !IS_DUMMY_REL(input_rel->grouped_rel) &&
7441 479 : input_rel->grouped_rel->pathlist != NIL)
7442 449 : eager_agg_rel = input_rel->grouped_rel;
7443 :
7444 : /*
7445 : * Consider whether we should generate partially aggregated non-partial
7446 : * paths. We can only do this if we have a non-partial path, and only if
7447 : * the parent of the input rel is performing partial partitionwise
7448 : * aggregation. (Note that extra->patype is the type of partitionwise
7449 : * aggregation being used at the parent level, not this level.)
7450 : */
7451 21558 : if (input_rel->pathlist != NIL &&
7452 21558 : extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
7453 429 : cheapest_total_path = input_rel->cheapest_total_path;
7454 :
7455 : /*
7456 : * If parallelism is possible for grouped_rel, then we should consider
7457 : * generating partially-grouped partial paths. However, if the input rel
7458 : * has no partial paths, then we can't.
7459 : */
7460 21558 : if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
7461 1637 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
7462 :
7463 : /*
7464 : * If we can't partially aggregate partial paths, and we can't partially
7465 : * aggregate non-partial paths, and no partially aggregated paths were
7466 : * generated by eager aggregation, then don't bother creating the new
7467 : * RelOptInfo at all, unless the caller specified force_rel_creation.
7468 : */
7469 21558 : if (cheapest_total_path == NULL &&
7470 19744 : cheapest_partial_path == NULL &&
7471 19679 : eager_agg_rel == NULL &&
7472 19679 : !force_rel_creation)
7473 19630 : return NULL;
7474 :
7475 : /*
7476 : * Build a new upper relation to represent the result of partially
7477 : * aggregating the rows from the input relation.
7478 : */
7479 1928 : partially_grouped_rel = fetch_upper_rel(root,
7480 : UPPERREL_PARTIAL_GROUP_AGG,
7481 : grouped_rel->relids);
7482 1928 : partially_grouped_rel->consider_parallel =
7483 1928 : grouped_rel->consider_parallel;
7484 1928 : partially_grouped_rel->pgs_mask = grouped_rel->pgs_mask;
7485 1928 : partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
7486 1928 : partially_grouped_rel->serverid = grouped_rel->serverid;
7487 1928 : partially_grouped_rel->userid = grouped_rel->userid;
7488 1928 : partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
7489 1928 : partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
7490 :
7491 : /*
7492 : * Build target list for partial aggregate paths. These paths cannot just
7493 : * emit the same tlist as regular aggregate paths, because (1) we must
7494 : * include Vars and Aggrefs needed in HAVING, which might not appear in
7495 : * the result tlist, and (2) the Aggrefs must be set in partial mode.
7496 : */
7497 1928 : partially_grouped_rel->reltarget =
7498 1928 : make_partial_grouping_target(root, grouped_rel->reltarget,
7499 : extra->havingQual);
7500 :
7501 1928 : if (!extra->partial_costs_set)
7502 : {
7503 : /*
7504 : * Collect statistics about aggregates for estimating costs of
7505 : * performing aggregation in parallel.
7506 : */
7507 6942 : MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
7508 6942 : MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
7509 1157 : if (parse->hasAggs)
7510 : {
7511 : /* partial phase */
7512 1090 : get_agg_clause_costs(root, AGGSPLIT_INITIAL_SERIAL,
7513 : agg_partial_costs);
7514 :
7515 : /* final phase */
7516 1090 : get_agg_clause_costs(root, AGGSPLIT_FINAL_DESERIAL,
7517 : agg_final_costs);
7518 : }
7519 :
7520 1157 : extra->partial_costs_set = true;
7521 : }
7522 :
7523 : /* Estimate number of partial groups. */
7524 1928 : if (cheapest_total_path != NULL)
7525 : dNumPartialGroups =
7526 429 : get_number_of_groups(root,
7527 : cheapest_total_path->rows,
7528 : gd,
7529 : extra->targetList);
7530 1928 : if (cheapest_partial_path != NULL)
7531 : dNumPartialPartialGroups =
7532 1637 : get_number_of_groups(root,
7533 : cheapest_partial_path->rows,
7534 : gd,
7535 : extra->targetList);
7536 :
7537 1928 : if (can_sort && cheapest_total_path != NULL)
7538 : {
7539 : /* This should have been checked previously */
7540 : Assert(parse->hasAggs || parse->groupClause);
7541 :
7542 : /*
7543             : * Use any available suitably-sorted path as input, and also consider
7544             : * sorting the cheapest total path.
7545 : */
7546 858 : foreach(lc, input_rel->pathlist)
7547 : {
7548 : ListCell *lc2;
7549 429 : Path *path = (Path *) lfirst(lc);
7550 429 : Path *path_save = path;
7551 429 : List *pathkey_orderings = NIL;
7552 :
7553 : /* generate alternative group orderings that might be useful */
7554 429 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7555 :
7556 : Assert(list_length(pathkey_orderings) > 0);
7557 :
7558 : /* process all potentially interesting grouping reorderings */
7559 858 : foreach(lc2, pathkey_orderings)
7560 : {
7561 429 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7562 :
7563 : /* restore the path (we replace it in the loop) */
7564 429 : path = path_save;
7565 :
7566 429 : path = make_ordered_path(root,
7567 : partially_grouped_rel,
7568 : path,
7569 : cheapest_total_path,
7570 : info->pathkeys,
7571 : -1.0);
7572 :
7573 429 : if (path == NULL)
7574 0 : continue;
7575 :
7576 429 : if (parse->hasAggs)
7577 393 : add_path(partially_grouped_rel, (Path *)
7578 393 : create_agg_path(root,
7579 : partially_grouped_rel,
7580 : path,
7581 393 : partially_grouped_rel->reltarget,
7582 393 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7583 : AGGSPLIT_INITIAL_SERIAL,
7584 : info->clauses,
7585 : NIL,
7586 : agg_partial_costs,
7587 : dNumPartialGroups));
7588 : else
7589 36 : add_path(partially_grouped_rel, (Path *)
7590 36 : create_group_path(root,
7591 : partially_grouped_rel,
7592 : path,
7593 : info->clauses,
7594 : NIL,
7595 : dNumPartialGroups));
7596 : }
7597 : }
7598 : }
7599 :
7600 1928 : if (can_sort && cheapest_partial_path != NULL)
7601 : {
7602 : /* Similar to above logic, but for partial paths. */
7603 3535 : foreach(lc, input_rel->partial_pathlist)
7604 : {
7605 : ListCell *lc2;
7606 1898 : Path *path = (Path *) lfirst(lc);
7607 1898 : Path *path_save = path;
7608 1898 : List *pathkey_orderings = NIL;
7609 :
7610 : /* generate alternative group orderings that might be useful */
7611 1898 : pathkey_orderings = get_useful_group_keys_orderings(root, path);
7612 :
7613 : Assert(list_length(pathkey_orderings) > 0);
7614 :
7615 : /* process all potentially interesting grouping reorderings */
7616 3796 : foreach(lc2, pathkey_orderings)
7617 : {
7618 1898 : GroupByOrdering *info = (GroupByOrdering *) lfirst(lc2);
7619             :
7621 : /* restore the path (we replace it in the loop) */
7622 1898 : path = path_save;
7623 :
7624 1898 : path = make_ordered_path(root,
7625 : partially_grouped_rel,
7626 : path,
7627 : cheapest_partial_path,
7628 : info->pathkeys,
7629 : -1.0);
7630 :
7631 1898 : if (path == NULL)
7632 3 : continue;
7633 :
7634 1895 : if (parse->hasAggs)
7635 1834 : add_partial_path(partially_grouped_rel, (Path *)
7636 1834 : create_agg_path(root,
7637 : partially_grouped_rel,
7638 : path,
7639 1834 : partially_grouped_rel->reltarget,
7640 1834 : parse->groupClause ? AGG_SORTED : AGG_PLAIN,
7641 : AGGSPLIT_INITIAL_SERIAL,
7642 : info->clauses,
7643 : NIL,
7644 : agg_partial_costs,
7645 : dNumPartialPartialGroups));
7646 : else
7647 61 : add_partial_path(partially_grouped_rel, (Path *)
7648 61 : create_group_path(root,
7649 : partially_grouped_rel,
7650 : path,
7651 : info->clauses,
7652 : NIL,
7653 : dNumPartialPartialGroups));
7654 : }
7655 : }
7656 : }
7657 :
7658 : /*
7659 : * Add a partially-grouped HashAgg Path where possible
7660 : */
7661 1928 : if (can_hash && cheapest_total_path != NULL)
7662 : {
7663 : /* Checked above */
7664 : Assert(parse->hasAggs || parse->groupClause);
7665 :
7666 429 : add_path(partially_grouped_rel, (Path *)
7667 429 : create_agg_path(root,
7668 : partially_grouped_rel,
7669 : cheapest_total_path,
7670 429 : partially_grouped_rel->reltarget,
7671 : AGG_HASHED,
7672 : AGGSPLIT_INITIAL_SERIAL,
7673 : root->processed_groupClause,
7674 : NIL,
7675 : agg_partial_costs,
7676 : dNumPartialGroups));
7677 : }
7678 :
7679 : /*
7680 : * Now add a partially-grouped HashAgg partial Path where possible
7681 : */
7682 1928 : if (can_hash && cheapest_partial_path != NULL)
7683 : {
7684 862 : add_partial_path(partially_grouped_rel, (Path *)
7685 862 : create_agg_path(root,
7686 : partially_grouped_rel,
7687 : cheapest_partial_path,
7688 862 : partially_grouped_rel->reltarget,
7689 : AGG_HASHED,
7690 : AGGSPLIT_INITIAL_SERIAL,
7691 : root->processed_groupClause,
7692 : NIL,
7693 : agg_partial_costs,
7694 : dNumPartialPartialGroups));
7695 : }
7696 :
7697 : /*
7698 : * Add any partially aggregated paths generated by eager aggregation to
7699 : * the new upper relation after applying projection steps as needed.
7700 : */
7701 1928 : if (eager_agg_rel)
7702 : {
7703 : /* Add the paths */
7704 1174 : foreach(lc, eager_agg_rel->pathlist)
7705 : {
7706 725 : Path *path = (Path *) lfirst(lc);
7707 :
7708 : /* Shouldn't have any parameterized paths anymore */
7709 : Assert(path->param_info == NULL);
7710 :
7711 725 : path = (Path *) create_projection_path(root,
7712 : partially_grouped_rel,
7713 : path,
7714 725 : partially_grouped_rel->reltarget);
7715 :
7716 725 : add_path(partially_grouped_rel, path);
7717 : }
7718 :
7719 : /*
7720 : * Likewise add the partial paths, but only if parallelism is possible
7721 : * for partially_grouped_rel.
7722 : */
7723 449 : if (partially_grouped_rel->consider_parallel)
7724 : {
7725 1014 : foreach(lc, eager_agg_rel->partial_pathlist)
7726 : {
7727 606 : Path *path = (Path *) lfirst(lc);
7728 :
7729 : /* Shouldn't have any parameterized paths anymore */
7730 : Assert(path->param_info == NULL);
7731 :
7732 606 : path = (Path *) create_projection_path(root,
7733 : partially_grouped_rel,
7734 : path,
7735 606 : partially_grouped_rel->reltarget);
7736 :
7737 606 : add_partial_path(partially_grouped_rel, path);
7738 : }
7739 : }
7740 : }
7741 :
7742 : /*
7743 : * If there is an FDW that's responsible for all baserels of the query,
7744 : * let it consider adding partially grouped ForeignPaths.
7745 : */
7746 1928 : if (partially_grouped_rel->fdwroutine &&
7747 3 : partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
7748 : {
7749 3 : FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
7750 :
7751 3 : fdwroutine->GetForeignUpperPaths(root,
7752 : UPPERREL_PARTIAL_GROUP_AGG,
7753 : input_rel, partially_grouped_rel,
7754 : extra);
7755 : }
7756 :
7757 1928 : return partially_grouped_rel;
7758 : }
7759 :
7760 : /*
7761 : * make_ordered_path
7762 : * Return a path ordered by 'pathkeys' based on the given 'path'. May
7763 : * return NULL if it doesn't make sense to generate an ordered path in
7764 : * this case.
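                 : *
                 : * 'cheapest_path' is the one path we are always willing to sort in
                 : * full; any other path is considered only for an incremental sort
                 : * on a presorted prefix of 'pathkeys', and only when incremental
                 : * sort is enabled.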
7765 : */
7766 : static Path *
7767 33190 : make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
7768 : Path *cheapest_path, List *pathkeys, double limit_tuples)
7769 : {
7770 : bool is_sorted;
7771 : int presorted_keys;
7772 :
7773 33190 : is_sorted = pathkeys_count_contained_in(pathkeys,
7774 : path->pathkeys,
7775 : &presorted_keys);
7776 :
7777 33190 : if (!is_sorted)
7778 : {
7779 : /*
7780             : * Try at least sorting the cheapest path, and also try incrementally
7781             : * sorting any path that is already partially sorted (when incremental
7782             : * sort is disabled, paths with presorted keys need not be considered
7783             : * unless they're the cheapest input path).
7784 : */
7785 8568 : if (path != cheapest_path &&
7786 1656 : (presorted_keys == 0 || !enable_incremental_sort))
7787 743 : return NULL;
7788 :
7789 : /*
7790 : * We've no need to consider both a sort and incremental sort. We'll
7791 : * just do a sort if there are no presorted keys and an incremental
7792 : * sort when there are presorted keys.
7793 : */
7794 7825 : if (presorted_keys == 0 || !enable_incremental_sort)
7795 6825 : path = (Path *) create_sort_path(root,
7796 : rel,
7797 : path,
7798 : pathkeys,
7799 : limit_tuples);
7800 : else
7801 1000 : path = (Path *) create_incremental_sort_path(root,
7802 : rel,
7803 : path,
7804 : pathkeys,
7805 : presorted_keys,
7806 : limit_tuples);
7807 : }
7808 :
7809 32447 : return path;
7810 : }
7811 :
7812 : /*
7813 : * Generate Gather and Gather Merge paths for a grouping relation or partial
7814 : * grouping relation.
7815 : *
7816 : * generate_useful_gather_paths does most of the work, but we also consider a
7817 : * special case: we could try sorting the data by the group_pathkeys and then
7818 : * applying Gather Merge.
7819 : *
7820             : * NB: This function shouldn't be used for anything other than a grouped or
7821             : * partially grouped relation, not only because it explicitly references
7822             : * group_pathkeys but also because we pass "true" as the third argument to
7823             : * generate_useful_gather_paths().
7824 : */
7825 : static void
7826 1544 : gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
7827 : {
7828 : ListCell *lc;
7829 : Path *cheapest_partial_path;
7830 : List *groupby_pathkeys;
7831 :
7832 : /*
7833 : * This occurs after any partial aggregation has taken place, so trim off
7834 : * any pathkeys added for ORDER BY / DISTINCT aggregates.
7835 : */
7836 1544 : if (list_length(root->group_pathkeys) > root->num_groupby_pathkeys)
7837 9 : groupby_pathkeys = list_copy_head(root->group_pathkeys,
7838 : root->num_groupby_pathkeys);
7839 : else
7840 1535 : groupby_pathkeys = root->group_pathkeys;
7841 :
7842 : /* Try Gather for unordered paths and Gather Merge for ordered ones. */
7843 1544 : generate_useful_gather_paths(root, rel, true);
7844 :
7845 1544 : cheapest_partial_path = linitial(rel->partial_pathlist);
7846 :
7847 : /* XXX Shouldn't this also consider the group-key-reordering? */
7848 3652 : foreach(lc, rel->partial_pathlist)
7849 : {
7850 2108 : Path *path = (Path *) lfirst(lc);
7851 : bool is_sorted;
7852 : int presorted_keys;
7853 : double total_groups;
7854 :
7855 2108 : is_sorted = pathkeys_count_contained_in(groupby_pathkeys,
7856 : path->pathkeys,
7857 : &presorted_keys);
7858 :
7859 2108 : if (is_sorted)
7860 1379 : continue;
7861 :
7862 : /*
7863             : * Try at least sorting the cheapest path, and also try incrementally
7864             : * sorting any path that is already partially sorted (when incremental
7865             : * sort is disabled, paths with presorted keys need not be considered
7866             : * unless they're the cheapest input path).
7867 : */
7868 729 : if (path != cheapest_partial_path &&
7869 0 : (presorted_keys == 0 || !enable_incremental_sort))
7870 0 : continue;
7871 :
7872 : /*
7873 : * We've no need to consider both a sort and incremental sort. We'll
7874 : * just do a sort if there are no presorted keys and an incremental
7875 : * sort when there are presorted keys.
7876 : */
7877 729 : if (presorted_keys == 0 || !enable_incremental_sort)
7878 729 : path = (Path *) create_sort_path(root, rel, path,
7879 : groupby_pathkeys,
7880 : -1.0);
7881 : else
7882 0 : path = (Path *) create_incremental_sort_path(root,
7883 : rel,
7884 : path,
7885 : groupby_pathkeys,
7886 : presorted_keys,
7887 : -1.0);
7888 729 : total_groups = compute_gather_rows(path);
7889 : path = (Path *)
7890 729 : create_gather_merge_path(root,
7891 : rel,
7892 : path,
7893 729 : rel->reltarget,
7894 : groupby_pathkeys,
7895 : NULL,
7896 : &total_groups);
7897 :
7898 729 : add_path(rel, path);
7899 : }
7900 1544 : }
7901 :
7902 : /*
7903 : * can_partial_agg
7904 : *
7905 : * Determines whether or not partial grouping and/or aggregation is possible.
7906 : * Returns true when possible, false otherwise.
7907 : */
7908 : static bool
7909 22960 : can_partial_agg(PlannerInfo *root)
7910 : {
7911 22960 : Query *parse = root->parse;
7912 :
7913 22960 : if (!parse->hasAggs && parse->groupClause == NIL)
7914 : {
7915 : /*
7916             : * We don't know how to do partial aggregation unless we have either
7917             : * some aggregates or a grouping clause.
7918 : */
7919 0 : return false;
7920 : }
7921 22960 : else if (parse->groupingSets)
7922 : {
7923 : /* We don't know how to do grouping sets in parallel. */
7924 498 : return false;
7925 : }
7926 22462 : else if (root->hasNonPartialAggs || root->hasNonSerialAggs)
7927 : {
7928 : /* Insufficient support for partial mode. */
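                 : /*
                 : * For instance, an aggregate might lack a combine function, or use
                 : * DISTINCT/ORDER BY within the aggregate call (hasNonPartialAggs),
                 : * or its transition state might not be serializable for transfer
                 : * between worker and leader processes (hasNonSerialAggs).
                 : */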
7929 1951 : return false;
7930 : }
7931 :
7932 : /* Everything looks good. */
7933 20511 : return true;
7934 : }
7935 :
7936 : /*
7937 : * apply_scanjoin_target_to_paths
7938 : *
7939 : * Adjust the final scan/join relation, and recursively all of its children,
7940 : * to generate the final scan/join target. It would be more correct to model
7941 : * this as a separate planning step with a new RelOptInfo at the toplevel and
7942 : * for each child relation, but doing it this way is noticeably cheaper.
7943 : * Maybe that problem can be solved at some point, but for now we do this.
7944 : *
7945 : * If tlist_same_exprs is true, then the scan/join target to be applied has
7946 : * the same expressions as the existing reltarget, so we need only insert the
7947 : * appropriate sortgroupref information. By avoiding the creation of
7948 : * projection paths we save effort both immediately and at plan creation time.
7949 : */
7950 : static void
7951 283982 : apply_scanjoin_target_to_paths(PlannerInfo *root,
7952 : RelOptInfo *rel,
7953 : List *scanjoin_targets,
7954 : List *scanjoin_targets_contain_srfs,
7955 : bool scanjoin_target_parallel_safe,
7956 : bool tlist_same_exprs)
7957 : {
7958 283982 : bool rel_is_partitioned = IS_PARTITIONED_REL(rel);
7959 : PathTarget *scanjoin_target;
7960 : ListCell *lc;
7961 :
7962 : /* This recurses, so be paranoid. */
7963 283982 : check_stack_depth();
7964 :
7965 : /*
7966 : * If the rel only has Append and MergeAppend paths, we want to drop its
7967 : * existing paths and generate new ones. This function would still be
7968 : * correct if we kept the existing paths: we'd modify them to generate the
7969 : * correct target above the partitioning Append, and then they'd compete
7970 : * on cost with paths generating the target below the Append. However, in
7971 : * our current cost model the latter way is always the same or cheaper
7972 : * cost, so modifying the existing paths would just be useless work.
7973 : * Moreover, when the cost is the same, varying roundoff errors might
7974 : * sometimes allow an existing path to be picked, resulting in undesirable
7975 : * cross-platform plan variations. So we drop old paths and thereby force
7976 : * the work to be done below the Append.
7977 : *
7978 : * However, there are several cases when this optimization is not safe. If
7979 : * the rel isn't partitioned, then none of the paths will be Append or
7980 : * MergeAppend paths, so we should definitely not do this. If it is
7981 : * partitioned but is a joinrel, it may have Append and MergeAppend paths,
7982 : * but it can also have join paths that we can't afford to discard.
7983 : *
7984 : * Some care is needed, because we have to allow
7985 : * generate_useful_gather_paths to see the old partial paths in the next
7986 : * stanza. Hence, zap the main pathlist here, then allow
7987 : * generate_useful_gather_paths to add path(s) to the main list, and
7988 : * finally zap the partial pathlist.
7989 : */
7990 283982 : if (rel_is_partitioned && IS_SIMPLE_REL(rel))
7991 5856 : rel->pathlist = NIL;
7992 :
7993 : /*
7994 : * If the scan/join target is not parallel-safe, partial paths cannot
7995 : * generate it.
7996 : */
7997 283982 : if (!scanjoin_target_parallel_safe)
7998 : {
7999 : /*
8000 : * Since we can't generate the final scan/join target in parallel
8001 : * workers, this is our last opportunity to use any partial paths that
8002 : * exist; so build Gather path(s) that use them and emit whatever the
8003 : * current reltarget is. We don't do this in the case where the
8004 : * target is parallel-safe, since we will be able to generate superior
8005 : * paths by doing it after the final scan/join target has been
8006 : * applied.
8007 : */
8008 40621 : generate_useful_gather_paths(root, rel, false);
8009 :
8010 : /* Can't use parallel query above this level. */
8011 40621 : rel->partial_pathlist = NIL;
8012 40621 : rel->consider_parallel = false;
8013 : }
8014 :
8015 : /* Finish dropping old paths for a partitioned rel, per comment above */
8016 283982 : if (rel_is_partitioned && IS_SIMPLE_REL(rel))
8017 5856 : rel->partial_pathlist = NIL;
8018 :
8019 : /* Extract SRF-free scan/join target. */
8020 283982 : scanjoin_target = linitial_node(PathTarget, scanjoin_targets);
8021 :
8022 : /*
8023 : * Apply the SRF-free scan/join target to each existing path.
8024 : *
8025 : * If the tlist exprs are the same, we can just inject the sortgroupref
8026 : * information into the existing pathtargets. Otherwise, replace each
8027 : * path with a projection path that generates the SRF-free scan/join
8028 : * target. This can't change the ordering of paths within rel->pathlist,
8029 : * so we just modify the list in place.
8030 : */
8031 590035 : foreach(lc, rel->pathlist)
8032 : {
8033 306053 : Path *subpath = (Path *) lfirst(lc);
8034 :
8035 : /* Shouldn't have any parameterized paths anymore */
8036 : Assert(subpath->param_info == NULL);
8037 :
8038 306053 : if (tlist_same_exprs)
8039 108610 : subpath->pathtarget->sortgrouprefs =
8040 108610 : scanjoin_target->sortgrouprefs;
8041 : else
8042 : {
8043 : Path *newpath;
8044 :
8045 197443 : newpath = (Path *) create_projection_path(root, rel, subpath,
8046 : scanjoin_target);
8047 197443 : lfirst(lc) = newpath;
8048 : }
8049 : }
8050 :
8051 : /* Likewise adjust the targets for any partial paths. */
8052 296552 : foreach(lc, rel->partial_pathlist)
8053 : {
8054 12570 : Path *subpath = (Path *) lfirst(lc);
8055 :
8056 : /* Shouldn't have any parameterized paths anymore */
8057 : Assert(subpath->param_info == NULL);
8058 :
8059 12570 : if (tlist_same_exprs)
8060 10068 : subpath->pathtarget->sortgrouprefs =
8061 10068 : scanjoin_target->sortgrouprefs;
8062 : else
8063 : {
8064 : Path *newpath;
8065 :
8066 2502 : newpath = (Path *) create_projection_path(root, rel, subpath,
8067 : scanjoin_target);
8068 2502 : lfirst(lc) = newpath;
8069 : }
8070 : }
8071 :
8072 : /*
8073 : * Now, if final scan/join target contains SRFs, insert ProjectSetPath(s)
8074 : * atop each existing path. (Note that this function doesn't look at the
8075 : * cheapest-path fields, which is a good thing because they're bogus right
8076 : * now.)
8077 : */
8078 283982 : if (root->parse->hasTargetSRFs)
8079 6041 : adjust_paths_for_srfs(root, rel,
8080 : scanjoin_targets,
8081 : scanjoin_targets_contain_srfs);
8082 :
8083 : /*
8084 : * Update the rel's target to be the final (with SRFs) scan/join target.
8085 : * This now matches the actual output of all the paths, and we might get
8086 : * confused in createplan.c if they don't agree. We must do this now so
8087 : * that any append paths made in the next part will use the correct
8088 : * pathtarget (cf. create_append_path).
8089 : *
8090 : * Note that this is also necessary if GetForeignUpperPaths() gets called
8091 : * on the final scan/join relation or on any of its children, since the
8092 : * FDW might look at the rel's target to create ForeignPaths.
8093 : */
8094 283982 : rel->reltarget = llast_node(PathTarget, scanjoin_targets);
8095 :
8096 : /*
8097 : * If the relation is partitioned, recursively apply the scan/join target
8098 : * to all partitions, and generate brand-new Append paths in which the
8099 : * scan/join target is computed below the Append rather than above it.
8100 : * Since Append is not projection-capable, that might save a separate
8101 : * Result node, and it also is important for partitionwise aggregate.
8102 : */
8103 283982 : if (rel_is_partitioned)
8104 : {
8105 6639 : List *live_children = NIL;
8106 : int i;
8107 :
8108 : /* Adjust each partition. */
8109 6639 : i = -1;
8110 19006 : while ((i = bms_next_member(rel->live_parts, i)) >= 0)
8111 : {
8112 12367 : RelOptInfo *child_rel = rel->part_rels[i];
8113 : AppendRelInfo **appinfos;
8114 : int nappinfos;
8115 12367 : List *child_scanjoin_targets = NIL;
8116 :
8117 : Assert(child_rel != NULL);
8118 :
8119 : /* Dummy children can be ignored. */
8120 12367 : if (IS_DUMMY_REL(child_rel))
8121 21 : continue;
8122 :
8123 : /* Translate scan/join targets for this child. */
8124 12346 : appinfos = find_appinfos_by_relids(root, child_rel->relids,
8125 : &nappinfos);
8126 24692 : foreach(lc, scanjoin_targets)
8127 : {
8128 12346 : PathTarget *target = lfirst_node(PathTarget, lc);
8129 :
8130 12346 : target = copy_pathtarget(target);
8131 12346 : target->exprs = (List *)
8132 12346 : adjust_appendrel_attrs(root,
8133 12346 : (Node *) target->exprs,
8134 : nappinfos, appinfos);
8135 12346 : child_scanjoin_targets = lappend(child_scanjoin_targets,
8136 : target);
8137 : }
8138 12346 : pfree(appinfos);
8139 :
8140 : /* Recursion does the real work. */
8141 12346 : apply_scanjoin_target_to_paths(root, child_rel,
8142 : child_scanjoin_targets,
8143 : scanjoin_targets_contain_srfs,
8144 : scanjoin_target_parallel_safe,
8145 : tlist_same_exprs);
8146 :
8147 : /* Save non-dummy children for Append paths. */
8148 12346 : if (!IS_DUMMY_REL(child_rel))
8149 12346 : live_children = lappend(live_children, child_rel);
8150 : }
8151 :
8152 : /* Build new paths for this relation by appending child paths. */
8153 6639 : add_paths_to_append_rel(root, rel, live_children);
8154 : }
8155 :
8156 : /*
8157 : * Consider generating Gather or Gather Merge paths. We must only do this
8158 : * if the relation is parallel safe, and we don't do it for child rels to
8159 : * avoid creating multiple Gather nodes within the same plan. We must do
8160 : * this after all paths have been generated and before set_cheapest, since
8161 : * one of the generated paths may turn out to be the cheapest one.
8162 : */
8163 283982 : if (rel->consider_parallel && !IS_OTHER_REL(rel))
8164 90840 : generate_useful_gather_paths(root, rel, false);
8165 :
8166 : /*
8167 : * Reassess which paths are the cheapest, now that we've potentially added
8168 : * new Gather (or Gather Merge) and/or Append (or MergeAppend) paths to
8169 : * this relation.
8170 : */
8171 283982 : set_cheapest(rel);
8172 283982 : }
8173 :
8174 : /*
8175 : * create_partitionwise_grouping_paths
8176 : *
8177             : * If the partition keys of the input relation are part of the GROUP BY clause, all
8178 : * the rows belonging to a given group come from a single partition. This
8179 : * allows aggregation/grouping over a partitioned relation to be broken down
8180 : * into aggregation/grouping on each partition. This should be no worse, and
8181 : * often better, than the normal approach.
8182 : *
8183 : * However, if the GROUP BY clause does not contain all the partition keys,
8184 : * rows from a given group may be spread across multiple partitions. In that
8185 : * case, we perform partial aggregation for each group, append the results,
8186 : * and then finalize aggregation. This is less certain to win than the
8187 : * previous case. It may win if the PartialAggregate stage greatly reduces
8188 : * the number of groups, because fewer rows will pass through the Append node.
8189 : * It may lose if we have lots of small groups.
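                 : *
                 : * For example, with a table partitioned by (a),
                 : *   SELECT a, count(*) FROM t GROUP BY a;
                 : * can be aggregated fully within each partition, while
                 : *   SELECT b, count(*) FROM t GROUP BY b;
                 : * needs per-partition partial aggregation plus a finalization step
                 : * above the Append.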
8190 : */
8191 : static void
8192 413 : create_partitionwise_grouping_paths(PlannerInfo *root,
8193 : RelOptInfo *input_rel,
8194 : RelOptInfo *grouped_rel,
8195 : RelOptInfo *partially_grouped_rel,
8196 : const AggClauseCosts *agg_costs,
8197 : grouping_sets_data *gd,
8198 : PartitionwiseAggregateType patype,
8199 : GroupPathExtraData *extra)
8200 : {
8201 413 : List *grouped_live_children = NIL;
8202 413 : List *partially_grouped_live_children = NIL;
8203 413 : PathTarget *target = grouped_rel->reltarget;
8204 413 : bool partial_grouping_valid = true;
8205 : int i;
8206 :
8207 : Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
8208 : Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
8209 : partially_grouped_rel != NULL);
8210 :
8211 : /* Add paths for partitionwise aggregation/grouping. */
8212 413 : i = -1;
8213 1496 : while ((i = bms_next_member(input_rel->live_parts, i)) >= 0)
8214 : {
8215 1083 : RelOptInfo *child_input_rel = input_rel->part_rels[i];
8216 : PathTarget *child_target;
8217 : AppendRelInfo **appinfos;
8218 : int nappinfos;
8219 : GroupPathExtraData child_extra;
8220 : RelOptInfo *child_grouped_rel;
8221 : RelOptInfo *child_partially_grouped_rel;
8222 :
8223 : Assert(child_input_rel != NULL);
8224 :
8225 : /* Dummy children can be ignored. */
8226 1083 : if (IS_DUMMY_REL(child_input_rel))
8227 0 : continue;
8228 :
8229 1083 : child_target = copy_pathtarget(target);
8230 :
8231 : /*
8232 : * Copy the given "extra" structure as is and then override the
8233 : * members specific to this child.
8234 : */
8235 1083 : memcpy(&child_extra, extra, sizeof(child_extra));
8236 :
8237 1083 : appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
8238 : &nappinfos);
8239 :
8240 1083 : child_target->exprs = (List *)
8241 1083 : adjust_appendrel_attrs(root,
8242 1083 : (Node *) target->exprs,
8243 : nappinfos, appinfos);
8244 :
8245 : /* Translate havingQual and targetList. */
8246 1083 : child_extra.havingQual = (Node *)
8247 : adjust_appendrel_attrs(root,
8248 : extra->havingQual,
8249 : nappinfos, appinfos);
8250 1083 : child_extra.targetList = (List *)
8251 1083 : adjust_appendrel_attrs(root,
8252 1083 : (Node *) extra->targetList,
8253 : nappinfos, appinfos);
8254 :
8255 : /*
8256 : * extra->patype was the value computed for our parent rel; patype is
8257 : * the value for this relation. For the child, our value is its
8258 : * parent rel's value.
8259 : */
8260 1083 : child_extra.patype = patype;
8261 :
8262 : /*
8263 : * Create grouping relation to hold fully aggregated grouping and/or
8264 : * aggregation paths for the child.
8265 : */
8266 1083 : child_grouped_rel = make_grouping_rel(root, child_input_rel,
8267 : child_target,
8268 1083 : extra->target_parallel_safe,
8269 : child_extra.havingQual);
8270 :
8271 : /* Create grouping paths for this child relation. */
8272 1083 : create_ordinary_grouping_paths(root, child_input_rel,
8273 : child_grouped_rel,
8274 : agg_costs, gd, &child_extra,
8275 : &child_partially_grouped_rel);
8276 :
8277 1083 : if (child_partially_grouped_rel)
8278 : {
8279 : partially_grouped_live_children =
8280 771 : lappend(partially_grouped_live_children,
8281 : child_partially_grouped_rel);
8282 : }
8283 : else
8284 312 : partial_grouping_valid = false;
8285 :
8286 1083 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8287 : {
8288 654 : set_cheapest(child_grouped_rel);
8289 654 : grouped_live_children = lappend(grouped_live_children,
8290 : child_grouped_rel);
8291 : }
8292 :
8293 1083 : pfree(appinfos);
8294 : }
8295 :
8296 : /*
8297 : * Try to create append paths for partially grouped children. For full
8298 : * partitionwise aggregation, we might have paths in the partial_pathlist
8299 : * if parallel aggregation is possible. For partial partitionwise
8300 : * aggregation, we may have paths in both pathlist and partial_pathlist.
8301 : *
8302 : * NB: We must have a partially grouped path for every child in order to
8303 : * generate a partially grouped path for this relation.
8304 : */
8305 413 : if (partially_grouped_rel && partial_grouping_valid)
8306 : {
8307 : Assert(partially_grouped_live_children != NIL);
8308 :
8309 301 : add_paths_to_append_rel(root, partially_grouped_rel,
8310 : partially_grouped_live_children);
8311 : }
8312 :
8313 : /* If possible, create append paths for fully grouped children. */
8314 413 : if (patype == PARTITIONWISE_AGGREGATE_FULL)
8315 : {
8316 : Assert(grouped_live_children != NIL);
8317 :
8318 244 : add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
8319 : }
8320 413 : }
8321 :
8322 : /*
8323 : * group_by_has_partkey
8324 : *
8325             : * Returns true if all the partition keys of the given relation appear in
8326             : * the GROUP BY clause (with matching collations), false otherwise.
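                 : *
                 : * For example, if the relation is partitioned by (a), then
                 : * GROUP BY a, b passes this test, while GROUP BY b alone does not.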
8327 : */
8328 : static bool
8329 386 : group_by_has_partkey(RelOptInfo *input_rel,
8330 : List *targetList,
8331 : List *groupClause)
8332 : {
8333 386 : List *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
8334 386 : int cnt = 0;
8335 : int partnatts;
8336 :
8337 : /* Input relation should be partitioned. */
8338 : Assert(input_rel->part_scheme);
8339 :
8340             : /* Rule out early if there are no partition keys present. */
8341 386 : if (!input_rel->partexprs)
8342 0 : return false;
8343 :
8344 386 : partnatts = input_rel->part_scheme->partnatts;
8345 :
8346 648 : for (cnt = 0; cnt < partnatts; cnt++)
8347 : {
8348 404 : List *partexprs = input_rel->partexprs[cnt];
8349 : ListCell *lc;
8350 404 : bool found = false;
8351 :
8352 603 : foreach(lc, partexprs)
8353 : {
8354 : ListCell *lg;
8355 467 : Expr *partexpr = lfirst(lc);
8356 467 : Oid partcoll = input_rel->part_scheme->partcollation[cnt];
8357 :
8358 726 : foreach(lg, groupexprs)
8359 : {
8360 527 : Expr *groupexpr = lfirst(lg);
8361 527 : Oid groupcoll = exprCollation((Node *) groupexpr);
8362 :
8363 : /*
8364 : * Note: we can assume there is at most one RelabelType node;
8365             : * eval_const_expressions() will have collapsed multiple such
8366             : * nodes into one.
8367 : */
8368 527 : if (IsA(groupexpr, RelabelType))
8369 12 : groupexpr = ((RelabelType *) groupexpr)->arg;
8370 :
8371 527 : if (equal(groupexpr, partexpr))
8372 : {
8373 : /*
8374 : * Reject a match if the grouping collation does not match
8375 : * the partitioning collation.
8376 : */
8377 268 : if (OidIsValid(partcoll) && OidIsValid(groupcoll) &&
8378 : partcoll != groupcoll)
8379 6 : return false;
8380 :
8381 262 : found = true;
8382 262 : break;
8383 : }
8384 : }
8385 :
8386 461 : if (found)
8387 262 : break;
8388 : }
8389 :
8390 : /*
8391 : * If none of the partition key expressions match with any of the
8392 : * GROUP BY expression, return false.
8393 : */
8394 398 : if (!found)
8395 136 : return false;
8396 : }
8397 :
8398 244 : return true;
8399 : }
8400 :
8401 : /*
8402 : * generate_setop_child_grouplist
8403 : * Build a SortGroupClause list defining the sort/grouping properties
8404 : * of the child of a set operation.
8405 : *
8406             : * This is similar to generate_setop_grouplist() but differs in that the
8407             : * setop child query's targetlist entries may already have a tleSortGroupRef
8408             : * assigned for other purposes, such as GROUP BYs. Here we keep the
8409             : * SortGroupClause list in the same order as 'op' groupClauses and just adjust
8410             : * the tleSortGroupRef to reference the TargetEntry's 'ressortgroupref'. If
8411             : * any of the columns in the targetlist don't match the setop's colTypes,
8412             : * then we return an empty list. This may leave some TLEs with unreferenced
8413 : * ressortgroupref markings, but that's harmless.
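                 : *
                 : * For example, for the child query in (hypothetical tables t1, t2)
                 : *   (SELECT a, b FROM t1 GROUP BY a, b) UNION SELECT x, y FROM t2;
                 : * t1's targetlist entries may already carry sortgrouprefs from the
                 : * GROUP BY; assignSortGroupRef() then reuses those instead of
                 : * assigning new ones.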
8414 : */
8415 : static List *
8416 6388 : generate_setop_child_grouplist(SetOperationStmt *op, List *targetlist)
8417 : {
8418 6388 : List *grouplist = copyObject(op->groupClauses);
8419 : ListCell *lg;
8420 : ListCell *lt;
8421 : ListCell *ct;
8422 :
8423 6388 : lg = list_head(grouplist);
8424 6388 : ct = list_head(op->colTypes);
8425 24627 : foreach(lt, targetlist)
8426 : {
8427 18446 : TargetEntry *tle = (TargetEntry *) lfirst(lt);
8428 : SortGroupClause *sgc;
8429 : Oid coltype;
8430 :
8431 : /* resjunk columns could have sortgrouprefs. Leave these alone */
8432 18446 : if (tle->resjunk)
8433 0 : continue;
8434 :
8435 : /*
8436             : * We expect every non-resjunk target to have a SortGroupClause and a
8437             : * corresponding colTypes entry.
8438 : */
8439 : Assert(lg != NULL);
8440 : Assert(ct != NULL);
8441 18446 : sgc = (SortGroupClause *) lfirst(lg);
8442 18446 : coltype = lfirst_oid(ct);
8443 :
8444 : /* reject if target type isn't the same as the setop target type */
8445 18446 : if (coltype != exprType((Node *) tle->expr))
8446 207 : return NIL;
8447 :
8448 18239 : lg = lnext(grouplist, lg);
8449 18239 : ct = lnext(op->colTypes, ct);
8450 :
8451 : /* assign a tleSortGroupRef, or reuse the existing one */
8452 18239 : sgc->tleSortGroupRef = assignSortGroupRef(tle, targetlist);
8453 : }
8454 :
8455 : Assert(lg == NULL);
8456 : Assert(ct == NULL);
8457 :
8458 6181 : return grouplist;
8459 : }
8460 :
8461 : /*
8462 : * create_unique_paths
8463 : * Build a new RelOptInfo containing Paths that represent elimination of
8464 : * distinct rows from the input data. Distinct-ness is defined according to
8465 : * the needs of the semijoin represented by sjinfo. If it is not possible
8466 : * to identify how to make the data unique, NULL is returned.
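                 : *
                 : * For example, the semijoin arising from
                 : *   SELECT * FROM a WHERE a.x IN (SELECT b.y FROM b);
                 : * can be implemented by first eliminating duplicate values of b.y
                 : * and then performing an ordinary inner join against the result.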
8467 : *
8468 : * If used at all, this is likely to be called repeatedly on the same rel,
8469 : * so we cache the result.
8470 : */
8471 : RelOptInfo *
8472 4463 : create_unique_paths(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo *sjinfo)
8473 : {
8474 : RelOptInfo *unique_rel;
8475 4463 : List *sortPathkeys = NIL;
8476 4463 : List *groupClause = NIL;
8477 : MemoryContext oldcontext;
8478 :
8479 : /* Caller made a mistake if SpecialJoinInfo is the wrong one */
8480 : Assert(sjinfo->jointype == JOIN_SEMI);
8481 : Assert(bms_equal(rel->relids, sjinfo->syn_righthand));
8482 :
8483 : /* If result already cached, return it */
8484 4463 : if (rel->unique_rel)
8485 918 : return rel->unique_rel;
8486 :
8487 : /* If it's not possible to unique-ify, return NULL */
8488 3545 : if (!(sjinfo->semi_can_btree || sjinfo->semi_can_hash))
8489 66 : return NULL;
8490 :
8491 : /*
8492 : * Punt if this is a child relation and we failed to build a unique-ified
8493 : * relation for its parent. This can happen if all the RHS columns were
8494 : * found to be equated to constants when unique-ifying the parent table,
8495 : * leaving no columns to unique-ify.
8496 : */
8497 3479 : if (IS_OTHER_REL(rel) && rel->top_parent->unique_rel == NULL)
8498 6 : return NULL;
8499 :
8500 : /*
8501 : * When called during GEQO join planning, we are in a short-lived memory
8502 : * context. We must make sure that the unique rel and any subsidiary data
8503 : * structures created for a baserel survive the GEQO cycle, else the
8504 : * baserel is trashed for future GEQO cycles. On the other hand, when we
8505 : * are creating those for a joinrel during GEQO, we don't want them to
8506 : * clutter the main planning context. Upshot is that the best solution is
8507 : * to explicitly allocate memory in the same context the given RelOptInfo
8508 : * is in.
8509 : */
8510 3473 : oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
8511 :
8512 3473 : unique_rel = makeNode(RelOptInfo);
8513 3473 : memcpy(unique_rel, rel, sizeof(RelOptInfo));
8514 :
8515 : /*
8516 : * clear path info
8517 : */
8518 3473 : unique_rel->pathlist = NIL;
8519 3473 : unique_rel->ppilist = NIL;
8520 3473 : unique_rel->partial_pathlist = NIL;
8521 3473 : unique_rel->cheapest_startup_path = NULL;
8522 3473 : unique_rel->cheapest_total_path = NULL;
8523 3473 : unique_rel->cheapest_parameterized_paths = NIL;
8524 :
8525 : /*
8526 : * Build the target list for the unique rel. We also build the pathkeys
8527 : * that represent the ordering requirements for the sort-based
8528 : * implementation, and the list of SortGroupClause nodes that represent
8529 : * the columns to be grouped on for the hash-based implementation.
8530 : *
8531 : * For a child rel, we can construct these fields from those of its
8532 : * parent.
8533 : */
8534 3473 : if (IS_OTHER_REL(rel))
8535 216 : {
8536 : PathTarget *child_unique_target;
8537 : PathTarget *parent_unique_target;
8538 :
8539 216 : parent_unique_target = rel->top_parent->unique_rel->reltarget;
8540 :
8541 216 : child_unique_target = copy_pathtarget(parent_unique_target);
8542 :
8543 : /* Translate the target expressions */
8544 216 : child_unique_target->exprs = (List *)
8545 216 : adjust_appendrel_attrs_multilevel(root,
8546 216 : (Node *) parent_unique_target->exprs,
8547 : rel,
8548 216 : rel->top_parent);
8549 :
8550 216 : unique_rel->reltarget = child_unique_target;
8551 :
8552 216 : sortPathkeys = rel->top_parent->unique_pathkeys;
8553 216 : groupClause = rel->top_parent->unique_groupclause;
8554 : }
8555 : else
8556 : {
8557 : List *newtlist;
8558 : int nextresno;
8559 3257 : List *sortList = NIL;
8560 : ListCell *lc1;
8561 : ListCell *lc2;
8562 :
8563 : /*
8564 : * The values we are supposed to unique-ify may be expressions in the
8565 : * variables of the input rel's targetlist. We have to add any such
8566 : * expressions to the unique rel's targetlist.
8567 : *
8568 : * To complicate matters, some of the values to be unique-ified may be
8569 : * known redundant by the EquivalenceClass machinery (e.g., because
8570 : * they have been equated to constants). There is no need to compare
8571 : * such values during unique-ification, and indeed we had better not
8572 : * try because the Vars involved may not have propagated as high as
8573 : * the semijoin's level. We use make_pathkeys_for_sortclauses to
8574 : * detect such cases, which is a tad inefficient but it doesn't seem
8575 : * worth building specialized infrastructure for this.
8576 : */
8577 3257 : newtlist = make_tlist_from_pathtarget(rel->reltarget);
8578 3257 : nextresno = list_length(newtlist) + 1;
8579 :
8580 6631 : forboth(lc1, sjinfo->semi_rhs_exprs, lc2, sjinfo->semi_operators)
8581 : {
8582 3374 : Expr *uniqexpr = lfirst(lc1);
8583 3374 : Oid in_oper = lfirst_oid(lc2);
8584 : Oid sortop;
8585 : TargetEntry *tle;
8586 3374 : bool made_tle = false;
8587 :
8588 3374 : tle = tlist_member(uniqexpr, newtlist);
8589 3374 : if (!tle)
8590 : {
8591 1634 : tle = makeTargetEntry(uniqexpr,
8592 : nextresno,
8593 : NULL,
8594 : false);
8595 1634 : newtlist = lappend(newtlist, tle);
8596 1634 : nextresno++;
8597 1634 : made_tle = true;
8598 : }
8599 :
8600 : /*
8601 : * Try to build an ORDER BY list to sort the input compatibly. We
8602 : * do this for each sortable clause even when the clauses are not
8603 : * all sortable, so that we can detect clauses that are redundant
8604 : * according to the pathkey machinery.
8605 : */
8606 3374 : sortop = get_ordering_op_for_equality_op(in_oper, false);
8607 3374 : if (OidIsValid(sortop))
8608 : {
8609 : Oid eqop;
8610 : SortGroupClause *sortcl;
8611 :
8612 : /*
8613 : * The Unique node will need equality operators. Normally
8614 : * these are the same as the IN clause operators, but if those
8615 : * are cross-type operators then the equality operators are
8616 : * the ones for the IN clause operators' RHS datatype.
8617 : */
8618 3374 : eqop = get_equality_op_for_ordering_op(sortop, NULL);
8619 3374 : if (!OidIsValid(eqop)) /* shouldn't happen */
8620 0 : elog(ERROR, "could not find equality operator for ordering operator %u",
8621 : sortop);
8622 :
8623 3374 : sortcl = makeNode(SortGroupClause);
8624 3374 : sortcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8625 3374 : sortcl->eqop = eqop;
8626 3374 : sortcl->sortop = sortop;
8627 3374 : sortcl->reverse_sort = false;
8628 3374 : sortcl->nulls_first = false;
8629 3374 : sortcl->hashable = false; /* no need to make this accurate */
8630 3374 : sortList = lappend(sortList, sortcl);
8631 :
8632 : /*
8633 : * At each step, convert the SortGroupClause list to pathkey
8634 : * form. If the just-added SortGroupClause is redundant, the
8635 : * result will be shorter than the SortGroupClause list.
8636 : */
8637 3374 : sortPathkeys = make_pathkeys_for_sortclauses(root, sortList,
8638 : newtlist);
8639 3374 : if (list_length(sortPathkeys) != list_length(sortList))
8640 : {
8641 : /* Drop the redundant SortGroupClause */
8642 1026 : sortList = list_delete_last(sortList);
8643 : Assert(list_length(sortPathkeys) == list_length(sortList));
8644 : /* Undo tlist addition, if we made one */
8645 1026 : if (made_tle)
8646 : {
8647 6 : newtlist = list_delete_last(newtlist);
8648 6 : nextresno--;
8649 : }
8650 : /* We need not consider this clause for hashing, either */
8651 1026 : continue;
8652 : }
8653 : }
8654 0 : else if (sjinfo->semi_can_btree) /* shouldn't happen */
8655 0 : elog(ERROR, "could not find ordering operator for equality operator %u",
8656 : in_oper);
8657 :
8658 2348 : if (sjinfo->semi_can_hash)
8659 : {
8660 : /* Create a GROUP BY list for the Agg node to use */
8661 : Oid eq_oper;
8662 : SortGroupClause *groupcl;
8663 :
8664 : /*
8665 : * Get the hashable equality operators for the Agg node to
8666 : * use. Normally these are the same as the IN clause
8667 : * operators, but if those are cross-type operators then the
8668 : * equality operators are the ones for the IN clause
8669 : * operators' RHS datatype.
8670 : */
8671 2348 : if (!get_compatible_hash_operators(in_oper, NULL, &eq_oper))
8672 0 : elog(ERROR, "could not find compatible hash operator for operator %u",
8673 : in_oper);
8674 :
8675 2348 : groupcl = makeNode(SortGroupClause);
8676 2348 : groupcl->tleSortGroupRef = assignSortGroupRef(tle, newtlist);
8677 2348 : groupcl->eqop = eq_oper;
8678 2348 : groupcl->sortop = sortop;
8679 2348 : groupcl->reverse_sort = false;
8680 2348 : groupcl->nulls_first = false;
8681 2348 : groupcl->hashable = true;
8682 2348 : groupClause = lappend(groupClause, groupcl);
8683 : }
8684 : }
8685 :
8686 : /*
8687 : * Done building the sortPathkeys and groupClause. But the
8688 : * sortPathkeys are bogus if not all the clauses were sortable.
8689 : */
8690 3257 : if (!sjinfo->semi_can_btree)
8691 0 : sortPathkeys = NIL;
8692 :
8693 : /*
8694 : * It can happen that all the RHS columns are equated to constants.
8695 : * We'd have to do something special to unique-ify in that case, and
8696 : * it's such an unlikely-in-the-real-world case that it's not worth
8697 : * the effort. So just punt if we found no columns to unique-ify.
8698 : */
8699 3257 : if (sortPathkeys == NIL && groupClause == NIL)
8700 : {
8701 975 : MemoryContextSwitchTo(oldcontext);
8702 975 : return NULL;
8703 : }
8704 :
8705 : /* Convert the required targetlist back to PathTarget form */
8706 2282 : unique_rel->reltarget = create_pathtarget(root, newtlist);
8707 : }
8708 :
8709 : /* build unique paths based on input rel's pathlist */
8710 2498 : create_final_unique_paths(root, rel, sortPathkeys, groupClause,
8711 : sjinfo, unique_rel);
8712 :
8713 : /* build unique paths based on input rel's partial_pathlist */
8714 2498 : create_partial_unique_paths(root, rel, sortPathkeys, groupClause,
8715 : sjinfo, unique_rel);
8716 :
8717 : /* Now choose the best path(s) */
8718 2498 : set_cheapest(unique_rel);
8719 :
8720 : /*
8721 : * There shouldn't be any partial paths for the unique relation;
8722 : * otherwise, we won't be able to properly guarantee uniqueness.
8723 : */
8724 : Assert(unique_rel->partial_pathlist == NIL);
8725 :
8726 : /* Cache the result */
8727 2498 : rel->unique_rel = unique_rel;
8728 2498 : rel->unique_pathkeys = sortPathkeys;
8729 2498 : rel->unique_groupclause = groupClause;
8730 :
8731 2498 : MemoryContextSwitchTo(oldcontext);
8732 :
8733 2498 : return unique_rel;
8734 : }
8735 :
8736 : /*
8737 : * create_final_unique_paths
8738 : * Create unique paths in 'unique_rel' based on 'input_rel' pathlist
8739 : */
8740 : static void
8741 4372 : create_final_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8742 : List *sortPathkeys, List *groupClause,
8743 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8744 : {
8745 4372 : Path *cheapest_input_path = input_rel->cheapest_total_path;
8746 :
8747 : /* Estimate number of output rows */
8748 4372 : unique_rel->rows = estimate_num_groups(root,
8749 : sjinfo->semi_rhs_exprs,
8750 : cheapest_input_path->rows,
8751 : NULL,
8752 : NULL);
8753 :
8754 : /* Consider sort-based implementations, if possible. */
8755 4372 : if (sjinfo->semi_can_btree)
8756 : {
8757 : ListCell *lc;
8758 :
8759 : /*
8760 : * Use any available suitably-sorted path as input, and also consider
8761 : * sorting the cheapest-total path and incremental sort on any paths
8762             : * sorting the cheapest-total path, and incrementally sorting any
8763             : * paths with presorted keys.
8764 : * To save planning time, we ignore parameterized input paths unless
8765 : * they are the cheapest-total path.
8766 : */
8767 9528 : foreach(lc, input_rel->pathlist)
8768 : {
8769 5156 : Path *input_path = (Path *) lfirst(lc);
8770 : Path *path;
8771 : bool is_sorted;
8772 : int presorted_keys;
8773 :
8774 : /*
8775 : * Ignore parameterized paths that are not the cheapest-total
8776 : * path.
8777 : */
8778 5156 : if (input_path->param_info &&
8779 : input_path != cheapest_input_path)
8780 461 : continue;
8781 :
8782 4720 : is_sorted = pathkeys_count_contained_in(sortPathkeys,
8783 : input_path->pathkeys,
8784 : &presorted_keys);
8785 :
8786 : /*
8787 : * Ignore paths that are not suitably or partially sorted, unless
8788 : * they are the cheapest total path (no need to deal with paths
8789 : * which have presorted keys when incremental sort is disabled).
8790 : */
8791 4720 : if (!is_sorted && input_path != cheapest_input_path &&
8792 49 : (presorted_keys == 0 || !enable_incremental_sort))
8793 25 : continue;
8794 :
8795 : /*
8796 : * Make a separate ProjectionPath in case we need a Result node.
8797 : */
8798 4695 : path = (Path *) create_projection_path(root,
8799 : unique_rel,
8800 : input_path,
8801 4695 : unique_rel->reltarget);
8802 :
8803 4695 : if (!is_sorted)
8804 : {
8805 : /*
8806 : * We've no need to consider both a sort and incremental sort.
8807 : * We'll just do a sort if there are no presorted keys and an
8808 : * incremental sort when there are presorted keys.
8809 : */
8810 2491 : if (presorted_keys == 0 || !enable_incremental_sort)
8811 2467 : path = (Path *) create_sort_path(root,
8812 : unique_rel,
8813 : path,
8814 : sortPathkeys,
8815 : -1.0);
8816 : else
8817 24 : path = (Path *) create_incremental_sort_path(root,
8818 : unique_rel,
8819 : path,
8820 : sortPathkeys,
8821 : presorted_keys,
8822 : -1.0);
8823 : }
8824 :
8825 4695 : path = (Path *) create_unique_path(root, unique_rel, path,
8826 : list_length(sortPathkeys),
8827 : unique_rel->rows);
8828 :
8829 4695 : add_path(unique_rel, path);
8830 : }
8831 : }
8832 :
8833 : /* Consider hash-based implementation, if possible. */
8834 4372 : if (sjinfo->semi_can_hash)
8835 : {
8836 : Path *path;
8837 :
8838 : /*
8839 : * Make a separate ProjectionPath in case we need a Result node.
8840 : */
8841 4372 : path = (Path *) create_projection_path(root,
8842 : unique_rel,
8843 : cheapest_input_path,
8844 4372 : unique_rel->reltarget);
8845 :
8846 4372 : path = (Path *) create_agg_path(root,
8847 : unique_rel,
8848 : path,
8849 : cheapest_input_path->pathtarget,
8850 : AGG_HASHED,
8851 : AGGSPLIT_SIMPLE,
8852 : groupClause,
8853 : NIL,
8854 : NULL,
8855 : unique_rel->rows);
8856 :
8857 4372 : add_path(unique_rel, path);
8858 : }
8859 4372 : }
8860 :
8861 : /*
8862 : * create_partial_unique_paths
8863 : * Create unique paths in 'unique_rel' based on 'input_rel' partial_pathlist
8864 : */
8865 : static void
8866 2498 : create_partial_unique_paths(PlannerInfo *root, RelOptInfo *input_rel,
8867 : List *sortPathkeys, List *groupClause,
8868 : SpecialJoinInfo *sjinfo, RelOptInfo *unique_rel)
8869 : {
8870 : RelOptInfo *partial_unique_rel;
8871 : Path *cheapest_partial_path;
8872 :
8873 : /* nothing to do when there are no partial paths in the input rel */
8874 2498 : if (!input_rel->consider_parallel || input_rel->partial_pathlist == NIL)
8875 624 : return;
8876 :
8877 : /*
8878 : * nothing to do if there's anything in the targetlist that's
8879 : * parallel-restricted.
8880 : */
8881 1874 : if (!is_parallel_safe(root, (Node *) unique_rel->reltarget->exprs))
8882 0 : return;
8883 :
8884 1874 : cheapest_partial_path = linitial(input_rel->partial_pathlist);
8885 :
8886 1874 : partial_unique_rel = makeNode(RelOptInfo);
8887 1874 : memcpy(partial_unique_rel, input_rel, sizeof(RelOptInfo));
8888 :
8889 : /*
8890 : * clear path info
8891 : */
8892 1874 : partial_unique_rel->pathlist = NIL;
8893 1874 : partial_unique_rel->ppilist = NIL;
8894 1874 : partial_unique_rel->partial_pathlist = NIL;
8895 1874 : partial_unique_rel->cheapest_startup_path = NULL;
8896 1874 : partial_unique_rel->cheapest_total_path = NULL;
8897 1874 : partial_unique_rel->cheapest_parameterized_paths = NIL;
8898 :
8899 : /* Estimate number of output rows */
8900 1874 : partial_unique_rel->rows = estimate_num_groups(root,
8901 : sjinfo->semi_rhs_exprs,
8902 : cheapest_partial_path->rows,
8903 : NULL,
8904 : NULL);
8905 1874 : partial_unique_rel->reltarget = unique_rel->reltarget;
8906 :
8907 : /* Consider sort-based implementations, if possible. */
8908 1874 : if (sjinfo->semi_can_btree)
8909 : {
8910 : ListCell *lc;
8911 :
8912 : /*
8913 : * Use any available suitably-sorted path as input, and also consider
8914             : * sorting the cheapest partial path, and incrementally sorting any
8915             : * paths with presorted keys.
8916 : */
8917 3904 : foreach(lc, input_rel->partial_pathlist)
8918 : {
8919 2030 : Path *input_path = (Path *) lfirst(lc);
8920 : Path *path;
8921 : bool is_sorted;
8922 : int presorted_keys;
8923 :
8924 2030 : is_sorted = pathkeys_count_contained_in(sortPathkeys,
8925 : input_path->pathkeys,
8926 : &presorted_keys);
8927 :
8928 : /*
8929 : * Ignore paths that are not suitably or partially sorted, unless
8930 : * they are the cheapest partial path (no need to deal with paths
8931 : * which have presorted keys when incremental sort is disabled).
8932 : */
8933 2030 : if (!is_sorted && input_path != cheapest_partial_path &&
8934 0 : (presorted_keys == 0 || !enable_incremental_sort))
8935 0 : continue;
8936 :
8937 : /*
8938 : * Make a separate ProjectionPath in case we need a Result node.
8939 : */
8940 2030 : path = (Path *) create_projection_path(root,
8941 : partial_unique_rel,
8942 : input_path,
8943 2030 : partial_unique_rel->reltarget);
8944 :
8945 2030 : if (!is_sorted)
8946 : {
8947 : /*
8948 : * We've no need to consider both a sort and incremental sort.
8949 : * We'll just do a sort if there are no presorted keys and an
8950 : * incremental sort when there are presorted keys.
8951 : */
8952 1850 : if (presorted_keys == 0 || !enable_incremental_sort)
8953 1850 : path = (Path *) create_sort_path(root,
8954 : partial_unique_rel,
8955 : path,
8956 : sortPathkeys,
8957 : -1.0);
8958 : else
8959 0 : path = (Path *) create_incremental_sort_path(root,
8960 : partial_unique_rel,
8961 : path,
8962 : sortPathkeys,
8963 : presorted_keys,
8964 : -1.0);
8965 : }
8966 :
8967 2030 : path = (Path *) create_unique_path(root, partial_unique_rel, path,
8968 : list_length(sortPathkeys),
8969 : partial_unique_rel->rows);
8970 :
8971 2030 : add_partial_path(partial_unique_rel, path);
8972 : }
8973 : }
8974 :
8975 : /* Consider hash-based implementation, if possible. */
8976 1874 : if (sjinfo->semi_can_hash)
8977 : {
8978 : Path *path;
8979 :
8980 : /*
8981 : * Make a separate ProjectionPath in case we need a Result node.
8982 : */
8983 1874 : path = (Path *) create_projection_path(root,
8984 : partial_unique_rel,
8985 : cheapest_partial_path,
8986 1874 : partial_unique_rel->reltarget);
8987 :
8988 1874 : path = (Path *) create_agg_path(root,
8989 : partial_unique_rel,
8990 : path,
8991 : cheapest_partial_path->pathtarget,
8992 : AGG_HASHED,
8993 : AGGSPLIT_SIMPLE,
8994 : groupClause,
8995 : NIL,
8996 : NULL,
8997 : partial_unique_rel->rows);
8998 :
8999 1874 : add_partial_path(partial_unique_rel, path);
9000 : }
9001 :
9002 1874 : if (partial_unique_rel->partial_pathlist != NIL)
9003 : {
9004 1874 : generate_useful_gather_paths(root, partial_unique_rel, true);
9005 1874 : set_cheapest(partial_unique_rel);
9006 :
9007 : /*
9008 : * Finally, create paths to unique-ify the final result. This step is
9009 : * needed to remove any duplicates due to combining rows from parallel
9010 : * workers.
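                 : *
                 : * The resulting plan shape is thus, e.g., a final Unique or
                 : * HashAggregate above a Gather (Merge), which in turn sits above
                 : * the per-worker partial unique paths.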
9011 : */
9012 1874 : create_final_unique_paths(root, partial_unique_rel,
9013 : sortPathkeys, groupClause,
9014 : sjinfo, unique_rel);
9015 : }
9016 : }
9017 :
9018 : /*
9019 : * Choose a unique name for some subroot.
9020 : *
9021 : * Modifies glob->subplanNames to track names already used.
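                 : *
                 : * For example, with always_number = false, the first request for
                 : * "subplan" returns "subplan" itself; subsequent requests for the
                 : * same name return "subplan_1", "subplan_2", and so on.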
9022 : */
9023 : char *
9024 42791 : choose_plan_name(PlannerGlobal *glob, const char *name, bool always_number)
9025 : {
9026 : unsigned n;
9027 :
9028 : /*
9029 : * If a numeric suffix is not required, then search the list of
9030 : * previously-assigned names for a match. If none is found, then we can
9031 : * use the provided name without modification.
9032 : */
9033 42791 : if (!always_number)
9034 : {
9035 13365 : bool found = false;
9036 :
9037 32018 : foreach_ptr(char, subplan_name, glob->subplanNames)
9038 : {
9039 8156 : if (strcmp(subplan_name, name) == 0)
9040 : {
9041 2868 : found = true;
9042 2868 : break;
9043 : }
9044 : }
9045 :
9046 13365 : if (!found)
9047 : {
9048 : /* pstrdup here is just to avoid cast-away-const */
9049 10497 : char *chosen_name = pstrdup(name);
9050 :
9051 10497 : glob->subplanNames = lappend(glob->subplanNames, chosen_name);
9052 10497 : return chosen_name;
9053 : }
9054 : }
9055 :
9056 : /*
9057 : * If a numeric suffix is required or if the un-suffixed name is already
9058 : * in use, then loop until we find a positive integer that produces a
9059 : * novel name.
9060 : */
9061 32294 : for (n = 1; true; ++n)
9062 27851 : {
9063 60145 : char *proposed_name = psprintf("%s_%u", name, n);
9064 60145 : bool found = false;
9065 :
9066 229448 : foreach_ptr(char, subplan_name, glob->subplanNames)
9067 : {
9068 137009 : if (strcmp(subplan_name, proposed_name) == 0)
9069 : {
9070 27851 : found = true;
9071 27851 : break;
9072 : }
9073 : }
9074 :
9075 60145 : if (!found)
9076 : {
9077 32294 : glob->subplanNames = lappend(glob->subplanNames, proposed_name);
9078 32294 : return proposed_name;
9079 : }
9080 :
9081 27851 : pfree(proposed_name);
9082 : }
9083 : }