Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeAgg.c
4 : * Routines to handle aggregate nodes.
5 : *
6 : * ExecAgg normally evaluates each aggregate in the following steps:
7 : *
8 : * transvalue = initcond
9 : * foreach input_tuple do
10 : * transvalue = transfunc(transvalue, input_value(s))
11 : * result = finalfunc(transvalue, direct_argument(s))
12 : *
13 : * If a finalfunc is not supplied then the result is just the ending
14 : * value of transvalue.
15 : *
16 : * Other behaviors can be selected by the "aggsplit" mode, which exists
17 : * to support partial aggregation. It is possible to:
18 : * * Skip running the finalfunc, so that the output is always the
19 : * final transvalue state.
20 : * * Substitute the combinefunc for the transfunc, so that transvalue
21 : * states (propagated up from a child partial-aggregation step) are merged
22 : * rather than processing raw input rows. (The statements below about
23 : * the transfunc apply equally to the combinefunc, when it's selected.)
24 : * * Apply the serializefunc to the output values (this only makes sense
25 : * when skipping the finalfunc, since the serializefunc works on the
26 : * transvalue data type).
27 : * * Apply the deserializefunc to the input values (this only makes sense
28 : * when using the combinefunc, for similar reasons).
29 : * It is the planner's responsibility to connect up Agg nodes using these
30 : * alternate behaviors in a way that makes sense, with partial aggregation
31 : * results being fed to nodes that expect them.
32 : *
33 : * If a normal aggregate call specifies DISTINCT or ORDER BY, we sort the
34 : * input tuples and eliminate duplicates (if required) before performing
35 : * the above-depicted process. (However, we don't do that for ordered-set
36 : * aggregates; their "ORDER BY" inputs are ordinary aggregate arguments
37 : * so far as this module is concerned.) Note that partial aggregation
38 : * is not supported in these cases, since we couldn't ensure global
39 : * ordering or distinctness of the inputs.
40 : *
41 : * If transfunc is marked "strict" in pg_proc and initcond is NULL,
42 : * then the first non-NULL input_value is assigned directly to transvalue,
43 : * and transfunc isn't applied until the second non-NULL input_value.
44 : * The agg's first input type and transtype must be the same in this case!
45 : *
46 : * If transfunc is marked "strict" then NULL input_values are skipped,
47 : * keeping the previous transvalue. If transfunc is not strict then it
48 : * is called for every input tuple and must deal with NULL initcond
49 : * or NULL input_values for itself.
50 : *
51 : * If finalfunc is marked "strict" then it is not called when the
52 : * ending transvalue is NULL, instead a NULL result is created
53 : * automatically (this is just the usual handling of strict functions,
54 : * of course). A non-strict finalfunc can make its own choice of
55 : * what to return for a NULL ending transvalue.
56 : *
57 : * Ordered-set aggregates are treated specially in one other way: we
58 : * evaluate any "direct" arguments and pass them to the finalfunc along
59 : * with the transition value.
60 : *
61 : * A finalfunc can have additional arguments beyond the transvalue and
62 : * any "direct" arguments, corresponding to the input arguments of the
63 : * aggregate. These are always just passed as NULL. Such arguments may be
64 : * needed to allow resolution of a polymorphic aggregate's result type.
65 : *
66 : * We compute aggregate input expressions and run the transition functions
67 : * in a temporary econtext (aggstate->tmpcontext). This is reset at least
68 : * once per input tuple, so when the transvalue datatype is
69 : * pass-by-reference, we have to be careful to copy it into a longer-lived
70 : * memory context, and free the prior value to avoid memory leakage. We
71 : * store transvalues in another set of econtexts, aggstate->aggcontexts
72 : * (one per grouping set, see below), which are also used for the hashtable
73 : * structures in AGG_HASHED mode. These econtexts are rescanned, not just
74 : * reset, at group boundaries so that aggregate transition functions can
75 : * register shutdown callbacks via AggRegisterCallback.
76 : *
77 : * The node's regular econtext (aggstate->ss.ps.ps_ExprContext) is used to
78 : * run finalize functions and compute the output tuple; this context can be
79 : * reset once per output tuple.
80 : *
81 : * The executor's AggState node is passed as the fmgr "context" value in
82 : * all transfunc and finalfunc calls. It is not recommended that the
83 : * transition functions look at the AggState node directly, but they can
84 : * use AggCheckCallContext() to verify that they are being called by
85 : * nodeAgg.c (and not as ordinary SQL functions). The main reason a
86 : * transition function might want to know this is so that it can avoid
87 : * palloc'ing a fixed-size pass-by-ref transition value on every call:
88 : * it can instead just scribble on and return its left input. Ordinarily
89 : * it is completely forbidden for functions to modify pass-by-ref inputs,
90 : * but in the aggregate case we know the left input is either the initial
91 : * transition value or a previous function result, and in either case its
92 : * value need not be preserved. See int8inc() for an example. Notice that
93 : * the EEOP_AGG_PLAIN_TRANS step is coded to avoid a data copy step when
94 : * the previous transition value pointer is returned. It is also possible
95 : * to avoid repeated data copying when the transition value is an expanded
96 : * object: to do that, the transition function must take care to return
97 : * an expanded object that is in a child context of the memory context
98 : * returned by AggCheckCallContext(). Also, some transition functions want
99 : * to store working state in addition to the nominal transition value; they
100 : * can use the memory context returned by AggCheckCallContext() to do that.
101 : *
102 : * Note: AggCheckCallContext() is available as of PostgreSQL 9.0. The
103 : * AggState is available as context in earlier releases (back to 8.1),
104 : * but direct examination of the node is needed to use it before 9.0.
105 : *
106 : * As of 9.4, aggregate transition functions can also use AggGetAggref()
107 : * to get hold of the Aggref expression node for their aggregate call.
108 : * This is mainly intended for ordered-set aggregates, which are not
109 : * supported as window functions. (A regular aggregate function would
110 : * need some fallback logic to use this, since there's no Aggref node
111 : * for a window function.)
112 : *
113 : * Grouping sets:
114 : *
115 : * A list of grouping sets which is structurally equivalent to a ROLLUP
116 : * clause (e.g. (a,b,c), (a,b), (a)) can be processed in a single pass over
117 : * ordered data. We do this by keeping a separate set of transition values
118 : * for each grouping set being concurrently processed; for each input tuple
119 : * we update them all, and on group boundaries we reset those states
120 : * (starting at the front of the list) whose grouping values have changed
121 : * (the list of grouping sets is ordered from most specific to least
122 : * specific).
123 : *
124 : * Where more complex grouping sets are used, we break them down into
125 : * "phases", where each phase has a different sort order (except phase 0
126 : * which is reserved for hashing). During each phase but the last, the
127 : * input tuples are additionally stored in a tuplesort which is keyed to the
128 : * next phase's sort order; during each phase but the first, the input
129 : * tuples are drawn from the previously sorted data. (The sorting of the
130 : * data for the first phase is handled by the planner, as it might be
131 : * satisfied by underlying nodes.)
132 : *
133 : * Hashing can be mixed with sorted grouping. To do this, we have an
134 : * AGG_MIXED strategy that populates the hashtables during the first sorted
135 : * phase, and switches to reading them out after completing all sort phases.
136 : * We can also support AGG_HASHED with multiple hash tables and no sorting
137 : * at all.
138 : *
139 : * From the perspective of aggregate transition and final functions, the
140 : * only issue regarding grouping sets is this: a single call site (flinfo)
141 : * of an aggregate function may be used for updating several different
142 : * transition values in turn. So the function must not cache in the flinfo
143 : * anything which logically belongs as part of the transition value (most
144 : * importantly, the memory context in which the transition value exists).
145 : * The support API functions (AggCheckCallContext, AggRegisterCallback) are
146 : * sensitive to the grouping set for which the aggregate function is
147 : * currently being called.
148 : *
149 : * Plan structure:
150 : *
151 : * What we get from the planner is actually one "real" Agg node which is
152 : * part of the plan tree proper, but which optionally has an additional list
153 : * of Agg nodes hung off the side via the "chain" field. This is because an
154 : * Agg node happens to be a convenient representation of all the data we
155 : * need for grouping sets.
156 : *
157 : * For many purposes, we treat the "real" node as if it were just the first
158 : * node in the chain. The chain must be ordered such that hashed entries
159 : * come before sorted/plain entries; the real node is marked AGG_MIXED if
160 : * there are both types present (in which case the real node describes one
161 : * of the hashed groupings, other AGG_HASHED nodes may optionally follow in
162 : * the chain, followed in turn by AGG_SORTED or (one) AGG_PLAIN node). If
163 : * the real node is marked AGG_HASHED or AGG_SORTED, then all the chained
164 : * nodes must be of the same type; if it is AGG_PLAIN, there can be no
165 : * chained nodes.
166 : *
167 : * We collect all hashed nodes into a single "phase", numbered 0, and create
168 : * a sorted phase (numbered 1..n) for each AGG_SORTED or AGG_PLAIN node.
169 : * Phase 0 is allocated even if there are no hashes, but remains unused in
170 : * that case.
171 : *
172 : * AGG_HASHED nodes actually refer to only a single grouping set each,
173 : * because for each hashed grouping we need a separate grpColIdx and
174 : * numGroups estimate. AGG_SORTED nodes represent a "rollup", a list of
175 : * grouping sets that share a sort order. Each AGG_SORTED node other than
176 : * the first one has an associated Sort node which describes the sort order
177 : * to be used; the first sorted node takes its input from the outer subtree,
178 : * which the planner has already arranged to provide ordered data.
179 : *
180 : * Memory and ExprContext usage:
181 : *
182 : * Because we're accumulating aggregate values across input rows, we need to
183 : * use more memory contexts than just simple input/output tuple contexts.
184 : * In fact, for a rollup, we need a separate context for each grouping set
185 : * so that we can reset the inner (finer-grained) aggregates on their group
186 : * boundaries while continuing to accumulate values for outer
187 : * (coarser-grained) groupings. On top of this, we might be simultaneously
188 : * populating hashtables; however, we only need one context for all the
189 : * hashtables.
190 : *
191 : * So we create an array, aggcontexts, with an ExprContext for each grouping
192 : * set in the largest rollup that we're going to process, and use the
193 : * per-tuple memory context of those ExprContexts to store the aggregate
194 : * transition values. hashcontext is the single context created to support
195 : * all hash tables.
196 : *
197 : * Spilling To Disk
198 : *
199 : * When performing hash aggregation, if the hash table memory exceeds the
200 : * limit (see hash_agg_check_limits()), we enter "spill mode". In spill
201 : * mode, we advance the transition states only for groups already in the
202 : * hash table. For tuples that would need to create a new hash table
203 : * entries (and initialize new transition states), we instead spill them to
204 : * disk to be processed later. The tuples are spilled in a partitioned
205 : * manner, so that subsequent batches are smaller and less likely to exceed
206 : * hash_mem (if a batch does exceed hash_mem, it must be spilled
207 : * recursively).
208 : *
209 : * Spilled data is written to logical tapes. These provide better control
210 : * over memory usage, disk space, and the number of files than if we were
211 : * to use a BufFile for each spill. We don't know the number of tapes needed
212 : * at the start of the algorithm (because it can recurse), so a tape set is
213 : * allocated at the beginning, and individual tapes are created as needed.
214 : * As a particular tape is read, logtape.c recycles its disk space. When a
215 : * tape is read to completion, it is destroyed entirely.
216 : *
217 : * Tapes' buffers can take up substantial memory when many tapes are open at
218 : * once. We only need one tape open at a time in read mode (using a buffer
219 : * that's a multiple of BLCKSZ); but we need one tape open in write mode (each
220 : * requiring a buffer of size BLCKSZ) for each partition.
221 : *
222 : * Note that it's possible for transition states to start small but then
223 : * grow very large; for instance in the case of ARRAY_AGG. In such cases,
224 : * it's still possible to significantly exceed hash_mem. We try to avoid
225 : * this situation by estimating what will fit in the available memory, and
226 : * imposing a limit on the number of groups separately from the amount of
227 : * memory consumed.
228 : *
229 : * Transition / Combine function invocation:
230 : *
231 : * For performance reasons transition functions, including combine
232 : * functions, aren't invoked one-by-one from nodeAgg.c after computing
233 : * arguments using the expression evaluation engine. Instead
234 : * ExecBuildAggTrans() builds one large expression that does both argument
235 : * evaluation and transition function invocation. That avoids performance
236 : * issues due to repeated uses of expression evaluation, complications due
237 : * to filter expressions having to be evaluated early, and allows to JIT
238 : * the entire expression into one native function.
239 : *
240 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
241 : * Portions Copyright (c) 1994, Regents of the University of California
242 : *
243 : * IDENTIFICATION
244 : * src/backend/executor/nodeAgg.c
245 : *
246 : *-------------------------------------------------------------------------
247 : */
248 :
249 : #include "postgres.h"
250 :
251 : #include "access/htup_details.h"
252 : #include "access/parallel.h"
253 : #include "catalog/objectaccess.h"
254 : #include "catalog/pg_aggregate.h"
255 : #include "catalog/pg_proc.h"
256 : #include "catalog/pg_type.h"
257 : #include "common/hashfn.h"
258 : #include "executor/execExpr.h"
259 : #include "executor/executor.h"
260 : #include "executor/nodeAgg.h"
261 : #include "lib/hyperloglog.h"
262 : #include "miscadmin.h"
263 : #include "nodes/nodeFuncs.h"
264 : #include "optimizer/optimizer.h"
265 : #include "parser/parse_agg.h"
266 : #include "parser/parse_coerce.h"
267 : #include "utils/acl.h"
268 : #include "utils/builtins.h"
269 : #include "utils/datum.h"
270 : #include "utils/dynahash.h"
271 : #include "utils/expandeddatum.h"
272 : #include "utils/injection_point.h"
273 : #include "utils/logtape.h"
274 : #include "utils/lsyscache.h"
275 : #include "utils/memutils.h"
276 : #include "utils/memutils_memorychunk.h"
277 : #include "utils/syscache.h"
278 : #include "utils/tuplesort.h"
279 :
280 : /*
281 : * Control how many partitions are created when spilling HashAgg to
282 : * disk.
283 : *
284 : * HASHAGG_PARTITION_FACTOR is multiplied by the estimated number of
285 : * partitions needed such that each partition will fit in memory. The factor
286 : * is set higher than one because there's not a high cost to having a few too
287 : * many partitions, and it makes it less likely that a partition will need to
288 : * be spilled recursively. Another benefit of having more, smaller partitions
289 : * is that small hash tables may perform better than large ones due to memory
290 : * caching effects.
291 : *
292 : * We also specify a min and max number of partitions per spill. Too few might
293 : * mean a lot of wasted I/O from repeated spilling of the same tuples. Too
294 : * many will result in lots of memory wasted buffering the spill files (which
295 : * could instead be spent on a larger hash table).
296 : */
297 : #define HASHAGG_PARTITION_FACTOR 1.50
298 : #define HASHAGG_MIN_PARTITIONS 4
299 : #define HASHAGG_MAX_PARTITIONS 1024
300 :
301 : /*
302 : * For reading from tapes, the buffer size must be a multiple of
303 : * BLCKSZ. Larger values help when reading from multiple tapes concurrently,
304 : * but that doesn't happen in HashAgg, so we simply use BLCKSZ. Writing to a
305 : * tape always uses a buffer of size BLCKSZ.
306 : */
307 : #define HASHAGG_READ_BUFFER_SIZE BLCKSZ
308 : #define HASHAGG_WRITE_BUFFER_SIZE BLCKSZ
309 :
310 : /*
311 : * HyperLogLog is used for estimating the cardinality of the spilled tuples in
312 : * a given partition. 5 bits corresponds to a size of about 32 bytes and a
313 : * worst-case error of around 18%. That's effective enough to choose a
314 : * reasonable number of partitions when recursing.
315 : */
316 : #define HASHAGG_HLL_BIT_WIDTH 5
317 :
318 : /*
319 : * Assume the palloc overhead always uses sizeof(MemoryChunk) bytes.
320 : */
321 : #define CHUNKHDRSZ sizeof(MemoryChunk)
322 :
323 : /*
324 : * Represents partitioned spill data for a single hashtable. Contains the
325 : * necessary information to route tuples to the correct partition, and to
326 : * transform the spilled data into new batches.
327 : *
328 : * The high bits are used for partition selection (when recursing, we ignore
329 : * the bits that have already been used for partition selection at an earlier
330 : * level).
331 : */
332 : typedef struct HashAggSpill
333 : {
334 : int npartitions; /* number of partitions */
335 : LogicalTape **partitions; /* spill partition tapes */
336 : int64 *ntuples; /* number of tuples in each partition */
337 : uint32 mask; /* mask to find partition from hash value */
338 : int shift; /* after masking, shift by this amount */
339 : hyperLogLogState *hll_card; /* cardinality estimate for contents */
340 : } HashAggSpill;
341 :
342 : /*
343 : * Represents work to be done for one pass of hash aggregation (with only one
344 : * grouping set).
345 : *
346 : * Also tracks the bits of the hash already used for partition selection by
347 : * earlier iterations, so that this batch can use new bits. If all bits have
348 : * already been used, no partitioning will be done (any spilled data will go
349 : * to a single output tape).
350 : */
351 : typedef struct HashAggBatch
352 : {
353 : int setno; /* grouping set */
354 : int used_bits; /* number of bits of hash already used */
355 : LogicalTape *input_tape; /* input partition tape */
356 : int64 input_tuples; /* number of tuples in this batch */
357 : double input_card; /* estimated group cardinality */
358 : } HashAggBatch;
359 :
360 : /* used to find referenced colnos */
361 : typedef struct FindColsContext
362 : {
363 : bool is_aggref; /* is under an aggref */
364 : Bitmapset *aggregated; /* column references under an aggref */
365 : Bitmapset *unaggregated; /* other column references */
366 : } FindColsContext;
367 :
368 : static void select_current_set(AggState *aggstate, int setno, bool is_hash);
369 : static void initialize_phase(AggState *aggstate, int newphase);
370 : static TupleTableSlot *fetch_input_tuple(AggState *aggstate);
371 : static void initialize_aggregates(AggState *aggstate,
372 : AggStatePerGroup *pergroups,
373 : int numReset);
374 : static void advance_transition_function(AggState *aggstate,
375 : AggStatePerTrans pertrans,
376 : AggStatePerGroup pergroupstate);
377 : static void advance_aggregates(AggState *aggstate);
378 : static void process_ordered_aggregate_single(AggState *aggstate,
379 : AggStatePerTrans pertrans,
380 : AggStatePerGroup pergroupstate);
381 : static void process_ordered_aggregate_multi(AggState *aggstate,
382 : AggStatePerTrans pertrans,
383 : AggStatePerGroup pergroupstate);
384 : static void finalize_aggregate(AggState *aggstate,
385 : AggStatePerAgg peragg,
386 : AggStatePerGroup pergroupstate,
387 : Datum *resultVal, bool *resultIsNull);
388 : static void finalize_partialaggregate(AggState *aggstate,
389 : AggStatePerAgg peragg,
390 : AggStatePerGroup pergroupstate,
391 : Datum *resultVal, bool *resultIsNull);
392 : static inline void prepare_hash_slot(AggStatePerHash perhash,
393 : TupleTableSlot *inputslot,
394 : TupleTableSlot *hashslot);
395 : static void prepare_projection_slot(AggState *aggstate,
396 : TupleTableSlot *slot,
397 : int currentSet);
398 : static void finalize_aggregates(AggState *aggstate,
399 : AggStatePerAgg peraggs,
400 : AggStatePerGroup pergroup);
401 : static TupleTableSlot *project_aggregates(AggState *aggstate);
402 : static void find_cols(AggState *aggstate, Bitmapset **aggregated,
403 : Bitmapset **unaggregated);
404 : static bool find_cols_walker(Node *node, FindColsContext *context);
405 : static void build_hash_tables(AggState *aggstate);
406 : static void build_hash_table(AggState *aggstate, int setno, long nbuckets);
407 : static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
408 : bool nullcheck);
409 : static long hash_choose_num_buckets(double hashentrysize,
410 : long ngroups, Size memory);
411 : static int hash_choose_num_partitions(double input_groups,
412 : double hashentrysize,
413 : int used_bits,
414 : int *log2_npartitions);
415 : static void initialize_hash_entry(AggState *aggstate,
416 : TupleHashTable hashtable,
417 : TupleHashEntry entry);
418 : static void lookup_hash_entries(AggState *aggstate);
419 : static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
420 : static void agg_fill_hash_table(AggState *aggstate);
421 : static bool agg_refill_hash_table(AggState *aggstate);
422 : static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
423 : static TupleTableSlot *agg_retrieve_hash_table_in_memory(AggState *aggstate);
424 : static void hash_agg_check_limits(AggState *aggstate);
425 : static void hash_agg_enter_spill_mode(AggState *aggstate);
426 : static void hash_agg_update_metrics(AggState *aggstate, bool from_tape,
427 : int npartitions);
428 : static void hashagg_finish_initial_spills(AggState *aggstate);
429 : static void hashagg_reset_spill_state(AggState *aggstate);
430 : static HashAggBatch *hashagg_batch_new(LogicalTape *input_tape, int setno,
431 : int64 input_tuples, double input_card,
432 : int used_bits);
433 : static MinimalTuple hashagg_batch_read(HashAggBatch *batch, uint32 *hashp);
434 : static void hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *tapeset,
435 : int used_bits, double input_groups,
436 : double hashentrysize);
437 : static Size hashagg_spill_tuple(AggState *aggstate, HashAggSpill *spill,
438 : TupleTableSlot *inputslot, uint32 hash);
439 : static void hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill,
440 : int setno);
441 : static Datum GetAggInitVal(Datum textInitVal, Oid transtype);
442 : static void build_pertrans_for_aggref(AggStatePerTrans pertrans,
443 : AggState *aggstate, EState *estate,
444 : Aggref *aggref, Oid transfn_oid,
445 : Oid aggtranstype, Oid aggserialfn,
446 : Oid aggdeserialfn, Datum initValue,
447 : bool initValueIsNull, Oid *inputTypes,
448 : int numArguments);
449 :
450 :
451 : /*
452 : * Select the current grouping set; affects current_set and
453 : * curaggcontext.
454 : */
455 : static void
456 6483874 : select_current_set(AggState *aggstate, int setno, bool is_hash)
457 : {
458 : /*
459 : * When changing this, also adapt ExecAggPlainTransByVal() and
460 : * ExecAggPlainTransByRef().
461 : */
462 6483874 : if (is_hash)
463 5820308 : aggstate->curaggcontext = aggstate->hashcontext;
464 : else
465 663566 : aggstate->curaggcontext = aggstate->aggcontexts[setno];
466 :
467 6483874 : aggstate->current_set = setno;
468 6483874 : }
469 :
470 : /*
471 : * Switch to phase "newphase", which must either be 0 or 1 (to reset) or
472 : * current_phase + 1. Juggle the tuplesorts accordingly.
473 : *
474 : * Phase 0 is for hashing, which we currently handle last in the AGG_MIXED
475 : * case, so when entering phase 0, all we need to do is drop open sorts.
476 : */
477 : static void
478 84478 : initialize_phase(AggState *aggstate, int newphase)
479 : {
480 : Assert(newphase <= 1 || newphase == aggstate->current_phase + 1);
481 :
482 : /*
483 : * Whatever the previous state, we're now done with whatever input
484 : * tuplesort was in use.
485 : */
486 84478 : if (aggstate->sort_in)
487 : {
488 42 : tuplesort_end(aggstate->sort_in);
489 42 : aggstate->sort_in = NULL;
490 : }
491 :
492 84478 : if (newphase <= 1)
493 : {
494 : /*
495 : * Discard any existing output tuplesort.
496 : */
497 84292 : if (aggstate->sort_out)
498 : {
499 6 : tuplesort_end(aggstate->sort_out);
500 6 : aggstate->sort_out = NULL;
501 : }
502 : }
503 : else
504 : {
505 : /*
506 : * The old output tuplesort becomes the new input one, and this is the
507 : * right time to actually sort it.
508 : */
509 186 : aggstate->sort_in = aggstate->sort_out;
510 186 : aggstate->sort_out = NULL;
511 : Assert(aggstate->sort_in);
512 186 : tuplesort_performsort(aggstate->sort_in);
513 : }
514 :
515 : /*
516 : * If this isn't the last phase, we need to sort appropriately for the
517 : * next phase in sequence.
518 : */
519 84478 : if (newphase > 0 && newphase < aggstate->numphases - 1)
520 : {
521 234 : Sort *sortnode = aggstate->phases[newphase + 1].sortnode;
522 234 : PlanState *outerNode = outerPlanState(aggstate);
523 234 : TupleDesc tupDesc = ExecGetResultType(outerNode);
524 :
525 234 : aggstate->sort_out = tuplesort_begin_heap(tupDesc,
526 : sortnode->numCols,
527 : sortnode->sortColIdx,
528 : sortnode->sortOperators,
529 : sortnode->collations,
530 : sortnode->nullsFirst,
531 : work_mem,
532 : NULL, TUPLESORT_NONE);
533 : }
534 :
535 84478 : aggstate->current_phase = newphase;
536 84478 : aggstate->phase = &aggstate->phases[newphase];
537 84478 : }
538 :
539 : /*
540 : * Fetch a tuple from either the outer plan (for phase 1) or from the sorter
541 : * populated by the previous phase. Copy it to the sorter for the next phase
542 : * if any.
543 : *
544 : * Callers cannot rely on memory for tuple in returned slot remaining valid
545 : * past any subsequently fetched tuple.
546 : */
547 : static TupleTableSlot *
548 25780176 : fetch_input_tuple(AggState *aggstate)
549 : {
550 : TupleTableSlot *slot;
551 :
552 25780176 : if (aggstate->sort_in)
553 : {
554 : /* make sure we check for interrupts in either path through here */
555 174882 : CHECK_FOR_INTERRUPTS();
556 174882 : if (!tuplesort_gettupleslot(aggstate->sort_in, true, false,
557 : aggstate->sort_slot, NULL))
558 186 : return NULL;
559 174696 : slot = aggstate->sort_slot;
560 : }
561 : else
562 25605294 : slot = ExecProcNode(outerPlanState(aggstate));
563 :
564 25779972 : if (!TupIsNull(slot) && aggstate->sort_out)
565 174696 : tuplesort_puttupleslot(aggstate->sort_out, slot);
566 :
567 25779972 : return slot;
568 : }
569 :
570 : /*
571 : * (Re)Initialize an individual aggregate.
572 : *
573 : * This function handles only one grouping set, already set in
574 : * aggstate->current_set.
575 : *
576 : * When called, CurrentMemoryContext should be the per-query context.
577 : */
578 : static void
579 1060798 : initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans,
580 : AggStatePerGroup pergroupstate)
581 : {
582 : /*
583 : * Start a fresh sort operation for each DISTINCT/ORDER BY aggregate.
584 : */
585 1060798 : if (pertrans->aggsortrequired)
586 : {
587 : /*
588 : * In case of rescan, maybe there could be an uncompleted sort
589 : * operation? Clean it up if so.
590 : */
591 53828 : if (pertrans->sortstates[aggstate->current_set])
592 0 : tuplesort_end(pertrans->sortstates[aggstate->current_set]);
593 :
594 :
595 : /*
596 : * We use a plain Datum sorter when there's a single input column;
597 : * otherwise sort the full tuple. (See comments for
598 : * process_ordered_aggregate_single.)
599 : */
600 53828 : if (pertrans->numInputs == 1)
601 : {
602 53756 : Form_pg_attribute attr = TupleDescAttr(pertrans->sortdesc, 0);
603 :
604 53756 : pertrans->sortstates[aggstate->current_set] =
605 53756 : tuplesort_begin_datum(attr->atttypid,
606 53756 : pertrans->sortOperators[0],
607 53756 : pertrans->sortCollations[0],
608 53756 : pertrans->sortNullsFirst[0],
609 : work_mem, NULL, TUPLESORT_NONE);
610 : }
611 : else
612 72 : pertrans->sortstates[aggstate->current_set] =
613 72 : tuplesort_begin_heap(pertrans->sortdesc,
614 : pertrans->numSortCols,
615 : pertrans->sortColIdx,
616 : pertrans->sortOperators,
617 : pertrans->sortCollations,
618 : pertrans->sortNullsFirst,
619 : work_mem, NULL, TUPLESORT_NONE);
620 : }
621 :
622 : /*
623 : * (Re)set transValue to the initial value.
624 : *
625 : * Note that when the initial value is pass-by-ref, we must copy it (into
626 : * the aggcontext) since we will pfree the transValue later.
627 : */
628 1060798 : if (pertrans->initValueIsNull)
629 531214 : pergroupstate->transValue = pertrans->initValue;
630 : else
631 : {
632 : MemoryContext oldContext;
633 :
634 529584 : oldContext = MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
635 1059168 : pergroupstate->transValue = datumCopy(pertrans->initValue,
636 529584 : pertrans->transtypeByVal,
637 529584 : pertrans->transtypeLen);
638 529584 : MemoryContextSwitchTo(oldContext);
639 : }
640 1060798 : pergroupstate->transValueIsNull = pertrans->initValueIsNull;
641 :
642 : /*
643 : * If the initial value for the transition state doesn't exist in the
644 : * pg_aggregate table then we will let the first non-NULL value returned
645 : * from the outer procNode become the initial value. (This is useful for
646 : * aggregates like max() and min().) The noTransValue flag signals that we
647 : * still need to do this.
648 : */
649 1060798 : pergroupstate->noTransValue = pertrans->initValueIsNull;
650 1060798 : }
651 :
652 : /*
653 : * Initialize all aggregate transition states for a new group of input values.
654 : *
655 : * If there are multiple grouping sets, we initialize only the first numReset
656 : * of them (the grouping sets are ordered so that the most specific one, which
657 : * is reset most often, is first). As a convenience, if numReset is 0, we
658 : * reinitialize all sets.
659 : *
660 : * NB: This cannot be used for hash aggregates, as for those the grouping set
661 : * number has to be specified from further up.
662 : *
663 : * When called, CurrentMemoryContext should be the per-query context.
664 : */
665 : static void
666 297836 : initialize_aggregates(AggState *aggstate,
667 : AggStatePerGroup *pergroups,
668 : int numReset)
669 : {
670 : int transno;
671 297836 : int numGroupingSets = Max(aggstate->phase->numsets, 1);
672 297836 : int setno = 0;
673 297836 : int numTrans = aggstate->numtrans;
674 297836 : AggStatePerTrans transstates = aggstate->pertrans;
675 :
676 297836 : if (numReset == 0)
677 0 : numReset = numGroupingSets;
678 :
679 609834 : for (setno = 0; setno < numReset; setno++)
680 : {
681 311998 : AggStatePerGroup pergroup = pergroups[setno];
682 :
683 311998 : select_current_set(aggstate, setno, false);
684 :
685 975994 : for (transno = 0; transno < numTrans; transno++)
686 : {
687 663996 : AggStatePerTrans pertrans = &transstates[transno];
688 663996 : AggStatePerGroup pergroupstate = &pergroup[transno];
689 :
690 663996 : initialize_aggregate(aggstate, pertrans, pergroupstate);
691 : }
692 : }
693 297836 : }
694 :
695 : /*
696 : * Given new input value(s), advance the transition function of one aggregate
697 : * state within one grouping set only (already set in aggstate->current_set)
698 : *
699 : * The new values (and null flags) have been preloaded into argument positions
700 : * 1 and up in pertrans->transfn_fcinfo, so that we needn't copy them again to
701 : * pass to the transition function. We also expect that the static fields of
702 : * the fcinfo are already initialized; that was done by ExecInitAgg().
703 : *
704 : * It doesn't matter which memory context this is called in.
705 : */
706 : static void
707 724256 : advance_transition_function(AggState *aggstate,
708 : AggStatePerTrans pertrans,
709 : AggStatePerGroup pergroupstate)
710 : {
711 724256 : FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
712 : MemoryContext oldContext;
713 : Datum newVal;
714 :
715 724256 : if (pertrans->transfn.fn_strict)
716 : {
717 : /*
718 : * For a strict transfn, nothing happens when there's a NULL input; we
719 : * just keep the prior transValue.
720 : */
721 225000 : int numTransInputs = pertrans->numTransInputs;
722 : int i;
723 :
724 450000 : for (i = 1; i <= numTransInputs; i++)
725 : {
726 225000 : if (fcinfo->args[i].isnull)
727 0 : return;
728 : }
729 225000 : if (pergroupstate->noTransValue)
730 : {
731 : /*
732 : * transValue has not been initialized. This is the first non-NULL
733 : * input value. We use it as the initial value for transValue. (We
734 : * already checked that the agg's input type is binary-compatible
735 : * with its transtype, so straight copy here is OK.)
736 : *
737 : * We must copy the datum into aggcontext if it is pass-by-ref. We
738 : * do not need to pfree the old transValue, since it's NULL.
739 : */
740 0 : oldContext = MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
741 0 : pergroupstate->transValue = datumCopy(fcinfo->args[1].value,
742 0 : pertrans->transtypeByVal,
743 0 : pertrans->transtypeLen);
744 0 : pergroupstate->transValueIsNull = false;
745 0 : pergroupstate->noTransValue = false;
746 0 : MemoryContextSwitchTo(oldContext);
747 0 : return;
748 : }
749 225000 : if (pergroupstate->transValueIsNull)
750 : {
751 : /*
752 : * Don't call a strict function with NULL inputs. Note it is
753 : * possible to get here despite the above tests, if the transfn is
754 : * strict *and* returned a NULL on a prior cycle. If that happens
755 : * we will propagate the NULL all the way to the end.
756 : */
757 0 : return;
758 : }
759 : }
760 :
761 : /* We run the transition functions in per-input-tuple memory context */
762 724256 : oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory);
763 :
764 : /* set up aggstate->curpertrans for AggGetAggref() */
765 724256 : aggstate->curpertrans = pertrans;
766 :
767 : /*
768 : * OK to call the transition function
769 : */
770 724256 : fcinfo->args[0].value = pergroupstate->transValue;
771 724256 : fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
772 724256 : fcinfo->isnull = false; /* just in case transfn doesn't set it */
773 :
774 724256 : newVal = FunctionCallInvoke(fcinfo);
775 :
776 724256 : aggstate->curpertrans = NULL;
777 :
778 : /*
779 : * If pass-by-ref datatype, must copy the new value into aggcontext and
780 : * free the prior transValue. But if transfn returned a pointer to its
781 : * first input, we don't need to do anything.
782 : *
783 : * It's safe to compare newVal with pergroup->transValue without regard
784 : * for either being NULL, because ExecAggCopyTransValue takes care to set
785 : * transValue to 0 when NULL. Otherwise we could end up accidentally not
786 : * reparenting, when the transValue has the same numerical value as
787 : * newValue, despite being NULL. This is a somewhat hot path, making it
788 : * undesirable to instead solve this with another branch for the common
789 : * case of the transition function returning its (modified) input
790 : * argument.
791 : */
792 724256 : if (!pertrans->transtypeByVal &&
793 0 : DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
794 0 : newVal = ExecAggCopyTransValue(aggstate, pertrans,
795 0 : newVal, fcinfo->isnull,
796 : pergroupstate->transValue,
797 0 : pergroupstate->transValueIsNull);
798 :
799 724256 : pergroupstate->transValue = newVal;
800 724256 : pergroupstate->transValueIsNull = fcinfo->isnull;
801 :
802 724256 : MemoryContextSwitchTo(oldContext);
803 : }
804 :
805 : /*
806 : * Advance each aggregate transition state for one input tuple. The input
807 : * tuple has been stored in tmpcontext->ecxt_outertuple, so that it is
808 : * accessible to ExecEvalExpr.
809 : *
810 : * We have two sets of transition states to handle: one for sorted aggregation
811 : * and one for hashed; we do them both here, to avoid multiple evaluation of
812 : * the inputs.
813 : *
814 : * When called, CurrentMemoryContext should be the per-query context.
815 : */
816 : static void
817 25929794 : advance_aggregates(AggState *aggstate)
818 : {
819 : bool dummynull;
820 :
821 25929794 : ExecEvalExprSwitchContext(aggstate->phase->evaltrans,
822 : aggstate->tmpcontext,
823 : &dummynull);
824 25929716 : }
825 :
826 : /*
827 : * Run the transition function for a DISTINCT or ORDER BY aggregate
828 : * with only one input. This is called after we have completed
829 : * entering all the input values into the sort object. We complete the
830 : * sort, read out the values in sorted order, and run the transition
831 : * function on each value (applying DISTINCT if appropriate).
832 : *
833 : * Note that the strictness of the transition function was checked when
834 : * entering the values into the sort, so we don't check it again here;
835 : * we just apply standard SQL DISTINCT logic.
836 : *
837 : * The one-input case is handled separately from the multi-input case
838 : * for performance reasons: for single by-value inputs, such as the
839 : * common case of count(distinct id), the tuplesort_getdatum code path
840 : * is around 300% faster. (The speedup for by-reference types is less
841 : * but still noticeable.)
842 : *
843 : * This function handles only one grouping set (already set in
844 : * aggstate->current_set).
845 : *
846 : * When called, CurrentMemoryContext should be the per-query context.
847 : */
848 : static void
849 53756 : process_ordered_aggregate_single(AggState *aggstate,
850 : AggStatePerTrans pertrans,
851 : AggStatePerGroup pergroupstate)
852 : {
853 53756 : Datum oldVal = (Datum) 0;
854 53756 : bool oldIsNull = true;
855 53756 : bool haveOldVal = false;
856 53756 : MemoryContext workcontext = aggstate->tmpcontext->ecxt_per_tuple_memory;
857 : MemoryContext oldContext;
858 53756 : bool isDistinct = (pertrans->numDistinctCols > 0);
859 53756 : Datum newAbbrevVal = (Datum) 0;
860 53756 : Datum oldAbbrevVal = (Datum) 0;
861 53756 : FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
862 : Datum *newVal;
863 : bool *isNull;
864 :
865 : Assert(pertrans->numDistinctCols < 2);
866 :
867 53756 : tuplesort_performsort(pertrans->sortstates[aggstate->current_set]);
868 :
869 : /* Load the column into argument 1 (arg 0 will be transition value) */
870 53756 : newVal = &fcinfo->args[1].value;
871 53756 : isNull = &fcinfo->args[1].isnull;
872 :
873 : /*
874 : * Note: if input type is pass-by-ref, the datums returned by the sort are
875 : * freshly palloc'd in the per-query context, so we must be careful to
876 : * pfree them when they are no longer needed.
877 : */
878 :
879 898140 : while (tuplesort_getdatum(pertrans->sortstates[aggstate->current_set],
880 : true, false, newVal, isNull, &newAbbrevVal))
881 : {
882 : /*
883 : * Clear and select the working context for evaluation of the equality
884 : * function and transition function.
885 : */
886 844384 : MemoryContextReset(workcontext);
887 844384 : oldContext = MemoryContextSwitchTo(workcontext);
888 :
889 : /*
890 : * If DISTINCT mode, and not distinct from prior, skip it.
891 : */
892 844384 : if (isDistinct &&
893 310322 : haveOldVal &&
894 0 : ((oldIsNull && *isNull) ||
895 310322 : (!oldIsNull && !*isNull &&
896 605684 : oldAbbrevVal == newAbbrevVal &&
897 295362 : DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne,
898 : pertrans->aggCollation,
899 : oldVal, *newVal)))))
900 : {
901 120308 : MemoryContextSwitchTo(oldContext);
902 120308 : continue;
903 : }
904 : else
905 : {
906 724076 : advance_transition_function(aggstate, pertrans, pergroupstate);
907 :
908 724076 : MemoryContextSwitchTo(oldContext);
909 :
910 : /*
911 : * Forget the old value, if any, and remember the new one for
912 : * subsequent equality checks.
913 : */
914 724076 : if (!pertrans->inputtypeByVal)
915 : {
916 525288 : if (!oldIsNull)
917 525108 : pfree(DatumGetPointer(oldVal));
918 525288 : if (!*isNull)
919 525228 : oldVal = datumCopy(*newVal, pertrans->inputtypeByVal,
920 525228 : pertrans->inputtypeLen);
921 : }
922 : else
923 198788 : oldVal = *newVal;
924 724076 : oldAbbrevVal = newAbbrevVal;
925 724076 : oldIsNull = *isNull;
926 724076 : haveOldVal = true;
927 : }
928 : }
929 :
930 53756 : if (!oldIsNull && !pertrans->inputtypeByVal)
931 120 : pfree(DatumGetPointer(oldVal));
932 :
933 53756 : tuplesort_end(pertrans->sortstates[aggstate->current_set]);
934 53756 : pertrans->sortstates[aggstate->current_set] = NULL;
935 53756 : }
936 :
937 : /*
938 : * Run the transition function for a DISTINCT or ORDER BY aggregate
939 : * with more than one input. This is called after we have completed
940 : * entering all the input values into the sort object. We complete the
941 : * sort, read out the values in sorted order, and run the transition
942 : * function on each value (applying DISTINCT if appropriate).
943 : *
944 : * This function handles only one grouping set (already set in
945 : * aggstate->current_set).
946 : *
947 : * When called, CurrentMemoryContext should be the per-query context.
948 : */
949 : static void
950 72 : process_ordered_aggregate_multi(AggState *aggstate,
951 : AggStatePerTrans pertrans,
952 : AggStatePerGroup pergroupstate)
953 : {
954 72 : ExprContext *tmpcontext = aggstate->tmpcontext;
955 72 : FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
956 72 : TupleTableSlot *slot1 = pertrans->sortslot;
957 72 : TupleTableSlot *slot2 = pertrans->uniqslot;
958 72 : int numTransInputs = pertrans->numTransInputs;
959 72 : int numDistinctCols = pertrans->numDistinctCols;
960 72 : Datum newAbbrevVal = (Datum) 0;
961 72 : Datum oldAbbrevVal = (Datum) 0;
962 72 : bool haveOldValue = false;
963 72 : TupleTableSlot *save = aggstate->tmpcontext->ecxt_outertuple;
964 : int i;
965 :
966 72 : tuplesort_performsort(pertrans->sortstates[aggstate->current_set]);
967 :
968 72 : ExecClearTuple(slot1);
969 72 : if (slot2)
970 0 : ExecClearTuple(slot2);
971 :
972 252 : while (tuplesort_gettupleslot(pertrans->sortstates[aggstate->current_set],
973 : true, true, slot1, &newAbbrevVal))
974 : {
975 180 : CHECK_FOR_INTERRUPTS();
976 :
977 180 : tmpcontext->ecxt_outertuple = slot1;
978 180 : tmpcontext->ecxt_innertuple = slot2;
979 :
980 180 : if (numDistinctCols == 0 ||
981 0 : !haveOldValue ||
982 0 : newAbbrevVal != oldAbbrevVal ||
983 0 : !ExecQual(pertrans->equalfnMulti, tmpcontext))
984 : {
985 : /*
986 : * Extract the first numTransInputs columns as datums to pass to
987 : * the transfn.
988 : */
989 180 : slot_getsomeattrs(slot1, numTransInputs);
990 :
991 : /* Load values into fcinfo */
992 : /* Start from 1, since the 0th arg will be the transition value */
993 540 : for (i = 0; i < numTransInputs; i++)
994 : {
995 360 : fcinfo->args[i + 1].value = slot1->tts_values[i];
996 360 : fcinfo->args[i + 1].isnull = slot1->tts_isnull[i];
997 : }
998 :
999 180 : advance_transition_function(aggstate, pertrans, pergroupstate);
1000 :
1001 180 : if (numDistinctCols > 0)
1002 : {
1003 : /* swap the slot pointers to retain the current tuple */
1004 0 : TupleTableSlot *tmpslot = slot2;
1005 :
1006 0 : slot2 = slot1;
1007 0 : slot1 = tmpslot;
1008 : /* avoid ExecQual() calls by reusing abbreviated keys */
1009 0 : oldAbbrevVal = newAbbrevVal;
1010 0 : haveOldValue = true;
1011 : }
1012 : }
1013 :
1014 : /* Reset context each time */
1015 180 : ResetExprContext(tmpcontext);
1016 :
1017 180 : ExecClearTuple(slot1);
1018 : }
1019 :
1020 72 : if (slot2)
1021 0 : ExecClearTuple(slot2);
1022 :
1023 72 : tuplesort_end(pertrans->sortstates[aggstate->current_set]);
1024 72 : pertrans->sortstates[aggstate->current_set] = NULL;
1025 :
1026 : /* restore previous slot, potentially in use for grouping sets */
1027 72 : tmpcontext->ecxt_outertuple = save;
1028 72 : }
1029 :
1030 : /*
1031 : * Compute the final value of one aggregate.
1032 : *
1033 : * This function handles only one grouping set (already set in
1034 : * aggstate->current_set).
1035 : *
1036 : * The finalfn will be run, and the result delivered, in the
1037 : * output-tuple context; caller's CurrentMemoryContext does not matter.
1038 : * (But note that in some cases, such as when there is no finalfn, the
1039 : * result might be a pointer to or into the agg's transition value.)
1040 : *
1041 : * The finalfn uses the state as set in the transno. This also might be
1042 : * being used by another aggregate function, so it's important that we do
1043 : * nothing destructive here. Moreover, the aggregate's final value might
1044 : * get used in multiple places, so we mustn't return a R/W expanded datum.
1045 : */
1046 : static void
1047 1052696 : finalize_aggregate(AggState *aggstate,
1048 : AggStatePerAgg peragg,
1049 : AggStatePerGroup pergroupstate,
1050 : Datum *resultVal, bool *resultIsNull)
1051 : {
1052 1052696 : LOCAL_FCINFO(fcinfo, FUNC_MAX_ARGS);
1053 1052696 : bool anynull = false;
1054 : MemoryContext oldContext;
1055 : int i;
1056 : ListCell *lc;
1057 1052696 : AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
1058 :
1059 1052696 : oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
1060 :
1061 : /*
1062 : * Evaluate any direct arguments. We do this even if there's no finalfn
1063 : * (which is unlikely anyway), so that side-effects happen as expected.
1064 : * The direct arguments go into arg positions 1 and up, leaving position 0
1065 : * for the transition state value.
1066 : */
1067 1052696 : i = 1;
1068 1053670 : foreach(lc, peragg->aggdirectargs)
1069 : {
1070 974 : ExprState *expr = (ExprState *) lfirst(lc);
1071 :
1072 974 : fcinfo->args[i].value = ExecEvalExpr(expr,
1073 : aggstate->ss.ps.ps_ExprContext,
1074 : &fcinfo->args[i].isnull);
1075 974 : anynull |= fcinfo->args[i].isnull;
1076 974 : i++;
1077 : }
1078 :
1079 : /*
1080 : * Apply the agg's finalfn if one is provided, else return transValue.
1081 : */
1082 1052696 : if (OidIsValid(peragg->finalfn_oid))
1083 : {
1084 276036 : int numFinalArgs = peragg->numFinalArgs;
1085 :
1086 : /* set up aggstate->curperagg for AggGetAggref() */
1087 276036 : aggstate->curperagg = peragg;
1088 :
1089 276036 : InitFunctionCallInfoData(*fcinfo, &peragg->finalfn,
1090 : numFinalArgs,
1091 : pertrans->aggCollation,
1092 : (Node *) aggstate, NULL);
1093 :
1094 : /* Fill in the transition state value */
1095 276036 : fcinfo->args[0].value =
1096 276036 : MakeExpandedObjectReadOnly(pergroupstate->transValue,
1097 : pergroupstate->transValueIsNull,
1098 : pertrans->transtypeLen);
1099 276036 : fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
1100 276036 : anynull |= pergroupstate->transValueIsNull;
1101 :
1102 : /* Fill any remaining argument positions with nulls */
1103 366536 : for (; i < numFinalArgs; i++)
1104 : {
1105 90500 : fcinfo->args[i].value = (Datum) 0;
1106 90500 : fcinfo->args[i].isnull = true;
1107 90500 : anynull = true;
1108 : }
1109 :
1110 276036 : if (fcinfo->flinfo->fn_strict && anynull)
1111 : {
1112 : /* don't call a strict function with NULL inputs */
1113 0 : *resultVal = (Datum) 0;
1114 0 : *resultIsNull = true;
1115 : }
1116 : else
1117 : {
1118 : Datum result;
1119 :
1120 276036 : result = FunctionCallInvoke(fcinfo);
1121 276024 : *resultIsNull = fcinfo->isnull;
1122 276024 : *resultVal = MakeExpandedObjectReadOnly(result,
1123 : fcinfo->isnull,
1124 : peragg->resulttypeLen);
1125 : }
1126 276024 : aggstate->curperagg = NULL;
1127 : }
1128 : else
1129 : {
1130 776660 : *resultVal =
1131 776660 : MakeExpandedObjectReadOnly(pergroupstate->transValue,
1132 : pergroupstate->transValueIsNull,
1133 : pertrans->transtypeLen);
1134 776660 : *resultIsNull = pergroupstate->transValueIsNull;
1135 : }
1136 :
1137 1052684 : MemoryContextSwitchTo(oldContext);
1138 1052684 : }
1139 :
1140 : /*
1141 : * Compute the output value of one partial aggregate.
1142 : *
1143 : * The serialization function will be run, and the result delivered, in the
1144 : * output-tuple context; caller's CurrentMemoryContext does not matter.
1145 : */
1146 : static void
1147 11726 : finalize_partialaggregate(AggState *aggstate,
1148 : AggStatePerAgg peragg,
1149 : AggStatePerGroup pergroupstate,
1150 : Datum *resultVal, bool *resultIsNull)
1151 : {
1152 11726 : AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
1153 : MemoryContext oldContext;
1154 :
1155 11726 : oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
1156 :
1157 : /*
1158 : * serialfn_oid will be set if we must serialize the transvalue before
1159 : * returning it
1160 : */
1161 11726 : if (OidIsValid(pertrans->serialfn_oid))
1162 : {
1163 : /* Don't call a strict serialization function with NULL input. */
1164 506 : if (pertrans->serialfn.fn_strict && pergroupstate->transValueIsNull)
1165 : {
1166 128 : *resultVal = (Datum) 0;
1167 128 : *resultIsNull = true;
1168 : }
1169 : else
1170 : {
1171 378 : FunctionCallInfo fcinfo = pertrans->serialfn_fcinfo;
1172 : Datum result;
1173 :
1174 378 : fcinfo->args[0].value =
1175 378 : MakeExpandedObjectReadOnly(pergroupstate->transValue,
1176 : pergroupstate->transValueIsNull,
1177 : pertrans->transtypeLen);
1178 378 : fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
1179 378 : fcinfo->isnull = false;
1180 :
1181 378 : result = FunctionCallInvoke(fcinfo);
1182 378 : *resultIsNull = fcinfo->isnull;
1183 378 : *resultVal = MakeExpandedObjectReadOnly(result,
1184 : fcinfo->isnull,
1185 : peragg->resulttypeLen);
1186 : }
1187 : }
1188 : else
1189 : {
1190 11220 : *resultVal =
1191 11220 : MakeExpandedObjectReadOnly(pergroupstate->transValue,
1192 : pergroupstate->transValueIsNull,
1193 : pertrans->transtypeLen);
1194 11220 : *resultIsNull = pergroupstate->transValueIsNull;
1195 : }
1196 :
1197 11726 : MemoryContextSwitchTo(oldContext);
1198 11726 : }
1199 :
1200 : /*
1201 : * Extract the attributes that make up the grouping key into the
1202 : * hashslot. This is necessary to compute the hash or perform a lookup.
1203 : */
1204 : static inline void
1205 6322820 : prepare_hash_slot(AggStatePerHash perhash,
1206 : TupleTableSlot *inputslot,
1207 : TupleTableSlot *hashslot)
1208 : {
1209 : int i;
1210 :
1211 : /* transfer just the needed columns into hashslot */
1212 6322820 : slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
1213 6322820 : ExecClearTuple(hashslot);
1214 :
1215 15594396 : for (i = 0; i < perhash->numhashGrpCols; i++)
1216 : {
1217 9271576 : int varNumber = perhash->hashGrpColIdxInput[i] - 1;
1218 :
1219 9271576 : hashslot->tts_values[i] = inputslot->tts_values[varNumber];
1220 9271576 : hashslot->tts_isnull[i] = inputslot->tts_isnull[varNumber];
1221 : }
1222 6322820 : ExecStoreVirtualTuple(hashslot);
1223 6322820 : }
1224 :
1225 : /*
1226 : * Prepare to finalize and project based on the specified representative tuple
1227 : * slot and grouping set.
1228 : *
1229 : * In the specified tuple slot, force to null all attributes that should be
1230 : * read as null in the context of the current grouping set. Also stash the
1231 : * current group bitmap where GroupingExpr can get at it.
1232 : *
1233 : * This relies on three conditions:
1234 : *
1235 : * 1) Nothing is ever going to try and extract the whole tuple from this slot,
1236 : * only reference it in evaluations, which will only access individual
1237 : * attributes.
1238 : *
1239 : * 2) No system columns are going to need to be nulled. (If a system column is
1240 : * referenced in a group clause, it is actually projected in the outer plan
1241 : * tlist.)
1242 : *
1243 : * 3) Within a given phase, we never need to recover the value of an attribute
1244 : * once it has been set to null.
1245 : *
1246 : * Poking into the slot this way is a bit ugly, but the consensus is that the
1247 : * alternative was worse.
1248 : */
1249 : static void
1250 765994 : prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet)
1251 : {
1252 765994 : if (aggstate->phase->grouped_cols)
1253 : {
1254 483414 : Bitmapset *grouped_cols = aggstate->phase->grouped_cols[currentSet];
1255 :
1256 483414 : aggstate->grouped_cols = grouped_cols;
1257 :
1258 483414 : if (TTS_EMPTY(slot))
1259 : {
1260 : /*
1261 : * Force all values to be NULL if working on an empty input tuple
1262 : * (i.e. an empty grouping set for which no input rows were
1263 : * supplied).
1264 : */
1265 48 : ExecStoreAllNullTuple(slot);
1266 : }
1267 483366 : else if (aggstate->all_grouped_cols)
1268 : {
1269 : ListCell *lc;
1270 :
1271 : /* all_grouped_cols is arranged in desc order */
1272 483318 : slot_getsomeattrs(slot, linitial_int(aggstate->all_grouped_cols));
1273 :
1274 1373718 : foreach(lc, aggstate->all_grouped_cols)
1275 : {
1276 890400 : int attnum = lfirst_int(lc);
1277 :
1278 890400 : if (!bms_is_member(attnum, grouped_cols))
1279 57538 : slot->tts_isnull[attnum - 1] = true;
1280 : }
1281 : }
1282 : }
1283 765994 : }
1284 :
1285 : /*
1286 : * Compute the final value of all aggregates for one group.
1287 : *
1288 : * This function handles only one grouping set at a time, which the caller must
1289 : * have selected. It's also the caller's responsibility to adjust the supplied
1290 : * pergroup parameter to point to the current set's transvalues.
1291 : *
1292 : * Results are stored in the output econtext aggvalues/aggnulls.
1293 : */
1294 : static void
1295 765994 : finalize_aggregates(AggState *aggstate,
1296 : AggStatePerAgg peraggs,
1297 : AggStatePerGroup pergroup)
1298 : {
1299 765994 : ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
1300 765994 : Datum *aggvalues = econtext->ecxt_aggvalues;
1301 765994 : bool *aggnulls = econtext->ecxt_aggnulls;
1302 : int aggno;
1303 :
1304 : /*
1305 : * If there were any DISTINCT and/or ORDER BY aggregates, sort their
1306 : * inputs and run the transition functions.
1307 : */
1308 1830158 : for (int transno = 0; transno < aggstate->numtrans; transno++)
1309 : {
1310 1064164 : AggStatePerTrans pertrans = &aggstate->pertrans[transno];
1311 : AggStatePerGroup pergroupstate;
1312 :
1313 1064164 : pergroupstate = &pergroup[transno];
1314 :
1315 1064164 : if (pertrans->aggsortrequired)
1316 : {
1317 : Assert(aggstate->aggstrategy != AGG_HASHED &&
1318 : aggstate->aggstrategy != AGG_MIXED);
1319 :
1320 53828 : if (pertrans->numInputs == 1)
1321 53756 : process_ordered_aggregate_single(aggstate,
1322 : pertrans,
1323 : pergroupstate);
1324 : else
1325 72 : process_ordered_aggregate_multi(aggstate,
1326 : pertrans,
1327 : pergroupstate);
1328 : }
1329 1010336 : else if (pertrans->numDistinctCols > 0 && pertrans->haslast)
1330 : {
1331 18360 : pertrans->haslast = false;
1332 :
1333 18360 : if (pertrans->numDistinctCols == 1)
1334 : {
1335 18264 : if (!pertrans->inputtypeByVal && !pertrans->lastisnull)
1336 262 : pfree(DatumGetPointer(pertrans->lastdatum));
1337 :
1338 18264 : pertrans->lastisnull = false;
1339 18264 : pertrans->lastdatum = (Datum) 0;
1340 : }
1341 : else
1342 96 : ExecClearTuple(pertrans->uniqslot);
1343 : }
1344 : }
1345 :
1346 : /*
1347 : * Run the final functions.
1348 : */
1349 1830404 : for (aggno = 0; aggno < aggstate->numaggs; aggno++)
1350 : {
1351 1064422 : AggStatePerAgg peragg = &peraggs[aggno];
1352 1064422 : int transno = peragg->transno;
1353 : AggStatePerGroup pergroupstate;
1354 :
1355 1064422 : pergroupstate = &pergroup[transno];
1356 :
1357 1064422 : if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
1358 11726 : finalize_partialaggregate(aggstate, peragg, pergroupstate,
1359 11726 : &aggvalues[aggno], &aggnulls[aggno]);
1360 : else
1361 1052696 : finalize_aggregate(aggstate, peragg, pergroupstate,
1362 1052696 : &aggvalues[aggno], &aggnulls[aggno]);
1363 : }
1364 765982 : }
1365 :
1366 : /*
1367 : * Project the result of a group (whose aggs have already been calculated by
1368 : * finalize_aggregates). Returns the result slot, or NULL if no row is
1369 : * projected (suppressed by qual).
1370 : */
1371 : static TupleTableSlot *
1372 765982 : project_aggregates(AggState *aggstate)
1373 : {
1374 765982 : ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
1375 :
1376 : /*
1377 : * Check the qual (HAVING clause); if the group does not match, ignore it.
1378 : */
1379 765982 : if (ExecQual(aggstate->ss.ps.qual, econtext))
1380 : {
1381 : /*
1382 : * Form and return projection tuple using the aggregate results and
1383 : * the representative input tuple.
1384 : */
1385 659588 : return ExecProject(aggstate->ss.ps.ps_ProjInfo);
1386 : }
1387 : else
1388 106394 : InstrCountFiltered1(aggstate, 1);
1389 :
1390 106394 : return NULL;
1391 : }
1392 :
1393 : /*
1394 : * Find input-tuple columns that are needed, dividing them into
1395 : * aggregated and unaggregated sets.
1396 : */
1397 : static void
1398 5666 : find_cols(AggState *aggstate, Bitmapset **aggregated, Bitmapset **unaggregated)
1399 : {
1400 5666 : Agg *agg = (Agg *) aggstate->ss.ps.plan;
1401 : FindColsContext context;
1402 :
1403 5666 : context.is_aggref = false;
1404 5666 : context.aggregated = NULL;
1405 5666 : context.unaggregated = NULL;
1406 :
1407 : /* Examine tlist and quals */
1408 5666 : (void) find_cols_walker((Node *) agg->plan.targetlist, &context);
1409 5666 : (void) find_cols_walker((Node *) agg->plan.qual, &context);
1410 :
1411 : /* In some cases, grouping columns will not appear in the tlist */
1412 14360 : for (int i = 0; i < agg->numCols; i++)
1413 8694 : context.unaggregated = bms_add_member(context.unaggregated,
1414 8694 : agg->grpColIdx[i]);
1415 :
1416 5666 : *aggregated = context.aggregated;
1417 5666 : *unaggregated = context.unaggregated;
1418 5666 : }
1419 :
1420 : static bool
1421 66958 : find_cols_walker(Node *node, FindColsContext *context)
1422 : {
1423 66958 : if (node == NULL)
1424 11948 : return false;
1425 55010 : if (IsA(node, Var))
1426 : {
1427 14652 : Var *var = (Var *) node;
1428 :
1429 : /* setrefs.c should have set the varno to OUTER_VAR */
1430 : Assert(var->varno == OUTER_VAR);
1431 : Assert(var->varlevelsup == 0);
1432 14652 : if (context->is_aggref)
1433 4580 : context->aggregated = bms_add_member(context->aggregated,
1434 4580 : var->varattno);
1435 : else
1436 10072 : context->unaggregated = bms_add_member(context->unaggregated,
1437 10072 : var->varattno);
1438 14652 : return false;
1439 : }
1440 40358 : if (IsA(node, Aggref))
1441 : {
1442 : Assert(!context->is_aggref);
1443 6758 : context->is_aggref = true;
1444 6758 : expression_tree_walker(node, find_cols_walker, context);
1445 6758 : context->is_aggref = false;
1446 6758 : return false;
1447 : }
1448 33600 : return expression_tree_walker(node, find_cols_walker, context);
1449 : }
1450 :
1451 : /*
1452 : * (Re-)initialize the hash table(s) to empty.
1453 : *
1454 : * To implement hashed aggregation, we need a hashtable that stores a
1455 : * representative tuple and an array of AggStatePerGroup structs for each
1456 : * distinct set of GROUP BY column values. We compute the hash key from the
1457 : * GROUP BY columns. The per-group data is allocated in initialize_hash_entry(),
1458 : * for each entry.
1459 : *
1460 : * We have a separate hashtable and associated perhash data structure for each
1461 : * grouping set for which we're doing hashing.
1462 : *
1463 : * The contents of the hash tables always live in the hashcontext's per-tuple
1464 : * memory context (there is only one of these for all tables together, since
1465 : * they are all reset at the same time).
1466 : */
1467 : static void
1468 15370 : build_hash_tables(AggState *aggstate)
1469 : {
1470 : int setno;
1471 :
1472 31060 : for (setno = 0; setno < aggstate->num_hashes; ++setno)
1473 : {
1474 15690 : AggStatePerHash perhash = &aggstate->perhash[setno];
1475 : long nbuckets;
1476 : Size memory;
1477 :
1478 15690 : if (perhash->hashtable != NULL)
1479 : {
1480 11094 : ResetTupleHashTable(perhash->hashtable);
1481 11094 : continue;
1482 : }
1483 :
1484 : Assert(perhash->aggnode->numGroups > 0);
1485 :
1486 4596 : memory = aggstate->hash_mem_limit / aggstate->num_hashes;
1487 :
1488 : /* choose reasonable number of buckets per hashtable */
1489 4596 : nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
1490 4596 : perhash->aggnode->numGroups,
1491 : memory);
1492 :
1493 : #ifdef USE_INJECTION_POINTS
1494 4596 : if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-oversize-table"))
1495 : {
1496 0 : nbuckets = memory / sizeof(TupleHashEntryData);
1497 0 : INJECTION_POINT_CACHED("hash-aggregate-oversize-table");
1498 : }
1499 : #endif
1500 :
1501 4596 : build_hash_table(aggstate, setno, nbuckets);
1502 : }
1503 :
1504 15370 : aggstate->hash_ngroups_current = 0;
1505 15370 : }
1506 :
1507 : /*
1508 : * Build a single hashtable for this grouping set.
1509 : */
1510 : static void
1511 4596 : build_hash_table(AggState *aggstate, int setno, long nbuckets)
1512 : {
1513 4596 : AggStatePerHash perhash = &aggstate->perhash[setno];
1514 4596 : MemoryContext metacxt = aggstate->hash_metacxt;
1515 4596 : MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
1516 4596 : MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
1517 : Size additionalsize;
1518 :
1519 : Assert(aggstate->aggstrategy == AGG_HASHED ||
1520 : aggstate->aggstrategy == AGG_MIXED);
1521 :
1522 : /*
1523 : * Used to make sure initial hash table allocation does not exceed
1524 : * hash_mem. Note that the estimate does not include space for
1525 : * pass-by-reference transition data values, nor for the representative
1526 : * tuple of each group.
1527 : */
1528 4596 : additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
1529 :
1530 9192 : perhash->hashtable = BuildTupleHashTable(&aggstate->ss.ps,
1531 4596 : perhash->hashslot->tts_tupleDescriptor,
1532 4596 : perhash->hashslot->tts_ops,
1533 : perhash->numCols,
1534 : perhash->hashGrpColIdxHash,
1535 4596 : perhash->eqfuncoids,
1536 : perhash->hashfunctions,
1537 4596 : perhash->aggnode->grpCollations,
1538 : nbuckets,
1539 : additionalsize,
1540 : metacxt,
1541 : hashcxt,
1542 : tmpcxt,
1543 4596 : DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
1544 4596 : }
1545 :
1546 : /*
1547 : * Compute columns that actually need to be stored in hashtable entries. The
1548 : * incoming tuples from the child plan node will contain grouping columns,
1549 : * other columns referenced in our targetlist and qual, columns used to
1550 : * compute the aggregate functions, and perhaps just junk columns we don't use
1551 : * at all. Only columns of the first two types need to be stored in the
1552 : * hashtable, and getting rid of the others can make the table entries
1553 : * significantly smaller. The hashtable only contains the relevant columns,
1554 : * and is packed/unpacked in lookup_hash_entries() / agg_retrieve_hash_table()
1555 : * into the format of the normal input descriptor.
1556 : *
1557 : * Additional columns, in addition to the columns grouped by, come from two
1558 : * sources: Firstly functionally dependent columns that we don't need to group
1559 : * by themselves, and secondly ctids for row-marks.
1560 : *
1561 : * To eliminate duplicates, we build a bitmapset of the needed columns, and
1562 : * then build an array of the columns included in the hashtable. We might
1563 : * still have duplicates if the passed-in grpColIdx has them, which can happen
1564 : * in edge cases from semijoins/distinct; these can't always be removed,
1565 : * because it's not certain that the duplicate cols will be using the same
1566 : * hash function.
1567 : *
1568 : * Note that the array is preserved over ExecReScanAgg, so we allocate it in
1569 : * the per-query context (unlike the hash table itself).
1570 : */
1571 : static void
1572 5666 : find_hash_columns(AggState *aggstate)
1573 : {
1574 : Bitmapset *base_colnos;
1575 : Bitmapset *aggregated_colnos;
1576 5666 : TupleDesc scanDesc = aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor;
1577 5666 : List *outerTlist = outerPlanState(aggstate)->plan->targetlist;
1578 5666 : int numHashes = aggstate->num_hashes;
1579 5666 : EState *estate = aggstate->ss.ps.state;
1580 : int j;
1581 :
1582 : /* Find Vars that will be needed in tlist and qual */
1583 5666 : find_cols(aggstate, &aggregated_colnos, &base_colnos);
1584 5666 : aggstate->colnos_needed = bms_union(base_colnos, aggregated_colnos);
1585 5666 : aggstate->max_colno_needed = 0;
1586 5666 : aggstate->all_cols_needed = true;
1587 :
1588 24300 : for (int i = 0; i < scanDesc->natts; i++)
1589 : {
1590 18634 : int colno = i + 1;
1591 :
1592 18634 : if (bms_is_member(colno, aggstate->colnos_needed))
1593 12150 : aggstate->max_colno_needed = colno;
1594 : else
1595 6484 : aggstate->all_cols_needed = false;
1596 : }
1597 :
1598 11798 : for (j = 0; j < numHashes; ++j)
1599 : {
1600 6132 : AggStatePerHash perhash = &aggstate->perhash[j];
1601 6132 : Bitmapset *colnos = bms_copy(base_colnos);
1602 6132 : AttrNumber *grpColIdx = perhash->aggnode->grpColIdx;
1603 6132 : List *hashTlist = NIL;
1604 : TupleDesc hashDesc;
1605 : int maxCols;
1606 : int i;
1607 :
1608 6132 : perhash->largestGrpColIdx = 0;
1609 :
1610 : /*
1611 : * If we're doing grouping sets, then some Vars might be referenced in
1612 : * tlist/qual for the benefit of other grouping sets, but not needed
1613 : * when hashing; i.e. prepare_projection_slot will null them out, so
1614 : * there'd be no point storing them. Use prepare_projection_slot's
1615 : * logic to determine which.
1616 : */
1617 6132 : if (aggstate->phases[0].grouped_cols)
1618 : {
1619 6132 : Bitmapset *grouped_cols = aggstate->phases[0].grouped_cols[j];
1620 : ListCell *lc;
1621 :
1622 16498 : foreach(lc, aggstate->all_grouped_cols)
1623 : {
1624 10366 : int attnum = lfirst_int(lc);
1625 :
1626 10366 : if (!bms_is_member(attnum, grouped_cols))
1627 1104 : colnos = bms_del_member(colnos, attnum);
1628 : }
1629 : }
1630 :
1631 : /*
1632 : * Compute maximum number of input columns accounting for possible
1633 : * duplications in the grpColIdx array, which can happen in some edge
1634 : * cases where HashAggregate was generated as part of a semijoin or a
1635 : * DISTINCT.
1636 : */
1637 6132 : maxCols = bms_num_members(colnos) + perhash->numCols;
1638 :
1639 6132 : perhash->hashGrpColIdxInput =
1640 6132 : palloc(maxCols * sizeof(AttrNumber));
1641 6132 : perhash->hashGrpColIdxHash =
1642 6132 : palloc(perhash->numCols * sizeof(AttrNumber));
1643 :
1644 : /* Add all the grouping columns to colnos */
1645 15400 : for (i = 0; i < perhash->numCols; i++)
1646 9268 : colnos = bms_add_member(colnos, grpColIdx[i]);
1647 :
1648 : /*
1649 : * First build mapping for columns directly hashed. These are the
1650 : * first, because they'll be accessed when computing hash values and
1651 : * comparing tuples for exact matches. We also build simple mapping
1652 : * for execGrouping, so it knows where to find the to-be-hashed /
1653 : * compared columns in the input.
1654 : */
1655 15400 : for (i = 0; i < perhash->numCols; i++)
1656 : {
1657 9268 : perhash->hashGrpColIdxInput[i] = grpColIdx[i];
1658 9268 : perhash->hashGrpColIdxHash[i] = i + 1;
1659 9268 : perhash->numhashGrpCols++;
1660 : /* delete already mapped columns */
1661 9268 : colnos = bms_del_member(colnos, grpColIdx[i]);
1662 : }
1663 :
1664 : /* and add the remaining columns */
1665 6132 : i = -1;
1666 6848 : while ((i = bms_next_member(colnos, i)) >= 0)
1667 : {
1668 716 : perhash->hashGrpColIdxInput[perhash->numhashGrpCols] = i;
1669 716 : perhash->numhashGrpCols++;
1670 : }
1671 :
1672 : /* and build a tuple descriptor for the hashtable */
1673 16116 : for (i = 0; i < perhash->numhashGrpCols; i++)
1674 : {
1675 9984 : int varNumber = perhash->hashGrpColIdxInput[i] - 1;
1676 :
1677 9984 : hashTlist = lappend(hashTlist, list_nth(outerTlist, varNumber));
1678 9984 : perhash->largestGrpColIdx =
1679 9984 : Max(varNumber + 1, perhash->largestGrpColIdx);
1680 : }
1681 :
1682 6132 : hashDesc = ExecTypeFromTL(hashTlist);
1683 :
1684 6132 : execTuplesHashPrepare(perhash->numCols,
1685 6132 : perhash->aggnode->grpOperators,
1686 : &perhash->eqfuncoids,
1687 : &perhash->hashfunctions);
1688 6132 : perhash->hashslot =
1689 6132 : ExecAllocTableSlot(&estate->es_tupleTable, hashDesc,
1690 : &TTSOpsMinimalTuple);
1691 :
1692 6132 : list_free(hashTlist);
1693 6132 : bms_free(colnos);
1694 : }
1695 :
1696 5666 : bms_free(base_colnos);
1697 5666 : }
1698 :
1699 : /*
1700 : * Estimate per-hash-table-entry overhead.
1701 : */
1702 : Size
1703 26606 : hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
1704 : {
1705 : Size tupleChunkSize;
1706 : Size pergroupChunkSize;
1707 : Size transitionChunkSize;
1708 26606 : Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
1709 : tupleWidth);
1710 26606 : Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
1711 :
1712 26606 : tupleChunkSize = CHUNKHDRSZ + tupleSize;
1713 :
1714 26606 : if (pergroupSize > 0)
1715 14098 : pergroupChunkSize = CHUNKHDRSZ + pergroupSize;
1716 : else
1717 12508 : pergroupChunkSize = 0;
1718 :
1719 26606 : if (transitionSpace > 0)
1720 4750 : transitionChunkSize = CHUNKHDRSZ + transitionSpace;
1721 : else
1722 21856 : transitionChunkSize = 0;
1723 :
1724 : return
1725 : sizeof(TupleHashEntryData) +
1726 26606 : tupleChunkSize +
1727 26606 : pergroupChunkSize +
1728 : transitionChunkSize;
1729 : }
1730 :
1731 : /*
1732 : * hashagg_recompile_expressions()
1733 : *
1734 : * Identifies the right phase, compiles the right expression given the
1735 : * arguments, and then sets phase->evalfunc to that expression.
1736 : *
1737 : * Different versions of the compiled expression are needed depending on
1738 : * whether hash aggregation has spilled or not, and whether it's reading from
1739 : * the outer plan or a tape. Before spilling to disk, the expression reads
1740 : * from the outer plan and does not need to perform a NULL check. After
1741 : * HashAgg begins to spill, new groups will not be created in the hash table,
1742 : * and the AggStatePerGroup array may be NULL; therefore we need to add a null
1743 : * pointer check to the expression. Then, when reading spilled data from a
1744 : * tape, we change the outer slot type to be a fixed minimal tuple slot.
1745 : *
1746 : * It would be wasteful to recompile every time, so cache the compiled
1747 : * expressions in the AggStatePerPhase, and reuse when appropriate.
1748 : */
1749 : static void
1750 64406 : hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
1751 : {
1752 : AggStatePerPhase phase;
1753 64406 : int i = minslot ? 1 : 0;
1754 64406 : int j = nullcheck ? 1 : 0;
1755 :
1756 : Assert(aggstate->aggstrategy == AGG_HASHED ||
1757 : aggstate->aggstrategy == AGG_MIXED);
1758 :
1759 64406 : if (aggstate->aggstrategy == AGG_HASHED)
1760 11834 : phase = &aggstate->phases[0];
1761 : else /* AGG_MIXED */
1762 52572 : phase = &aggstate->phases[1];
1763 :
1764 64406 : if (phase->evaltrans_cache[i][j] == NULL)
1765 : {
1766 76 : const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
1767 76 : bool outerfixed = aggstate->ss.ps.outeropsfixed;
1768 76 : bool dohash = true;
1769 76 : bool dosort = false;
1770 :
1771 : /*
1772 : * If minslot is true, that means we are processing a spilled batch
1773 : * (inside agg_refill_hash_table()), and we must not advance the
1774 : * sorted grouping sets.
1775 : */
1776 76 : if (aggstate->aggstrategy == AGG_MIXED && !minslot)
1777 12 : dosort = true;
1778 :
1779 : /* temporarily change the outerops while compiling the expression */
1780 76 : if (minslot)
1781 : {
1782 38 : aggstate->ss.ps.outerops = &TTSOpsMinimalTuple;
1783 38 : aggstate->ss.ps.outeropsfixed = true;
1784 : }
1785 :
1786 76 : phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
1787 : dosort, dohash,
1788 : nullcheck);
1789 :
1790 : /* change back */
1791 76 : aggstate->ss.ps.outerops = outerops;
1792 76 : aggstate->ss.ps.outeropsfixed = outerfixed;
1793 : }
1794 :
1795 64406 : phase->evaltrans = phase->evaltrans_cache[i][j];
1796 64406 : }
1797 :
1798 : /*
1799 : * Set limits that trigger spilling to avoid exceeding hash_mem. Consider the
1800 : * number of partitions we expect to create (if we do spill).
1801 : *
1802 : * There are two limits: a memory limit, and also an ngroups limit. The
1803 : * ngroups limit becomes important when we expect transition values to grow
1804 : * substantially larger than the initial value.
1805 : */
1806 : void
1807 51284 : hash_agg_set_limits(double hashentrysize, double input_groups, int used_bits,
1808 : Size *mem_limit, uint64 *ngroups_limit,
1809 : int *num_partitions)
1810 : {
1811 : int npartitions;
1812 : Size partition_mem;
1813 51284 : Size hash_mem_limit = get_hash_memory_limit();
1814 :
1815 : /* if not expected to spill, use all of hash_mem */
1816 51284 : if (input_groups * hashentrysize <= hash_mem_limit)
1817 : {
1818 48854 : if (num_partitions != NULL)
1819 24268 : *num_partitions = 0;
1820 48854 : *mem_limit = hash_mem_limit;
1821 48854 : *ngroups_limit = hash_mem_limit / hashentrysize;
1822 48854 : return;
1823 : }
1824 :
1825 : /*
1826 : * Calculate expected memory requirements for spilling, which is the size
1827 : * of the buffers needed for all the tapes that need to be open at once.
1828 : * Then, subtract that from the memory available for holding hash tables.
1829 : */
1830 2430 : npartitions = hash_choose_num_partitions(input_groups,
1831 : hashentrysize,
1832 : used_bits,
1833 : NULL);
1834 2430 : if (num_partitions != NULL)
1835 96 : *num_partitions = npartitions;
1836 :
1837 2430 : partition_mem =
1838 2430 : HASHAGG_READ_BUFFER_SIZE +
1839 : HASHAGG_WRITE_BUFFER_SIZE * npartitions;
1840 :
1841 : /*
1842 : * Don't set the limit below 3/4 of hash_mem. In that case, we are at the
1843 : * minimum number of partitions, so we aren't going to dramatically exceed
1844 : * work mem anyway.
1845 : */
1846 2430 : if (hash_mem_limit > 4 * partition_mem)
1847 0 : *mem_limit = hash_mem_limit - partition_mem;
1848 : else
1849 2430 : *mem_limit = hash_mem_limit * 0.75;
1850 :
1851 2430 : if (*mem_limit > hashentrysize)
1852 2430 : *ngroups_limit = *mem_limit / hashentrysize;
1853 : else
1854 0 : *ngroups_limit = 1;
1855 : }
1856 :
1857 : /*
1858 : * hash_agg_check_limits
1859 : *
1860 : * After adding a new group to the hash table, check whether we need to enter
1861 : * spill mode. Allocations may happen without adding new groups (for instance,
1862 : * if the transition state size grows), so this check is imperfect.
1863 : */
1864 : static void
1865 452170 : hash_agg_check_limits(AggState *aggstate)
1866 : {
1867 452170 : uint64 ngroups = aggstate->hash_ngroups_current;
1868 452170 : Size meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
1869 : true);
1870 452170 : Size hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
1871 : true);
1872 452170 : bool do_spill = false;
1873 :
1874 : #ifdef USE_INJECTION_POINTS
1875 452170 : if (ngroups >= 1000)
1876 : {
1877 65632 : if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-spill-1000"))
1878 : {
1879 10 : do_spill = true;
1880 10 : INJECTION_POINT_CACHED("hash-aggregate-spill-1000");
1881 : }
1882 : }
1883 : #endif
1884 :
1885 : /*
1886 : * Don't spill unless there's at least one group in the hash table so we
1887 : * can be sure to make progress even in edge cases.
1888 : */
1889 452170 : if (aggstate->hash_ngroups_current > 0 &&
1890 452170 : (meta_mem + hashkey_mem > aggstate->hash_mem_limit ||
1891 425758 : ngroups > aggstate->hash_ngroups_limit))
1892 : {
1893 26442 : do_spill = true;
1894 : }
1895 :
1896 452170 : if (do_spill)
1897 26452 : hash_agg_enter_spill_mode(aggstate);
1898 452170 : }
1899 :
1900 : /*
1901 : * Enter "spill mode", meaning that no new groups are added to any of the hash
1902 : * tables. Tuples that would create a new group are instead spilled, and
1903 : * processed later.
1904 : */
1905 : static void
1906 26452 : hash_agg_enter_spill_mode(AggState *aggstate)
1907 : {
1908 26452 : INJECTION_POINT("hash-aggregate-enter-spill-mode");
1909 26452 : aggstate->hash_spill_mode = true;
1910 26452 : hashagg_recompile_expressions(aggstate, aggstate->table_filled, true);
1911 :
1912 26452 : if (!aggstate->hash_ever_spilled)
1913 : {
1914 : Assert(aggstate->hash_tapeset == NULL);
1915 : Assert(aggstate->hash_spills == NULL);
1916 :
1917 56 : aggstate->hash_ever_spilled = true;
1918 :
1919 56 : aggstate->hash_tapeset = LogicalTapeSetCreate(true, NULL, -1);
1920 :
1921 56 : aggstate->hash_spills = palloc(sizeof(HashAggSpill) * aggstate->num_hashes);
1922 :
1923 172 : for (int setno = 0; setno < aggstate->num_hashes; setno++)
1924 : {
1925 116 : AggStatePerHash perhash = &aggstate->perhash[setno];
1926 116 : HashAggSpill *spill = &aggstate->hash_spills[setno];
1927 :
1928 116 : hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
1929 116 : perhash->aggnode->numGroups,
1930 : aggstate->hashentrysize);
1931 : }
1932 : }
1933 26452 : }
1934 :
1935 : /*
1936 : * Update metrics after filling the hash table.
1937 : *
1938 : * If reading from the outer plan, from_tape should be false; if reading from
1939 : * another tape, from_tape should be true.
1940 : */
1941 : static void
1942 42006 : hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
1943 : {
1944 : Size meta_mem;
1945 : Size hashkey_mem;
1946 : Size buffer_mem;
1947 : Size total_mem;
1948 :
1949 42006 : if (aggstate->aggstrategy != AGG_MIXED &&
1950 15600 : aggstate->aggstrategy != AGG_HASHED)
1951 0 : return;
1952 :
1953 : /* memory for the hash table itself */
1954 42006 : meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
1955 :
1956 : /* memory for the group keys and transition states */
1957 42006 : hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
1958 :
1959 : /* memory for read/write tape buffers, if spilled */
1960 42006 : buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
1961 42006 : if (from_tape)
1962 26920 : buffer_mem += HASHAGG_READ_BUFFER_SIZE;
1963 :
1964 : /* update peak mem */
1965 42006 : total_mem = meta_mem + hashkey_mem + buffer_mem;
1966 42006 : if (total_mem > aggstate->hash_mem_peak)
1967 4096 : aggstate->hash_mem_peak = total_mem;
1968 :
1969 : /* update disk usage */
1970 42006 : if (aggstate->hash_tapeset != NULL)
1971 : {
1972 26976 : uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeset) * (BLCKSZ / 1024);
1973 :
1974 26976 : if (aggstate->hash_disk_used < disk_used)
1975 46 : aggstate->hash_disk_used = disk_used;
1976 : }
1977 :
1978 : /* update hashentrysize estimate based on contents */
1979 42006 : if (aggstate->hash_ngroups_current > 0)
1980 : {
1981 41656 : aggstate->hashentrysize =
1982 41656 : sizeof(TupleHashEntryData) +
1983 41656 : (hashkey_mem / (double) aggstate->hash_ngroups_current);
1984 : }
1985 : }
1986 :
1987 : /*
1988 : * Choose a reasonable number of buckets for the initial hash table size.
1989 : */
1990 : static long
1991 4596 : hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
1992 : {
1993 : long max_nbuckets;
1994 4596 : long nbuckets = ngroups;
1995 :
1996 4596 : max_nbuckets = memory / hashentrysize;
1997 :
1998 : /*
1999 : * Underestimating is better than overestimating. Too many buckets crowd
2000 : * out space for group keys and transition state values.
2001 : */
2002 4596 : max_nbuckets >>= 1;
2003 :
2004 4596 : if (nbuckets > max_nbuckets)
2005 72 : nbuckets = max_nbuckets;
2006 :
2007 4596 : return Max(nbuckets, 1);
2008 : }
2009 :
2010 : /*
2011 : * Determine the number of partitions to create when spilling, which will
2012 : * always be a power of two. If log2_npartitions is non-NULL, set
2013 : * *log2_npartitions to the log2() of the number of partitions.
2014 : */
2015 : static int
2016 15046 : hash_choose_num_partitions(double input_groups, double hashentrysize,
2017 : int used_bits, int *log2_npartitions)
2018 : {
2019 15046 : Size hash_mem_limit = get_hash_memory_limit();
2020 : double partition_limit;
2021 : double mem_wanted;
2022 : double dpartitions;
2023 : int npartitions;
2024 : int partition_bits;
2025 :
2026 : /*
2027 : * Avoid creating so many partitions that the memory requirements of the
2028 : * open partition files are greater than 1/4 of hash_mem.
2029 : */
2030 15046 : partition_limit =
2031 15046 : (hash_mem_limit * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
2032 : HASHAGG_WRITE_BUFFER_SIZE;
2033 :
2034 15046 : mem_wanted = HASHAGG_PARTITION_FACTOR * input_groups * hashentrysize;
2035 :
2036 : /* make enough partitions so that each one is likely to fit in memory */
2037 15046 : dpartitions = 1 + (mem_wanted / hash_mem_limit);
2038 :
2039 15046 : if (dpartitions > partition_limit)
2040 15024 : dpartitions = partition_limit;
2041 :
2042 15046 : if (dpartitions < HASHAGG_MIN_PARTITIONS)
2043 15046 : dpartitions = HASHAGG_MIN_PARTITIONS;
2044 15046 : if (dpartitions > HASHAGG_MAX_PARTITIONS)
2045 0 : dpartitions = HASHAGG_MAX_PARTITIONS;
2046 :
2047 : /* HASHAGG_MAX_PARTITIONS limit makes this safe */
2048 15046 : npartitions = (int) dpartitions;
2049 :
2050 : /* ceil(log2(npartitions)) */
2051 15046 : partition_bits = my_log2(npartitions);
2052 :
2053 : /* make sure that we don't exhaust the hash bits */
2054 15046 : if (partition_bits + used_bits >= 32)
2055 0 : partition_bits = 32 - used_bits;
2056 :
2057 15046 : if (log2_npartitions != NULL)
2058 12616 : *log2_npartitions = partition_bits;
2059 :
2060 : /* number of partitions will be a power of two */
2061 15046 : npartitions = 1 << partition_bits;
2062 :
2063 15046 : return npartitions;
2064 : }
2065 :
2066 : /*
2067 : * Initialize a freshly-created TupleHashEntry.
2068 : */
2069 : static void
2070 452170 : initialize_hash_entry(AggState *aggstate, TupleHashTable hashtable,
2071 : TupleHashEntry entry)
2072 : {
2073 : AggStatePerGroup pergroup;
2074 : int transno;
2075 :
2076 452170 : aggstate->hash_ngroups_current++;
2077 452170 : hash_agg_check_limits(aggstate);
2078 :
2079 : /* no need to allocate or initialize per-group state */
2080 452170 : if (aggstate->numtrans == 0)
2081 200934 : return;
2082 :
2083 : pergroup = (AggStatePerGroup)
2084 251236 : MemoryContextAlloc(hashtable->tablecxt,
2085 251236 : sizeof(AggStatePerGroupData) * aggstate->numtrans);
2086 :
2087 251236 : entry->additional = pergroup;
2088 :
2089 : /*
2090 : * Initialize aggregates for new tuple group, lookup_hash_entries()
2091 : * already has selected the relevant grouping set.
2092 : */
2093 648038 : for (transno = 0; transno < aggstate->numtrans; transno++)
2094 : {
2095 396802 : AggStatePerTrans pertrans = &aggstate->pertrans[transno];
2096 396802 : AggStatePerGroup pergroupstate = &pergroup[transno];
2097 :
2098 396802 : initialize_aggregate(aggstate, pertrans, pergroupstate);
2099 : }
2100 : }
2101 :
2102 : /*
2103 : * Look up hash entries for the current tuple in all hashed grouping sets.
2104 : *
2105 : * Some entries may be left NULL if we are in "spill mode". The same tuple
2106 : * will belong to different groups for each grouping set, so may match a group
2107 : * already in memory for one set and match a group not in memory for another
2108 : * set. When in "spill mode", the tuple will be spilled for each grouping set
2109 : * where it doesn't match a group in memory.
2110 : *
2111 : * NB: It's possible to spill the same tuple for several different grouping
2112 : * sets. This may seem wasteful, but it's actually a trade-off: if we spill
2113 : * the tuple multiple times for multiple grouping sets, it can be partitioned
2114 : * for each grouping set, making the refilling of the hash table very
2115 : * efficient.
2116 : */
2117 : static void
2118 5510380 : lookup_hash_entries(AggState *aggstate)
2119 : {
2120 5510380 : AggStatePerGroup *pergroup = aggstate->hash_pergroup;
2121 5510380 : TupleTableSlot *outerslot = aggstate->tmpcontext->ecxt_outertuple;
2122 : int setno;
2123 :
2124 11155164 : for (setno = 0; setno < aggstate->num_hashes; setno++)
2125 : {
2126 5644784 : AggStatePerHash perhash = &aggstate->perhash[setno];
2127 5644784 : TupleHashTable hashtable = perhash->hashtable;
2128 5644784 : TupleTableSlot *hashslot = perhash->hashslot;
2129 : TupleHashEntry entry;
2130 : uint32 hash;
2131 5644784 : bool isnew = false;
2132 : bool *p_isnew;
2133 :
2134 : /* if hash table already spilled, don't create new entries */
2135 5644784 : p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
2136 :
2137 5644784 : select_current_set(aggstate, setno, true);
2138 5644784 : prepare_hash_slot(perhash,
2139 : outerslot,
2140 : hashslot);
2141 :
2142 5644784 : entry = LookupTupleHashEntry(hashtable, hashslot,
2143 : p_isnew, &hash);
2144 :
2145 5644784 : if (entry != NULL)
2146 : {
2147 5413492 : if (isnew)
2148 348870 : initialize_hash_entry(aggstate, hashtable, entry);
2149 5413492 : pergroup[setno] = entry->additional;
2150 : }
2151 : else
2152 : {
2153 231292 : HashAggSpill *spill = &aggstate->hash_spills[setno];
2154 231292 : TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
2155 :
2156 231292 : if (spill->partitions == NULL)
2157 0 : hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
2158 0 : perhash->aggnode->numGroups,
2159 : aggstate->hashentrysize);
2160 :
2161 231292 : hashagg_spill_tuple(aggstate, spill, slot, hash);
2162 231292 : pergroup[setno] = NULL;
2163 : }
2164 : }
2165 5510380 : }
2166 :
2167 : /*
2168 : * ExecAgg -
2169 : *
2170 : * ExecAgg receives tuples from its outer subplan and aggregates over
2171 : * the appropriate attribute for each aggregate function use (Aggref
2172 : * node) appearing in the targetlist or qual of the node. The number
2173 : * of tuples to aggregate over depends on whether grouped or plain
2174 : * aggregation is selected. In grouped aggregation, we produce a result
2175 : * row for each group; in plain aggregation there's a single result row
2176 : * for the whole query. In either case, the value of each aggregate is
2177 : * stored in the expression context to be used when ExecProject evaluates
2178 : * the result tuple.
2179 : */
2180 : static TupleTableSlot *
2181 737286 : ExecAgg(PlanState *pstate)
2182 : {
2183 737286 : AggState *node = castNode(AggState, pstate);
2184 737286 : TupleTableSlot *result = NULL;
2185 :
2186 737286 : CHECK_FOR_INTERRUPTS();
2187 :
2188 737286 : if (!node->agg_done)
2189 : {
2190 : /* Dispatch based on strategy */
2191 676366 : switch (node->phase->aggstrategy)
2192 : {
2193 406852 : case AGG_HASHED:
2194 406852 : if (!node->table_filled)
2195 14942 : agg_fill_hash_table(node);
2196 : /* FALLTHROUGH */
2197 : case AGG_MIXED:
2198 434214 : result = agg_retrieve_hash_table(node);
2199 434214 : break;
2200 242152 : case AGG_PLAIN:
2201 : case AGG_SORTED:
2202 242152 : result = agg_retrieve_direct(node);
2203 242032 : break;
2204 : }
2205 :
2206 676246 : if (!TupIsNull(result))
2207 659576 : return result;
2208 : }
2209 :
2210 77590 : return NULL;
2211 : }
2212 :
2213 : /*
2214 : * ExecAgg for non-hashed case
2215 : */
2216 : static TupleTableSlot *
2217 242152 : agg_retrieve_direct(AggState *aggstate)
2218 : {
2219 242152 : Agg *node = aggstate->phase->aggnode;
2220 : ExprContext *econtext;
2221 : ExprContext *tmpcontext;
2222 : AggStatePerAgg peragg;
2223 : AggStatePerGroup *pergroups;
2224 : TupleTableSlot *outerslot;
2225 : TupleTableSlot *firstSlot;
2226 : TupleTableSlot *result;
2227 242152 : bool hasGroupingSets = aggstate->phase->numsets > 0;
2228 242152 : int numGroupingSets = Max(aggstate->phase->numsets, 1);
2229 : int currentSet;
2230 : int nextSetSize;
2231 : int numReset;
2232 : int i;
2233 :
2234 : /*
2235 : * get state info from node
2236 : *
2237 : * econtext is the per-output-tuple expression context
2238 : *
2239 : * tmpcontext is the per-input-tuple expression context
2240 : */
2241 242152 : econtext = aggstate->ss.ps.ps_ExprContext;
2242 242152 : tmpcontext = aggstate->tmpcontext;
2243 :
2244 242152 : peragg = aggstate->peragg;
2245 242152 : pergroups = aggstate->pergroups;
2246 242152 : firstSlot = aggstate->ss.ss_ScanTupleSlot;
2247 :
2248 : /*
2249 : * We loop retrieving groups until we find one matching
2250 : * aggstate->ss.ps.qual
2251 : *
2252 : * For grouping sets, we have the invariant that aggstate->projected_set
2253 : * is either -1 (initial call) or the index (starting from 0) in
2254 : * gset_lengths for the group we just completed (either by projecting a
2255 : * row or by discarding it in the qual).
2256 : */
2257 312932 : while (!aggstate->agg_done)
2258 : {
2259 : /*
2260 : * Clear the per-output-tuple context for each group, as well as
2261 : * aggcontext (which contains any pass-by-ref transvalues of the old
2262 : * group). Some aggregate functions store working state in child
2263 : * contexts; those now get reset automatically without us needing to
2264 : * do anything special.
2265 : *
2266 : * We use ReScanExprContext not just ResetExprContext because we want
2267 : * any registered shutdown callbacks to be called. That allows
2268 : * aggregate functions to ensure they've cleaned up any non-memory
2269 : * resources.
2270 : */
2271 312726 : ReScanExprContext(econtext);
2272 :
2273 : /*
2274 : * Determine how many grouping sets need to be reset at this boundary.
2275 : */
2276 312726 : if (aggstate->projected_set >= 0 &&
2277 246102 : aggstate->projected_set < numGroupingSets)
2278 246096 : numReset = aggstate->projected_set + 1;
2279 : else
2280 66630 : numReset = numGroupingSets;
2281 :
2282 : /*
2283 : * numReset can change on a phase boundary, but that's OK; we want to
2284 : * reset the contexts used in _this_ phase, and later, after possibly
2285 : * changing phase, initialize the right number of aggregates for the
2286 : * _new_ phase.
2287 : */
2288 :
2289 647706 : for (i = 0; i < numReset; i++)
2290 : {
2291 334980 : ReScanExprContext(aggstate->aggcontexts[i]);
2292 : }
2293 :
2294 : /*
2295 : * Check if input is complete and there are no more groups to project
2296 : * in this phase; move to next phase or mark as done.
2297 : */
2298 312726 : if (aggstate->input_done == true &&
2299 1554 : aggstate->projected_set >= (numGroupingSets - 1))
2300 : {
2301 744 : if (aggstate->current_phase < aggstate->numphases - 1)
2302 : {
2303 186 : initialize_phase(aggstate, aggstate->current_phase + 1);
2304 186 : aggstate->input_done = false;
2305 186 : aggstate->projected_set = -1;
2306 186 : numGroupingSets = Max(aggstate->phase->numsets, 1);
2307 186 : node = aggstate->phase->aggnode;
2308 186 : numReset = numGroupingSets;
2309 : }
2310 558 : else if (aggstate->aggstrategy == AGG_MIXED)
2311 : {
2312 : /*
2313 : * Mixed mode; we've output all the grouped stuff and have
2314 : * full hashtables, so switch to outputting those.
2315 : */
2316 156 : initialize_phase(aggstate, 0);
2317 156 : aggstate->table_filled = true;
2318 156 : ResetTupleHashIterator(aggstate->perhash[0].hashtable,
2319 : &aggstate->perhash[0].hashiter);
2320 156 : select_current_set(aggstate, 0, true);
2321 156 : return agg_retrieve_hash_table(aggstate);
2322 : }
2323 : else
2324 : {
2325 402 : aggstate->agg_done = true;
2326 402 : break;
2327 : }
2328 : }
2329 :
2330 : /*
2331 : * Get the number of columns in the next grouping set after the last
2332 : * projected one (if any). This is the number of columns to compare to
2333 : * see if we reached the boundary of that set too.
2334 : */
2335 312168 : if (aggstate->projected_set >= 0 &&
2336 245358 : aggstate->projected_set < (numGroupingSets - 1))
2337 27282 : nextSetSize = aggstate->phase->gset_lengths[aggstate->projected_set + 1];
2338 : else
2339 284886 : nextSetSize = 0;
2340 :
2341 : /*----------
2342 : * If a subgroup for the current grouping set is present, project it.
2343 : *
2344 : * We have a new group if:
2345 : * - we're out of input but haven't projected all grouping sets
2346 : * (checked above)
2347 : * OR
2348 : * - we already projected a row that wasn't from the last grouping
2349 : * set
2350 : * AND
2351 : * - the next grouping set has at least one grouping column (since
2352 : * empty grouping sets project only once input is exhausted)
2353 : * AND
2354 : * - the previous and pending rows differ on the grouping columns
2355 : * of the next grouping set
2356 : *----------
2357 : */
2358 312168 : tmpcontext->ecxt_innertuple = econtext->ecxt_outertuple;
2359 312168 : if (aggstate->input_done ||
2360 311358 : (node->aggstrategy != AGG_PLAIN &&
2361 246260 : aggstate->projected_set != -1 &&
2362 244548 : aggstate->projected_set < (numGroupingSets - 1) &&
2363 19940 : nextSetSize > 0 &&
2364 19940 : !ExecQualAndReset(aggstate->phase->eqfunctions[nextSetSize - 1],
2365 : tmpcontext)))
2366 : {
2367 14144 : aggstate->projected_set += 1;
2368 :
2369 : Assert(aggstate->projected_set < numGroupingSets);
2370 14144 : Assert(nextSetSize > 0 || aggstate->input_done);
2371 : }
2372 : else
2373 : {
2374 : /*
2375 : * We no longer care what group we just projected, the next
2376 : * projection will always be the first (or only) grouping set
2377 : * (unless the input proves to be empty).
2378 : */
2379 298024 : aggstate->projected_set = 0;
2380 :
2381 : /*
2382 : * If we don't already have the first tuple of the new group,
2383 : * fetch it from the outer plan.
2384 : */
2385 298024 : if (aggstate->grp_firstTuple == NULL)
2386 : {
2387 66810 : outerslot = fetch_input_tuple(aggstate);
2388 66792 : if (!TupIsNull(outerslot))
2389 : {
2390 : /*
2391 : * Make a copy of the first input tuple; we will use this
2392 : * for comparisons (in group mode) and for projection.
2393 : */
2394 53406 : aggstate->grp_firstTuple = ExecCopySlotHeapTuple(outerslot);
2395 : }
2396 : else
2397 : {
2398 : /* outer plan produced no tuples at all */
2399 13386 : if (hasGroupingSets)
2400 : {
2401 : /*
2402 : * If there was no input at all, we need to project
2403 : * rows only if there are grouping sets of size 0.
2404 : * Note that this implies that there can't be any
2405 : * references to ungrouped Vars, which would otherwise
2406 : * cause issues with the empty output slot.
2407 : *
2408 : * XXX: This is no longer true, we currently deal with
2409 : * this in finalize_aggregates().
2410 : */
2411 54 : aggstate->input_done = true;
2412 :
2413 78 : while (aggstate->phase->gset_lengths[aggstate->projected_set] > 0)
2414 : {
2415 30 : aggstate->projected_set += 1;
2416 30 : if (aggstate->projected_set >= numGroupingSets)
2417 : {
2418 : /*
2419 : * We can't set agg_done here because we might
2420 : * have more phases to do, even though the
2421 : * input is empty. So we need to restart the
2422 : * whole outer loop.
2423 : */
2424 6 : break;
2425 : }
2426 : }
2427 :
2428 54 : if (aggstate->projected_set >= numGroupingSets)
2429 6 : continue;
2430 : }
2431 : else
2432 : {
2433 13332 : aggstate->agg_done = true;
2434 : /* If we are grouping, we should produce no tuples too */
2435 13332 : if (node->aggstrategy != AGG_PLAIN)
2436 164 : return NULL;
2437 : }
2438 : }
2439 : }
2440 :
2441 : /*
2442 : * Initialize working state for a new input tuple group.
2443 : */
2444 297836 : initialize_aggregates(aggstate, pergroups, numReset);
2445 :
2446 297836 : if (aggstate->grp_firstTuple != NULL)
2447 : {
2448 : /*
2449 : * Store the copied first input tuple in the tuple table slot
2450 : * reserved for it. The tuple will be deleted when it is
2451 : * cleared from the slot.
2452 : */
2453 284620 : ExecForceStoreHeapTuple(aggstate->grp_firstTuple,
2454 : firstSlot, true);
2455 284620 : aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
2456 :
2457 : /* set up for first advance_aggregates call */
2458 284620 : tmpcontext->ecxt_outertuple = firstSlot;
2459 :
2460 : /*
2461 : * Process each outer-plan tuple, and then fetch the next one,
2462 : * until we exhaust the outer plan or cross a group boundary.
2463 : */
2464 : for (;;)
2465 : {
2466 : /*
2467 : * During phase 1 only of a mixed agg, we need to update
2468 : * hashtables as well in advance_aggregates.
2469 : */
2470 20226184 : if (aggstate->aggstrategy == AGG_MIXED &&
2471 38062 : aggstate->current_phase == 1)
2472 : {
2473 38062 : lookup_hash_entries(aggstate);
2474 : }
2475 :
2476 : /* Advance the aggregates (or combine functions) */
2477 20226184 : advance_aggregates(aggstate);
2478 :
2479 : /* Reset per-input-tuple context after each tuple */
2480 20226106 : ResetExprContext(tmpcontext);
2481 :
2482 20226106 : outerslot = fetch_input_tuple(aggstate);
2483 20226106 : if (TupIsNull(outerslot))
2484 : {
2485 : /* no more outer-plan tuples available */
2486 :
2487 : /* if we built hash tables, finalize any spills */
2488 53322 : if (aggstate->aggstrategy == AGG_MIXED &&
2489 144 : aggstate->current_phase == 1)
2490 144 : hashagg_finish_initial_spills(aggstate);
2491 :
2492 53322 : if (hasGroupingSets)
2493 : {
2494 690 : aggstate->input_done = true;
2495 690 : break;
2496 : }
2497 : else
2498 : {
2499 52632 : aggstate->agg_done = true;
2500 52632 : break;
2501 : }
2502 : }
2503 : /* set up for next advance_aggregates call */
2504 20172784 : tmpcontext->ecxt_outertuple = outerslot;
2505 :
2506 : /*
2507 : * If we are grouping, check whether we've crossed a group
2508 : * boundary.
2509 : */
2510 20172784 : if (node->aggstrategy != AGG_PLAIN && node->numCols > 0)
2511 : {
2512 1802852 : tmpcontext->ecxt_innertuple = firstSlot;
2513 1802852 : if (!ExecQual(aggstate->phase->eqfunctions[node->numCols - 1],
2514 : tmpcontext))
2515 : {
2516 231220 : aggstate->grp_firstTuple = ExecCopySlotHeapTuple(outerslot);
2517 231220 : break;
2518 : }
2519 : }
2520 : }
2521 : }
2522 :
2523 : /*
2524 : * Use the representative input tuple for any references to
2525 : * non-aggregated input columns in aggregate direct args, the node
2526 : * qual, and the tlist. (If we are not grouping, and there are no
2527 : * input rows at all, we will come here with an empty firstSlot
2528 : * ... but if not grouping, there can't be any references to
2529 : * non-aggregated input columns, so no problem.)
2530 : */
2531 297758 : econtext->ecxt_outertuple = firstSlot;
2532 : }
2533 :
2534 : Assert(aggstate->projected_set >= 0);
2535 :
2536 311902 : currentSet = aggstate->projected_set;
2537 :
2538 311902 : prepare_projection_slot(aggstate, econtext->ecxt_outertuple, currentSet);
2539 :
2540 311902 : select_current_set(aggstate, currentSet, false);
2541 :
2542 311902 : finalize_aggregates(aggstate,
2543 : peragg,
2544 311902 : pergroups[currentSet]);
2545 :
2546 : /*
2547 : * If there's no row to project right now, we must continue rather
2548 : * than returning a null since there might be more groups.
2549 : */
2550 311890 : result = project_aggregates(aggstate);
2551 311878 : if (result)
2552 241104 : return result;
2553 : }
2554 :
2555 : /* No more groups */
2556 608 : return NULL;
2557 : }
2558 :
2559 : /*
2560 : * ExecAgg for hashed case: read input and build hash table
2561 : */
2562 : static void
2563 14942 : agg_fill_hash_table(AggState *aggstate)
2564 : {
2565 : TupleTableSlot *outerslot;
2566 14942 : ExprContext *tmpcontext = aggstate->tmpcontext;
2567 :
2568 : /*
2569 : * Process each outer-plan tuple, and then fetch the next one, until we
2570 : * exhaust the outer plan.
2571 : */
2572 : for (;;)
2573 : {
2574 5487260 : outerslot = fetch_input_tuple(aggstate);
2575 5487260 : if (TupIsNull(outerslot))
2576 : break;
2577 :
2578 : /* set up for lookup_hash_entries and advance_aggregates */
2579 5472318 : tmpcontext->ecxt_outertuple = outerslot;
2580 :
2581 : /* Find or build hashtable entries */
2582 5472318 : lookup_hash_entries(aggstate);
2583 :
2584 : /* Advance the aggregates (or combine functions) */
2585 5472318 : advance_aggregates(aggstate);
2586 :
2587 : /*
2588 : * Reset per-input-tuple context after each tuple, but note that the
2589 : * hash lookups do this too
2590 : */
2591 5472318 : ResetExprContext(aggstate->tmpcontext);
2592 : }
2593 :
2594 : /* finalize spills, if any */
2595 14942 : hashagg_finish_initial_spills(aggstate);
2596 :
2597 14942 : aggstate->table_filled = true;
2598 : /* Initialize to walk the first hash table */
2599 14942 : select_current_set(aggstate, 0, true);
2600 14942 : ResetTupleHashIterator(aggstate->perhash[0].hashtable,
2601 : &aggstate->perhash[0].hashiter);
2602 14942 : }
2603 :
2604 : /*
2605 : * If any data was spilled during hash aggregation, reset the hash table and
2606 : * reprocess one batch of spilled data. After reprocessing a batch, the hash
2607 : * table will again contain data, ready to be consumed by
2608 : * agg_retrieve_hash_table_in_memory().
2609 : *
2610 : * Should only be called after all in memory hash table entries have been
2611 : * finalized and emitted.
2612 : *
2613 : * Return false when input is exhausted and there's no more work to be done;
2614 : * otherwise return true.
2615 : */
2616 : static bool
2617 42818 : agg_refill_hash_table(AggState *aggstate)
2618 : {
2619 : HashAggBatch *batch;
2620 : AggStatePerHash perhash;
2621 : HashAggSpill spill;
2622 42818 : LogicalTapeSet *tapeset = aggstate->hash_tapeset;
2623 42818 : bool spill_initialized = false;
2624 :
2625 42818 : if (aggstate->hash_batches == NIL)
2626 15898 : return false;
2627 :
2628 : /* hash_batches is a stack, with the top item at the end of the list */
2629 26920 : batch = llast(aggstate->hash_batches);
2630 26920 : aggstate->hash_batches = list_delete_last(aggstate->hash_batches);
2631 :
2632 26920 : hash_agg_set_limits(aggstate->hashentrysize, batch->input_card,
2633 : batch->used_bits, &aggstate->hash_mem_limit,
2634 : &aggstate->hash_ngroups_limit, NULL);
2635 :
2636 : /*
2637 : * Each batch only processes one grouping set; set the rest to NULL so
2638 : * that advance_aggregates() knows to ignore them. We don't touch
2639 : * pergroups for sorted grouping sets here, because they will be needed if
2640 : * we rescan later. The expressions for sorted grouping sets will not be
2641 : * evaluated after we recompile anyway.
2642 : */
2643 207380 : MemSet(aggstate->hash_pergroup, 0,
2644 : sizeof(AggStatePerGroup) * aggstate->num_hashes);
2645 :
2646 : /* free memory and reset hash tables */
2647 26920 : ReScanExprContext(aggstate->hashcontext);
2648 207380 : for (int setno = 0; setno < aggstate->num_hashes; setno++)
2649 180460 : ResetTupleHashTable(aggstate->perhash[setno].hashtable);
2650 :
2651 26920 : aggstate->hash_ngroups_current = 0;
2652 :
2653 : /*
2654 : * In AGG_MIXED mode, hash aggregation happens in phase 1 and the output
2655 : * happens in phase 0. So, we switch to phase 1 when processing a batch,
2656 : * and back to phase 0 after the batch is done.
2657 : */
2658 : Assert(aggstate->current_phase == 0);
2659 26920 : if (aggstate->phase->aggstrategy == AGG_MIXED)
2660 : {
2661 26262 : aggstate->current_phase = 1;
2662 26262 : aggstate->phase = &aggstate->phases[aggstate->current_phase];
2663 : }
2664 :
2665 26920 : select_current_set(aggstate, batch->setno, true);
2666 :
2667 26920 : perhash = &aggstate->perhash[aggstate->current_set];
2668 :
2669 : /*
2670 : * Spilled tuples are always read back as MinimalTuples, which may be
2671 : * different from the outer plan, so recompile the aggregate expressions.
2672 : *
2673 : * We still need the NULL check, because we are only processing one
2674 : * grouping set at a time and the rest will be NULL.
2675 : */
2676 26920 : hashagg_recompile_expressions(aggstate, true, true);
2677 :
2678 26920 : INJECTION_POINT("hash-aggregate-process-batch");
2679 : for (;;)
2680 678036 : {
2681 704956 : TupleTableSlot *spillslot = aggstate->hash_spill_rslot;
2682 704956 : TupleTableSlot *hashslot = perhash->hashslot;
2683 : TupleHashEntry entry;
2684 : MinimalTuple tuple;
2685 : uint32 hash;
2686 704956 : bool isnew = false;
2687 704956 : bool *p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
2688 :
2689 704956 : CHECK_FOR_INTERRUPTS();
2690 :
2691 704956 : tuple = hashagg_batch_read(batch, &hash);
2692 704956 : if (tuple == NULL)
2693 26920 : break;
2694 :
2695 678036 : ExecStoreMinimalTuple(tuple, spillslot, true);
2696 678036 : aggstate->tmpcontext->ecxt_outertuple = spillslot;
2697 :
2698 678036 : prepare_hash_slot(perhash,
2699 678036 : aggstate->tmpcontext->ecxt_outertuple,
2700 : hashslot);
2701 678036 : entry = LookupTupleHashEntryHash(perhash->hashtable, hashslot,
2702 : p_isnew, hash);
2703 :
2704 678036 : if (entry != NULL)
2705 : {
2706 231292 : if (isnew)
2707 103300 : initialize_hash_entry(aggstate, perhash->hashtable, entry);
2708 231292 : aggstate->hash_pergroup[batch->setno] = entry->additional;
2709 231292 : advance_aggregates(aggstate);
2710 : }
2711 : else
2712 : {
2713 446744 : if (!spill_initialized)
2714 : {
2715 : /*
2716 : * Avoid initializing the spill until we actually need it so
2717 : * that we don't assign tapes that will never be used.
2718 : */
2719 12500 : spill_initialized = true;
2720 12500 : hashagg_spill_init(&spill, tapeset, batch->used_bits,
2721 : batch->input_card, aggstate->hashentrysize);
2722 : }
2723 : /* no memory for a new group, spill */
2724 446744 : hashagg_spill_tuple(aggstate, &spill, spillslot, hash);
2725 :
2726 446744 : aggstate->hash_pergroup[batch->setno] = NULL;
2727 : }
2728 :
2729 : /*
2730 : * Reset per-input-tuple context after each tuple, but note that the
2731 : * hash lookups do this too
2732 : */
2733 678036 : ResetExprContext(aggstate->tmpcontext);
2734 : }
2735 :
2736 26920 : LogicalTapeClose(batch->input_tape);
2737 :
2738 : /* change back to phase 0 */
2739 26920 : aggstate->current_phase = 0;
2740 26920 : aggstate->phase = &aggstate->phases[aggstate->current_phase];
2741 :
2742 26920 : if (spill_initialized)
2743 : {
2744 12500 : hashagg_spill_finish(aggstate, &spill, batch->setno);
2745 12500 : hash_agg_update_metrics(aggstate, true, spill.npartitions);
2746 : }
2747 : else
2748 14420 : hash_agg_update_metrics(aggstate, true, 0);
2749 :
2750 26920 : aggstate->hash_spill_mode = false;
2751 :
2752 : /* prepare to walk the first hash table */
2753 26920 : select_current_set(aggstate, batch->setno, true);
2754 26920 : ResetTupleHashIterator(aggstate->perhash[batch->setno].hashtable,
2755 : &aggstate->perhash[batch->setno].hashiter);
2756 :
2757 26920 : pfree(batch);
2758 :
2759 26920 : return true;
2760 : }
2761 :
2762 : /*
2763 : * ExecAgg for hashed case: retrieving groups from hash table
2764 : *
2765 : * After exhausting in-memory tuples, also try refilling the hash table using
2766 : * previously-spilled tuples. Only returns NULL after all in-memory and
2767 : * spilled tuples are exhausted.
2768 : */
2769 : static TupleTableSlot *
2770 434370 : agg_retrieve_hash_table(AggState *aggstate)
2771 : {
2772 434370 : TupleTableSlot *result = NULL;
2773 :
2774 879762 : while (result == NULL)
2775 : {
2776 461290 : result = agg_retrieve_hash_table_in_memory(aggstate);
2777 461290 : if (result == NULL)
2778 : {
2779 42818 : if (!agg_refill_hash_table(aggstate))
2780 : {
2781 15898 : aggstate->agg_done = true;
2782 15898 : break;
2783 : }
2784 : }
2785 : }
2786 :
2787 434370 : return result;
2788 : }
2789 :
2790 : /*
2791 : * Retrieve the groups from the in-memory hash tables without considering any
2792 : * spilled tuples.
2793 : */
2794 : static TupleTableSlot *
2795 461290 : agg_retrieve_hash_table_in_memory(AggState *aggstate)
2796 : {
2797 : ExprContext *econtext;
2798 : AggStatePerAgg peragg;
2799 : AggStatePerGroup pergroup;
2800 : TupleHashEntryData *entry;
2801 : TupleTableSlot *firstSlot;
2802 : TupleTableSlot *result;
2803 : AggStatePerHash perhash;
2804 :
2805 : /*
2806 : * get state info from node.
2807 : *
2808 : * econtext is the per-output-tuple expression context.
2809 : */
2810 461290 : econtext = aggstate->ss.ps.ps_ExprContext;
2811 461290 : peragg = aggstate->peragg;
2812 461290 : firstSlot = aggstate->ss.ss_ScanTupleSlot;
2813 :
2814 : /*
2815 : * Note that perhash (and therefore anything accessed through it) can
2816 : * change inside the loop, as we change between grouping sets.
2817 : */
2818 461290 : perhash = &aggstate->perhash[aggstate->current_set];
2819 :
2820 : /*
2821 : * We loop retrieving groups until we find one satisfying
2822 : * aggstate->ss.ps.qual
2823 : */
2824 : for (;;)
2825 135888 : {
2826 597178 : TupleTableSlot *hashslot = perhash->hashslot;
2827 : int i;
2828 :
2829 597178 : CHECK_FOR_INTERRUPTS();
2830 :
2831 : /*
2832 : * Find the next entry in the hash table
2833 : */
2834 597178 : entry = ScanTupleHashTable(perhash->hashtable, &perhash->hashiter);
2835 597178 : if (entry == NULL)
2836 : {
2837 143086 : int nextset = aggstate->current_set + 1;
2838 :
2839 143086 : if (nextset < aggstate->num_hashes)
2840 : {
2841 : /*
2842 : * Switch to next grouping set, reinitialize, and restart the
2843 : * loop.
2844 : */
2845 100268 : select_current_set(aggstate, nextset, true);
2846 :
2847 100268 : perhash = &aggstate->perhash[aggstate->current_set];
2848 :
2849 100268 : ResetTupleHashIterator(perhash->hashtable, &perhash->hashiter);
2850 :
2851 100268 : continue;
2852 : }
2853 : else
2854 : {
2855 42818 : return NULL;
2856 : }
2857 : }
2858 :
2859 : /*
2860 : * Clear the per-output-tuple context for each group
2861 : *
2862 : * We intentionally don't use ReScanExprContext here; if any aggs have
2863 : * registered shutdown callbacks, they mustn't be called yet, since we
2864 : * might not be done with that agg.
2865 : */
2866 454092 : ResetExprContext(econtext);
2867 :
2868 : /*
2869 : * Transform representative tuple back into one with the right
2870 : * columns.
2871 : */
2872 454092 : ExecStoreMinimalTuple(entry->firstTuple, hashslot, false);
2873 454092 : slot_getallattrs(hashslot);
2874 :
2875 454092 : ExecClearTuple(firstSlot);
2876 454092 : memset(firstSlot->tts_isnull, true,
2877 454092 : firstSlot->tts_tupleDescriptor->natts * sizeof(bool));
2878 :
2879 1240060 : for (i = 0; i < perhash->numhashGrpCols; i++)
2880 : {
2881 785968 : int varNumber = perhash->hashGrpColIdxInput[i] - 1;
2882 :
2883 785968 : firstSlot->tts_values[varNumber] = hashslot->tts_values[i];
2884 785968 : firstSlot->tts_isnull[varNumber] = hashslot->tts_isnull[i];
2885 : }
2886 454092 : ExecStoreVirtualTuple(firstSlot);
2887 :
2888 454092 : pergroup = (AggStatePerGroup) entry->additional;
2889 :
2890 : /*
2891 : * Use the representative input tuple for any references to
2892 : * non-aggregated input columns in the qual and tlist.
2893 : */
2894 454092 : econtext->ecxt_outertuple = firstSlot;
2895 :
2896 454092 : prepare_projection_slot(aggstate,
2897 : econtext->ecxt_outertuple,
2898 : aggstate->current_set);
2899 :
2900 454092 : finalize_aggregates(aggstate, peragg, pergroup);
2901 :
2902 454092 : result = project_aggregates(aggstate);
2903 454092 : if (result)
2904 418472 : return result;
2905 : }
2906 :
2907 : /* No more groups */
2908 : return NULL;
2909 : }
2910 :
2911 : /*
2912 : * hashagg_spill_init
2913 : *
2914 : * Called after we determined that spilling is necessary. Chooses the number
2915 : * of partitions to create, and initializes them.
2916 : */
2917 : static void
2918 12616 : hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *tapeset, int used_bits,
2919 : double input_groups, double hashentrysize)
2920 : {
2921 : int npartitions;
2922 : int partition_bits;
2923 :
2924 12616 : npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
2925 : used_bits, &partition_bits);
2926 :
2927 : #ifdef USE_INJECTION_POINTS
2928 12616 : if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-single-partition"))
2929 : {
2930 10 : npartitions = 1;
2931 10 : partition_bits = 0;
2932 10 : INJECTION_POINT_CACHED("hash-aggregate-single-partition");
2933 : }
2934 : #endif
2935 :
2936 12616 : spill->partitions = palloc0(sizeof(LogicalTape *) * npartitions);
2937 12616 : spill->ntuples = palloc0(sizeof(int64) * npartitions);
2938 12616 : spill->hll_card = palloc0(sizeof(hyperLogLogState) * npartitions);
2939 :
2940 63050 : for (int i = 0; i < npartitions; i++)
2941 50434 : spill->partitions[i] = LogicalTapeCreate(tapeset);
2942 :
2943 12616 : spill->shift = 32 - used_bits - partition_bits;
2944 12616 : if (spill->shift < 32)
2945 12606 : spill->mask = (npartitions - 1) << spill->shift;
2946 : else
2947 10 : spill->mask = 0;
2948 12616 : spill->npartitions = npartitions;
2949 :
2950 63050 : for (int i = 0; i < npartitions; i++)
2951 50434 : initHyperLogLog(&spill->hll_card[i], HASHAGG_HLL_BIT_WIDTH);
2952 12616 : }
2953 :
2954 : /*
2955 : * hashagg_spill_tuple
2956 : *
2957 : * No room for new groups in the hash table. Save for later in the appropriate
2958 : * partition.
2959 : */
2960 : static Size
2961 678036 : hashagg_spill_tuple(AggState *aggstate, HashAggSpill *spill,
2962 : TupleTableSlot *inputslot, uint32 hash)
2963 : {
2964 : TupleTableSlot *spillslot;
2965 : int partition;
2966 : MinimalTuple tuple;
2967 : LogicalTape *tape;
2968 678036 : int total_written = 0;
2969 : bool shouldFree;
2970 :
2971 : Assert(spill->partitions != NULL);
2972 :
2973 : /* spill only attributes that we actually need */
2974 678036 : if (!aggstate->all_cols_needed)
2975 : {
2976 4704 : spillslot = aggstate->hash_spill_wslot;
2977 4704 : slot_getsomeattrs(inputslot, aggstate->max_colno_needed);
2978 4704 : ExecClearTuple(spillslot);
2979 14112 : for (int i = 0; i < spillslot->tts_tupleDescriptor->natts; i++)
2980 : {
2981 9408 : if (bms_is_member(i + 1, aggstate->colnos_needed))
2982 : {
2983 4704 : spillslot->tts_values[i] = inputslot->tts_values[i];
2984 4704 : spillslot->tts_isnull[i] = inputslot->tts_isnull[i];
2985 : }
2986 : else
2987 4704 : spillslot->tts_isnull[i] = true;
2988 : }
2989 4704 : ExecStoreVirtualTuple(spillslot);
2990 : }
2991 : else
2992 673332 : spillslot = inputslot;
2993 :
2994 678036 : tuple = ExecFetchSlotMinimalTuple(spillslot, &shouldFree);
2995 :
2996 678036 : if (spill->shift < 32)
2997 657036 : partition = (hash & spill->mask) >> spill->shift;
2998 : else
2999 21000 : partition = 0;
3000 :
3001 678036 : spill->ntuples[partition]++;
3002 :
3003 : /*
3004 : * All hash values destined for a given partition have some bits in
3005 : * common, which causes bad HLL cardinality estimates. Hash the hash to
3006 : * get a more uniform distribution.
3007 : */
3008 678036 : addHyperLogLog(&spill->hll_card[partition], hash_bytes_uint32(hash));
3009 :
3010 678036 : tape = spill->partitions[partition];
3011 :
3012 678036 : LogicalTapeWrite(tape, &hash, sizeof(uint32));
3013 678036 : total_written += sizeof(uint32);
3014 :
3015 678036 : LogicalTapeWrite(tape, tuple, tuple->t_len);
3016 678036 : total_written += tuple->t_len;
3017 :
3018 678036 : if (shouldFree)
3019 231292 : pfree(tuple);
3020 :
3021 678036 : return total_written;
3022 : }
3023 :
3024 : /*
3025 : * hashagg_batch_new
3026 : *
3027 : * Construct a HashAggBatch item, which represents one iteration of HashAgg to
3028 : * be done.
3029 : */
3030 : static HashAggBatch *
3031 26920 : hashagg_batch_new(LogicalTape *input_tape, int setno,
3032 : int64 input_tuples, double input_card, int used_bits)
3033 : {
3034 26920 : HashAggBatch *batch = palloc0(sizeof(HashAggBatch));
3035 :
3036 26920 : batch->setno = setno;
3037 26920 : batch->used_bits = used_bits;
3038 26920 : batch->input_tape = input_tape;
3039 26920 : batch->input_tuples = input_tuples;
3040 26920 : batch->input_card = input_card;
3041 :
3042 26920 : return batch;
3043 : }
3044 :
3045 : /*
3046 : * hashagg_batch_read
3047 : * read the next tuple from a batch's tape. Return NULL if no more.
3048 : */
3049 : static MinimalTuple
3050 704956 : hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
3051 : {
3052 704956 : LogicalTape *tape = batch->input_tape;
3053 : MinimalTuple tuple;
3054 : uint32 t_len;
3055 : size_t nread;
3056 : uint32 hash;
3057 :
3058 704956 : nread = LogicalTapeRead(tape, &hash, sizeof(uint32));
3059 704956 : if (nread == 0)
3060 26920 : return NULL;
3061 678036 : if (nread != sizeof(uint32))
3062 0 : ereport(ERROR,
3063 : (errcode_for_file_access(),
3064 : errmsg_internal("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
3065 : tape, sizeof(uint32), nread)));
3066 678036 : if (hashp != NULL)
3067 678036 : *hashp = hash;
3068 :
3069 678036 : nread = LogicalTapeRead(tape, &t_len, sizeof(t_len));
3070 678036 : if (nread != sizeof(uint32))
3071 0 : ereport(ERROR,
3072 : (errcode_for_file_access(),
3073 : errmsg_internal("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
3074 : tape, sizeof(uint32), nread)));
3075 :
3076 678036 : tuple = (MinimalTuple) palloc(t_len);
3077 678036 : tuple->t_len = t_len;
3078 :
3079 678036 : nread = LogicalTapeRead(tape,
3080 : (char *) tuple + sizeof(uint32),
3081 : t_len - sizeof(uint32));
3082 678036 : if (nread != t_len - sizeof(uint32))
3083 0 : ereport(ERROR,
3084 : (errcode_for_file_access(),
3085 : errmsg_internal("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
3086 : tape, t_len - sizeof(uint32), nread)));
3087 :
3088 678036 : return tuple;
3089 : }
3090 :
3091 : /*
3092 : * hashagg_finish_initial_spills
3093 : *
3094 : * After a HashAggBatch has been processed, it may have spilled tuples to
3095 : * disk. If so, turn the spilled partitions into new batches that must later
3096 : * be executed.
3097 : */
3098 : static void
3099 15086 : hashagg_finish_initial_spills(AggState *aggstate)
3100 : {
3101 : int setno;
3102 15086 : int total_npartitions = 0;
3103 :
3104 15086 : if (aggstate->hash_spills != NULL)
3105 : {
3106 172 : for (setno = 0; setno < aggstate->num_hashes; setno++)
3107 : {
3108 116 : HashAggSpill *spill = &aggstate->hash_spills[setno];
3109 :
3110 116 : total_npartitions += spill->npartitions;
3111 116 : hashagg_spill_finish(aggstate, spill, setno);
3112 : }
3113 :
3114 : /*
3115 : * We're not processing tuples from outer plan any more; only
3116 : * processing batches of spilled tuples. The initial spill structures
3117 : * are no longer needed.
3118 : */
3119 56 : pfree(aggstate->hash_spills);
3120 56 : aggstate->hash_spills = NULL;
3121 : }
3122 :
3123 15086 : hash_agg_update_metrics(aggstate, false, total_npartitions);
3124 15086 : aggstate->hash_spill_mode = false;
3125 15086 : }
3126 :
3127 : /*
3128 : * hashagg_spill_finish
3129 : *
3130 : * Transform spill partitions into new batches.
3131 : */
3132 : static void
3133 12616 : hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
3134 : {
3135 : int i;
3136 12616 : int used_bits = 32 - spill->shift;
3137 :
3138 12616 : if (spill->npartitions == 0)
3139 0 : return; /* didn't spill */
3140 :
3141 63050 : for (i = 0; i < spill->npartitions; i++)
3142 : {
3143 50434 : LogicalTape *tape = spill->partitions[i];
3144 : HashAggBatch *new_batch;
3145 : double cardinality;
3146 :
3147 : /* if the partition is empty, don't create a new batch of work */
3148 50434 : if (spill->ntuples[i] == 0)
3149 23514 : continue;
3150 :
3151 26920 : cardinality = estimateHyperLogLog(&spill->hll_card[i]);
3152 26920 : freeHyperLogLog(&spill->hll_card[i]);
3153 :
3154 : /* rewinding frees the buffer while not in use */
3155 26920 : LogicalTapeRewindForRead(tape, HASHAGG_READ_BUFFER_SIZE);
3156 :
3157 26920 : new_batch = hashagg_batch_new(tape, setno,
3158 26920 : spill->ntuples[i], cardinality,
3159 : used_bits);
3160 26920 : aggstate->hash_batches = lappend(aggstate->hash_batches, new_batch);
3161 26920 : aggstate->hash_batches_used++;
3162 : }
3163 :
3164 12616 : pfree(spill->ntuples);
3165 12616 : pfree(spill->hll_card);
3166 12616 : pfree(spill->partitions);
3167 : }
3168 :
3169 : /*
3170 : * Free resources related to a spilled HashAgg.
3171 : */
3172 : static void
3173 55996 : hashagg_reset_spill_state(AggState *aggstate)
3174 : {
3175 : /* free spills from initial pass */
3176 55996 : if (aggstate->hash_spills != NULL)
3177 : {
3178 : int setno;
3179 :
3180 0 : for (setno = 0; setno < aggstate->num_hashes; setno++)
3181 : {
3182 0 : HashAggSpill *spill = &aggstate->hash_spills[setno];
3183 :
3184 0 : pfree(spill->ntuples);
3185 0 : pfree(spill->partitions);
3186 : }
3187 0 : pfree(aggstate->hash_spills);
3188 0 : aggstate->hash_spills = NULL;
3189 : }
3190 :
3191 : /* free batches */
3192 55996 : list_free_deep(aggstate->hash_batches);
3193 55996 : aggstate->hash_batches = NIL;
3194 :
3195 : /* close tape set */
3196 55996 : if (aggstate->hash_tapeset != NULL)
3197 : {
3198 56 : LogicalTapeSetClose(aggstate->hash_tapeset);
3199 56 : aggstate->hash_tapeset = NULL;
3200 : }
3201 55996 : }
3202 :
3203 :
3204 : /* -----------------
3205 : * ExecInitAgg
3206 : *
3207 : * Creates the run-time information for the agg node produced by the
3208 : * planner and initializes its outer subtree.
3209 : *
3210 : * -----------------
3211 : */
3212 : AggState *
3213 45100 : ExecInitAgg(Agg *node, EState *estate, int eflags)
3214 : {
3215 : AggState *aggstate;
3216 : AggStatePerAgg peraggs;
3217 : AggStatePerTrans pertransstates;
3218 : AggStatePerGroup *pergroups;
3219 : Plan *outerPlan;
3220 : ExprContext *econtext;
3221 : TupleDesc scanDesc;
3222 : int max_aggno;
3223 : int max_transno;
3224 : int numaggrefs;
3225 : int numaggs;
3226 : int numtrans;
3227 : int phase;
3228 : int phaseidx;
3229 : ListCell *l;
3230 45100 : Bitmapset *all_grouped_cols = NULL;
3231 45100 : int numGroupingSets = 1;
3232 : int numPhases;
3233 : int numHashes;
3234 45100 : int i = 0;
3235 45100 : int j = 0;
3236 84766 : bool use_hashing = (node->aggstrategy == AGG_HASHED ||
3237 39666 : node->aggstrategy == AGG_MIXED);
3238 :
3239 : /* check for unsupported flags */
3240 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
3241 :
3242 : /*
3243 : * create state structure
3244 : */
3245 45100 : aggstate = makeNode(AggState);
3246 45100 : aggstate->ss.ps.plan = (Plan *) node;
3247 45100 : aggstate->ss.ps.state = estate;
3248 45100 : aggstate->ss.ps.ExecProcNode = ExecAgg;
3249 :
3250 45100 : aggstate->aggs = NIL;
3251 45100 : aggstate->numaggs = 0;
3252 45100 : aggstate->numtrans = 0;
3253 45100 : aggstate->aggstrategy = node->aggstrategy;
3254 45100 : aggstate->aggsplit = node->aggsplit;
3255 45100 : aggstate->maxsets = 0;
3256 45100 : aggstate->projected_set = -1;
3257 45100 : aggstate->current_set = 0;
3258 45100 : aggstate->peragg = NULL;
3259 45100 : aggstate->pertrans = NULL;
3260 45100 : aggstate->curperagg = NULL;
3261 45100 : aggstate->curpertrans = NULL;
3262 45100 : aggstate->input_done = false;
3263 45100 : aggstate->agg_done = false;
3264 45100 : aggstate->pergroups = NULL;
3265 45100 : aggstate->grp_firstTuple = NULL;
3266 45100 : aggstate->sort_in = NULL;
3267 45100 : aggstate->sort_out = NULL;
3268 :
3269 : /*
3270 : * phases[0] always exists, but is dummy in sorted/plain mode
3271 : */
3272 45100 : numPhases = (use_hashing ? 1 : 2);
3273 45100 : numHashes = (use_hashing ? 1 : 0);
3274 :
3275 : /*
3276 : * Calculate the maximum number of grouping sets in any phase; this
3277 : * determines the size of some allocations. Also calculate the number of
3278 : * phases, since all hashed/mixed nodes contribute to only a single phase.
3279 : */
3280 45100 : if (node->groupingSets)
3281 : {
3282 836 : numGroupingSets = list_length(node->groupingSets);
3283 :
3284 1762 : foreach(l, node->chain)
3285 : {
3286 926 : Agg *agg = lfirst(l);
3287 :
3288 926 : numGroupingSets = Max(numGroupingSets,
3289 : list_length(agg->groupingSets));
3290 :
3291 : /*
3292 : * additional AGG_HASHED aggs become part of phase 0, but all
3293 : * others add an extra phase.
3294 : */
3295 926 : if (agg->aggstrategy != AGG_HASHED)
3296 460 : ++numPhases;
3297 : else
3298 466 : ++numHashes;
3299 : }
3300 : }
3301 :
3302 45100 : aggstate->maxsets = numGroupingSets;
3303 45100 : aggstate->numphases = numPhases;
3304 :
3305 45100 : aggstate->aggcontexts = (ExprContext **)
3306 45100 : palloc0(sizeof(ExprContext *) * numGroupingSets);
3307 :
3308 : /*
3309 : * Create expression contexts. We need three or more, one for
3310 : * per-input-tuple processing, one for per-output-tuple processing, one
3311 : * for all the hashtables, and one for each grouping set. The per-tuple
3312 : * memory context of the per-grouping-set ExprContexts (aggcontexts)
3313 : * replaces the standalone memory context formerly used to hold transition
3314 : * values. We cheat a little by using ExecAssignExprContext() to build
3315 : * all of them.
3316 : *
3317 : * NOTE: the details of what is stored in aggcontexts and what is stored
3318 : * in the regular per-query memory context are driven by a simple
3319 : * decision: we want to reset the aggcontext at group boundaries (if not
3320 : * hashing) and in ExecReScanAgg to recover no-longer-wanted space.
3321 : */
3322 45100 : ExecAssignExprContext(estate, &aggstate->ss.ps);
3323 45100 : aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
3324 :
3325 91040 : for (i = 0; i < numGroupingSets; ++i)
3326 : {
3327 45940 : ExecAssignExprContext(estate, &aggstate->ss.ps);
3328 45940 : aggstate->aggcontexts[i] = aggstate->ss.ps.ps_ExprContext;
3329 : }
3330 :
3331 45100 : if (use_hashing)
3332 5666 : aggstate->hashcontext = CreateWorkExprContext(estate);
3333 :
3334 45100 : ExecAssignExprContext(estate, &aggstate->ss.ps);
3335 :
3336 : /*
3337 : * Initialize child nodes.
3338 : *
3339 : * If we are doing a hashed aggregation then the child plan does not need
3340 : * to handle REWIND efficiently; see ExecReScanAgg.
3341 : */
3342 45100 : if (node->aggstrategy == AGG_HASHED)
3343 5434 : eflags &= ~EXEC_FLAG_REWIND;
3344 45100 : outerPlan = outerPlan(node);
3345 45100 : outerPlanState(aggstate) = ExecInitNode(outerPlan, estate, eflags);
3346 :
3347 : /*
3348 : * initialize source tuple type.
3349 : */
3350 45100 : aggstate->ss.ps.outerops =
3351 45100 : ExecGetResultSlotOps(outerPlanState(&aggstate->ss),
3352 : &aggstate->ss.ps.outeropsfixed);
3353 45100 : aggstate->ss.ps.outeropsset = true;
3354 :
3355 45100 : ExecCreateScanSlotFromOuterPlan(estate, &aggstate->ss,
3356 : aggstate->ss.ps.outerops);
3357 45100 : scanDesc = aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor;
3358 :
3359 : /*
3360 : * If there are more than two phases (including a potential dummy phase
3361 : * 0), input will be resorted using tuplesort. Need a slot for that.
3362 : */
3363 45100 : if (numPhases > 2)
3364 : {
3365 186 : aggstate->sort_slot = ExecInitExtraTupleSlot(estate, scanDesc,
3366 : &TTSOpsMinimalTuple);
3367 :
3368 : /*
3369 : * The output of the tuplesort, and the output from the outer child
3370 : * might not use the same type of slot. In most cases the child will
3371 : * be a Sort, and thus return a TTSOpsMinimalTuple type slot - but the
3372 : * input can also be presorted due an index, in which case it could be
3373 : * a different type of slot.
3374 : *
3375 : * XXX: For efficiency it would be good to instead/additionally
3376 : * generate expressions with corresponding settings of outerops* for
3377 : * the individual phases - deforming is often a bottleneck for
3378 : * aggregations with lots of rows per group. If there's multiple
3379 : * sorts, we know that all but the first use TTSOpsMinimalTuple (via
3380 : * the nodeAgg.c internal tuplesort).
3381 : */
3382 186 : if (aggstate->ss.ps.outeropsfixed &&
3383 186 : aggstate->ss.ps.outerops != &TTSOpsMinimalTuple)
3384 12 : aggstate->ss.ps.outeropsfixed = false;
3385 : }
3386 :
3387 : /*
3388 : * Initialize result type, slot and projection.
3389 : */
3390 45100 : ExecInitResultTupleSlotTL(&aggstate->ss.ps, &TTSOpsVirtual);
3391 45100 : ExecAssignProjectionInfo(&aggstate->ss.ps, NULL);
3392 :
3393 : /*
3394 : * initialize child expressions
3395 : *
3396 : * We expect the parser to have checked that no aggs contain other agg
3397 : * calls in their arguments (and just to be sure, we verify it again while
3398 : * initializing the plan node). This would make no sense under SQL
3399 : * semantics, and it's forbidden by the spec. Because it is true, we
3400 : * don't need to worry about evaluating the aggs in any particular order.
3401 : *
3402 : * Note: execExpr.c finds Aggrefs for us, and adds them to aggstate->aggs.
3403 : * Aggrefs in the qual are found here; Aggrefs in the targetlist are found
3404 : * during ExecAssignProjectionInfo, above.
3405 : */
3406 45100 : aggstate->ss.ps.qual =
3407 45100 : ExecInitQual(node->plan.qual, (PlanState *) aggstate);
3408 :
3409 : /*
3410 : * We should now have found all Aggrefs in the targetlist and quals.
3411 : */
3412 45100 : numaggrefs = list_length(aggstate->aggs);
3413 45100 : max_aggno = -1;
3414 45100 : max_transno = -1;
3415 95670 : foreach(l, aggstate->aggs)
3416 : {
3417 50570 : Aggref *aggref = (Aggref *) lfirst(l);
3418 :
3419 50570 : max_aggno = Max(max_aggno, aggref->aggno);
3420 50570 : max_transno = Max(max_transno, aggref->aggtransno);
3421 : }
3422 45100 : aggstate->numaggs = numaggs = max_aggno + 1;
3423 45100 : aggstate->numtrans = numtrans = max_transno + 1;
3424 :
3425 : /*
3426 : * For each phase, prepare grouping set data and fmgr lookup data for
3427 : * compare functions. Accumulate all_grouped_cols in passing.
3428 : */
3429 45100 : aggstate->phases = palloc0(numPhases * sizeof(AggStatePerPhaseData));
3430 :
3431 45100 : aggstate->num_hashes = numHashes;
3432 45100 : if (numHashes)
3433 : {
3434 5666 : aggstate->perhash = palloc0(sizeof(AggStatePerHashData) * numHashes);
3435 5666 : aggstate->phases[0].numsets = 0;
3436 5666 : aggstate->phases[0].gset_lengths = palloc(numHashes * sizeof(int));
3437 5666 : aggstate->phases[0].grouped_cols = palloc(numHashes * sizeof(Bitmapset *));
3438 : }
3439 :
3440 45100 : phase = 0;
3441 91126 : for (phaseidx = 0; phaseidx <= list_length(node->chain); ++phaseidx)
3442 : {
3443 : Agg *aggnode;
3444 : Sort *sortnode;
3445 :
3446 46026 : if (phaseidx > 0)
3447 : {
3448 926 : aggnode = list_nth_node(Agg, node->chain, phaseidx - 1);
3449 926 : sortnode = castNode(Sort, outerPlan(aggnode));
3450 : }
3451 : else
3452 : {
3453 45100 : aggnode = node;
3454 45100 : sortnode = NULL;
3455 : }
3456 :
3457 : Assert(phase <= 1 || sortnode);
3458 :
3459 46026 : if (aggnode->aggstrategy == AGG_HASHED
3460 40126 : || aggnode->aggstrategy == AGG_MIXED)
3461 : {
3462 6132 : AggStatePerPhase phasedata = &aggstate->phases[0];
3463 : AggStatePerHash perhash;
3464 6132 : Bitmapset *cols = NULL;
3465 :
3466 : Assert(phase == 0);
3467 6132 : i = phasedata->numsets++;
3468 6132 : perhash = &aggstate->perhash[i];
3469 :
3470 : /* phase 0 always points to the "real" Agg in the hash case */
3471 6132 : phasedata->aggnode = node;
3472 6132 : phasedata->aggstrategy = node->aggstrategy;
3473 :
3474 : /* but the actual Agg node representing this hash is saved here */
3475 6132 : perhash->aggnode = aggnode;
3476 :
3477 6132 : phasedata->gset_lengths[i] = perhash->numCols = aggnode->numCols;
3478 :
3479 15400 : for (j = 0; j < aggnode->numCols; ++j)
3480 9268 : cols = bms_add_member(cols, aggnode->grpColIdx[j]);
3481 :
3482 6132 : phasedata->grouped_cols[i] = cols;
3483 :
3484 6132 : all_grouped_cols = bms_add_members(all_grouped_cols, cols);
3485 6132 : continue;
3486 : }
3487 : else
3488 : {
3489 39894 : AggStatePerPhase phasedata = &aggstate->phases[++phase];
3490 : int num_sets;
3491 :
3492 39894 : phasedata->numsets = num_sets = list_length(aggnode->groupingSets);
3493 :
3494 39894 : if (num_sets)
3495 : {
3496 910 : phasedata->gset_lengths = palloc(num_sets * sizeof(int));
3497 910 : phasedata->grouped_cols = palloc(num_sets * sizeof(Bitmapset *));
3498 :
3499 910 : i = 0;
3500 2732 : foreach(l, aggnode->groupingSets)
3501 : {
3502 1822 : int current_length = list_length(lfirst(l));
3503 1822 : Bitmapset *cols = NULL;
3504 :
3505 : /* planner forces this to be correct */
3506 3594 : for (j = 0; j < current_length; ++j)
3507 1772 : cols = bms_add_member(cols, aggnode->grpColIdx[j]);
3508 :
3509 1822 : phasedata->grouped_cols[i] = cols;
3510 1822 : phasedata->gset_lengths[i] = current_length;
3511 :
3512 1822 : ++i;
3513 : }
3514 :
3515 910 : all_grouped_cols = bms_add_members(all_grouped_cols,
3516 910 : phasedata->grouped_cols[0]);
3517 : }
3518 : else
3519 : {
3520 : Assert(phaseidx == 0);
3521 :
3522 38984 : phasedata->gset_lengths = NULL;
3523 38984 : phasedata->grouped_cols = NULL;
3524 : }
3525 :
3526 : /*
3527 : * If we are grouping, precompute fmgr lookup data for inner loop.
3528 : */
3529 39894 : if (aggnode->aggstrategy == AGG_SORTED)
3530 : {
3531 : /*
3532 : * Build a separate function for each subset of columns that
3533 : * need to be compared.
3534 : */
3535 2270 : phasedata->eqfunctions =
3536 2270 : (ExprState **) palloc0(aggnode->numCols * sizeof(ExprState *));
3537 :
3538 : /* for each grouping set */
3539 3814 : for (int k = 0; k < phasedata->numsets; k++)
3540 : {
3541 1544 : int length = phasedata->gset_lengths[k];
3542 :
3543 : /* nothing to do for empty grouping set */
3544 1544 : if (length == 0)
3545 326 : continue;
3546 :
3547 : /* if we already had one of this length, it'll do */
3548 1218 : if (phasedata->eqfunctions[length - 1] != NULL)
3549 138 : continue;
3550 :
3551 1080 : phasedata->eqfunctions[length - 1] =
3552 1080 : execTuplesMatchPrepare(scanDesc,
3553 : length,
3554 1080 : aggnode->grpColIdx,
3555 1080 : aggnode->grpOperators,
3556 1080 : aggnode->grpCollations,
3557 : (PlanState *) aggstate);
3558 : }
3559 :
3560 : /* and for all grouped columns, unless already computed */
3561 2270 : if (aggnode->numCols > 0 &&
3562 2176 : phasedata->eqfunctions[aggnode->numCols - 1] == NULL)
3563 : {
3564 1472 : phasedata->eqfunctions[aggnode->numCols - 1] =
3565 1472 : execTuplesMatchPrepare(scanDesc,
3566 : aggnode->numCols,
3567 1472 : aggnode->grpColIdx,
3568 1472 : aggnode->grpOperators,
3569 1472 : aggnode->grpCollations,
3570 : (PlanState *) aggstate);
3571 : }
3572 : }
3573 :
3574 39894 : phasedata->aggnode = aggnode;
3575 39894 : phasedata->aggstrategy = aggnode->aggstrategy;
3576 39894 : phasedata->sortnode = sortnode;
3577 : }
3578 : }
3579 :
3580 : /*
3581 : * Convert all_grouped_cols to a descending-order list.
3582 : */
3583 45100 : i = -1;
3584 54898 : while ((i = bms_next_member(all_grouped_cols, i)) >= 0)
3585 9798 : aggstate->all_grouped_cols = lcons_int(i, aggstate->all_grouped_cols);
3586 :
3587 : /*
3588 : * Set up aggregate-result storage in the output expr context, and also
3589 : * allocate my private per-agg working storage
3590 : */
3591 45100 : econtext = aggstate->ss.ps.ps_ExprContext;
3592 45100 : econtext->ecxt_aggvalues = (Datum *) palloc0(sizeof(Datum) * numaggs);
3593 45100 : econtext->ecxt_aggnulls = (bool *) palloc0(sizeof(bool) * numaggs);
3594 :
3595 45100 : peraggs = (AggStatePerAgg) palloc0(sizeof(AggStatePerAggData) * numaggs);
3596 45100 : pertransstates = (AggStatePerTrans) palloc0(sizeof(AggStatePerTransData) * numtrans);
3597 :
3598 45100 : aggstate->peragg = peraggs;
3599 45100 : aggstate->pertrans = pertransstates;
3600 :
3601 :
3602 45100 : aggstate->all_pergroups =
3603 45100 : (AggStatePerGroup *) palloc0(sizeof(AggStatePerGroup)
3604 45100 : * (numGroupingSets + numHashes));
3605 45100 : pergroups = aggstate->all_pergroups;
3606 :
3607 45100 : if (node->aggstrategy != AGG_HASHED)
3608 : {
3609 80172 : for (i = 0; i < numGroupingSets; i++)
3610 : {
3611 40506 : pergroups[i] = (AggStatePerGroup) palloc0(sizeof(AggStatePerGroupData)
3612 : * numaggs);
3613 : }
3614 :
3615 39666 : aggstate->pergroups = pergroups;
3616 39666 : pergroups += numGroupingSets;
3617 : }
3618 :
3619 : /*
3620 : * Hashing can only appear in the initial phase.
3621 : */
3622 45100 : if (use_hashing)
3623 : {
3624 5666 : Plan *outerplan = outerPlan(node);
3625 5666 : uint64 totalGroups = 0;
3626 :
3627 5666 : aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
3628 : "HashAgg meta context",
3629 : ALLOCSET_DEFAULT_SIZES);
3630 5666 : aggstate->hash_spill_rslot = ExecInitExtraTupleSlot(estate, scanDesc,
3631 : &TTSOpsMinimalTuple);
3632 5666 : aggstate->hash_spill_wslot = ExecInitExtraTupleSlot(estate, scanDesc,
3633 : &TTSOpsVirtual);
3634 :
3635 : /* this is an array of pointers, not structures */
3636 5666 : aggstate->hash_pergroup = pergroups;
3637 :
3638 11332 : aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
3639 5666 : outerplan->plan_width,
3640 : node->transitionSpace);
3641 :
3642 : /*
3643 : * Consider all of the grouping sets together when setting the limits
3644 : * and estimating the number of partitions. This can be inaccurate
3645 : * when there is more than one grouping set, but should still be
3646 : * reasonable.
3647 : */
3648 11798 : for (int k = 0; k < aggstate->num_hashes; k++)
3649 6132 : totalGroups += aggstate->perhash[k].aggnode->numGroups;
3650 :
3651 5666 : hash_agg_set_limits(aggstate->hashentrysize, totalGroups, 0,
3652 : &aggstate->hash_mem_limit,
3653 : &aggstate->hash_ngroups_limit,
3654 : &aggstate->hash_planned_partitions);
3655 5666 : find_hash_columns(aggstate);
3656 :
3657 : /* Skip massive memory allocation if we are just doing EXPLAIN */
3658 5666 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
3659 4336 : build_hash_tables(aggstate);
3660 :
3661 5666 : aggstate->table_filled = false;
3662 :
3663 : /* Initialize this to 1, meaning nothing spilled, yet */
3664 5666 : aggstate->hash_batches_used = 1;
3665 : }
3666 :
3667 : /*
3668 : * Initialize current phase-dependent values to initial phase. The initial
3669 : * phase is 1 (first sort pass) for all strategies that use sorting (if
3670 : * hashing is being done too, then phase 0 is processed last); but if only
3671 : * hashing is being done, then phase 0 is all there is.
3672 : */
3673 45100 : if (node->aggstrategy == AGG_HASHED)
3674 : {
3675 5434 : aggstate->current_phase = 0;
3676 5434 : initialize_phase(aggstate, 0);
3677 5434 : select_current_set(aggstate, 0, true);
3678 : }
3679 : else
3680 : {
3681 39666 : aggstate->current_phase = 1;
3682 39666 : initialize_phase(aggstate, 1);
3683 39666 : select_current_set(aggstate, 0, false);
3684 : }
3685 :
3686 : /*
3687 : * Perform lookups of aggregate function info, and initialize the
3688 : * unchanging fields of the per-agg and per-trans data.
3689 : */
3690 95664 : foreach(l, aggstate->aggs)
3691 : {
3692 50570 : Aggref *aggref = lfirst(l);
3693 : AggStatePerAgg peragg;
3694 : AggStatePerTrans pertrans;
3695 : Oid aggTransFnInputTypes[FUNC_MAX_ARGS];
3696 : int numAggTransFnArgs;
3697 : int numDirectArgs;
3698 : HeapTuple aggTuple;
3699 : Form_pg_aggregate aggform;
3700 : AclResult aclresult;
3701 : Oid finalfn_oid;
3702 : Oid serialfn_oid,
3703 : deserialfn_oid;
3704 : Oid aggOwner;
3705 : Expr *finalfnexpr;
3706 : Oid aggtranstype;
3707 :
3708 : /* Planner should have assigned aggregate to correct level */
3709 : Assert(aggref->agglevelsup == 0);
3710 : /* ... and the split mode should match */
3711 : Assert(aggref->aggsplit == aggstate->aggsplit);
3712 :
3713 50570 : peragg = &peraggs[aggref->aggno];
3714 :
3715 : /* Check if we initialized the state for this aggregate already. */
3716 50570 : if (peragg->aggref != NULL)
3717 472 : continue;
3718 :
3719 50098 : peragg->aggref = aggref;
3720 50098 : peragg->transno = aggref->aggtransno;
3721 :
3722 : /* Fetch the pg_aggregate row */
3723 50098 : aggTuple = SearchSysCache1(AGGFNOID,
3724 : ObjectIdGetDatum(aggref->aggfnoid));
3725 50098 : if (!HeapTupleIsValid(aggTuple))
3726 0 : elog(ERROR, "cache lookup failed for aggregate %u",
3727 : aggref->aggfnoid);
3728 50098 : aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
3729 :
3730 : /* Check permission to call aggregate function */
3731 50098 : aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(),
3732 : ACL_EXECUTE);
3733 50098 : if (aclresult != ACLCHECK_OK)
3734 6 : aclcheck_error(aclresult, OBJECT_AGGREGATE,
3735 6 : get_func_name(aggref->aggfnoid));
3736 50092 : InvokeFunctionExecuteHook(aggref->aggfnoid);
3737 :
3738 : /* planner recorded transition state type in the Aggref itself */
3739 50092 : aggtranstype = aggref->aggtranstype;
3740 : Assert(OidIsValid(aggtranstype));
3741 :
3742 : /* Final function only required if we're finalizing the aggregates */
3743 50092 : if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
3744 4250 : peragg->finalfn_oid = finalfn_oid = InvalidOid;
3745 : else
3746 45842 : peragg->finalfn_oid = finalfn_oid = aggform->aggfinalfn;
3747 :
3748 50092 : serialfn_oid = InvalidOid;
3749 50092 : deserialfn_oid = InvalidOid;
3750 :
3751 : /*
3752 : * Check if serialization/deserialization is required. We only do it
3753 : * for aggregates that have transtype INTERNAL.
3754 : */
3755 50092 : if (aggtranstype == INTERNALOID)
3756 : {
3757 : /*
3758 : * The planner should only have generated a serialize agg node if
3759 : * every aggregate with an INTERNAL state has a serialization
3760 : * function. Verify that.
3761 : */
3762 21886 : if (DO_AGGSPLIT_SERIALIZE(aggstate->aggsplit))
3763 : {
3764 : /* serialization only valid when not running finalfn */
3765 : Assert(DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
3766 :
3767 336 : if (!OidIsValid(aggform->aggserialfn))
3768 0 : elog(ERROR, "serialfunc not provided for serialization aggregation");
3769 336 : serialfn_oid = aggform->aggserialfn;
3770 : }
3771 :
3772 : /* Likewise for deserialization functions */
3773 21886 : if (DO_AGGSPLIT_DESERIALIZE(aggstate->aggsplit))
3774 : {
3775 : /* deserialization only valid when combining states */
3776 : Assert(DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
3777 :
3778 120 : if (!OidIsValid(aggform->aggdeserialfn))
3779 0 : elog(ERROR, "deserialfunc not provided for deserialization aggregation");
3780 120 : deserialfn_oid = aggform->aggdeserialfn;
3781 : }
3782 : }
3783 :
3784 : /* Check that aggregate owner has permission to call component fns */
3785 : {
3786 : HeapTuple procTuple;
3787 :
3788 50092 : procTuple = SearchSysCache1(PROCOID,
3789 : ObjectIdGetDatum(aggref->aggfnoid));
3790 50092 : if (!HeapTupleIsValid(procTuple))
3791 0 : elog(ERROR, "cache lookup failed for function %u",
3792 : aggref->aggfnoid);
3793 50092 : aggOwner = ((Form_pg_proc) GETSTRUCT(procTuple))->proowner;
3794 50092 : ReleaseSysCache(procTuple);
3795 :
3796 50092 : if (OidIsValid(finalfn_oid))
3797 : {
3798 23328 : aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
3799 : ACL_EXECUTE);
3800 23328 : if (aclresult != ACLCHECK_OK)
3801 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
3802 0 : get_func_name(finalfn_oid));
3803 23328 : InvokeFunctionExecuteHook(finalfn_oid);
3804 : }
3805 50092 : if (OidIsValid(serialfn_oid))
3806 : {
3807 336 : aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner,
3808 : ACL_EXECUTE);
3809 336 : if (aclresult != ACLCHECK_OK)
3810 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
3811 0 : get_func_name(serialfn_oid));
3812 336 : InvokeFunctionExecuteHook(serialfn_oid);
3813 : }
3814 50092 : if (OidIsValid(deserialfn_oid))
3815 : {
3816 120 : aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner,
3817 : ACL_EXECUTE);
3818 120 : if (aclresult != ACLCHECK_OK)
3819 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
3820 0 : get_func_name(deserialfn_oid));
3821 120 : InvokeFunctionExecuteHook(deserialfn_oid);
3822 : }
3823 : }
3824 :
3825 : /*
3826 : * Get actual datatypes of the (nominal) aggregate inputs. These
3827 : * could be different from the agg's declared input types, when the
3828 : * agg accepts ANY or a polymorphic type.
3829 : */
3830 50092 : numAggTransFnArgs = get_aggregate_argtypes(aggref,
3831 : aggTransFnInputTypes);
3832 :
3833 : /* Count the "direct" arguments, if any */
3834 50092 : numDirectArgs = list_length(aggref->aggdirectargs);
3835 :
3836 : /* Detect how many arguments to pass to the finalfn */
3837 50092 : if (aggform->aggfinalextra)
3838 15688 : peragg->numFinalArgs = numAggTransFnArgs + 1;
3839 : else
3840 34404 : peragg->numFinalArgs = numDirectArgs + 1;
3841 :
3842 : /* Initialize any direct-argument expressions */
3843 50092 : peragg->aggdirectargs = ExecInitExprList(aggref->aggdirectargs,
3844 : (PlanState *) aggstate);
3845 :
3846 : /*
3847 : * build expression trees using actual argument & result types for the
3848 : * finalfn, if it exists and is required.
3849 : */
3850 50092 : if (OidIsValid(finalfn_oid))
3851 : {
3852 23328 : build_aggregate_finalfn_expr(aggTransFnInputTypes,
3853 : peragg->numFinalArgs,
3854 : aggtranstype,
3855 : aggref->aggtype,
3856 : aggref->inputcollid,
3857 : finalfn_oid,
3858 : &finalfnexpr);
3859 23328 : fmgr_info(finalfn_oid, &peragg->finalfn);
3860 23328 : fmgr_info_set_expr((Node *) finalfnexpr, &peragg->finalfn);
3861 : }
3862 :
3863 : /* get info about the output value's datatype */
3864 50092 : get_typlenbyval(aggref->aggtype,
3865 : &peragg->resulttypeLen,
3866 : &peragg->resulttypeByVal);
3867 :
3868 : /*
3869 : * Build working state for invoking the transition function, if we
3870 : * haven't done it already.
3871 : */
3872 50092 : pertrans = &pertransstates[aggref->aggtransno];
3873 50092 : if (pertrans->aggref == NULL)
3874 : {
3875 : Datum textInitVal;
3876 : Datum initValue;
3877 : bool initValueIsNull;
3878 : Oid transfn_oid;
3879 :
3880 : /*
3881 : * If this aggregation is performing state combines, then instead
3882 : * of using the transition function, we'll use the combine
3883 : * function.
3884 : */
3885 49834 : if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
3886 : {
3887 1354 : transfn_oid = aggform->aggcombinefn;
3888 :
3889 : /* If not set then the planner messed up */
3890 1354 : if (!OidIsValid(transfn_oid))
3891 0 : elog(ERROR, "combinefn not set for aggregate function");
3892 : }
3893 : else
3894 48480 : transfn_oid = aggform->aggtransfn;
3895 :
3896 49834 : aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner, ACL_EXECUTE);
3897 49834 : if (aclresult != ACLCHECK_OK)
3898 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
3899 0 : get_func_name(transfn_oid));
3900 49834 : InvokeFunctionExecuteHook(transfn_oid);
3901 :
3902 : /*
3903 : * initval is potentially null, so don't try to access it as a
3904 : * struct field. Must do it the hard way with SysCacheGetAttr.
3905 : */
3906 49834 : textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
3907 : Anum_pg_aggregate_agginitval,
3908 : &initValueIsNull);
3909 49834 : if (initValueIsNull)
3910 30064 : initValue = (Datum) 0;
3911 : else
3912 19770 : initValue = GetAggInitVal(textInitVal, aggtranstype);
3913 :
3914 49834 : if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
3915 : {
3916 1354 : Oid combineFnInputTypes[] = {aggtranstype,
3917 : aggtranstype};
3918 :
3919 : /*
3920 : * When combining there's only one input, the to-be-combined
3921 : * transition value. The transition value is not counted
3922 : * here.
3923 : */
3924 1354 : pertrans->numTransInputs = 1;
3925 :
3926 : /* aggcombinefn always has two arguments of aggtranstype */
3927 1354 : build_pertrans_for_aggref(pertrans, aggstate, estate,
3928 : aggref, transfn_oid, aggtranstype,
3929 : serialfn_oid, deserialfn_oid,
3930 : initValue, initValueIsNull,
3931 : combineFnInputTypes, 2);
3932 :
3933 : /*
3934 : * Ensure that a combine function to combine INTERNAL states
3935 : * is not strict. This should have been checked during CREATE
3936 : * AGGREGATE, but the strict property could have been changed
3937 : * since then.
3938 : */
3939 1354 : if (pertrans->transfn.fn_strict && aggtranstype == INTERNALOID)
3940 0 : ereport(ERROR,
3941 : (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
3942 : errmsg("combine function with transition type %s must not be declared STRICT",
3943 : format_type_be(aggtranstype))));
3944 : }
3945 : else
3946 : {
3947 : /* Detect how many arguments to pass to the transfn */
3948 48480 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
3949 252 : pertrans->numTransInputs = list_length(aggref->args);
3950 : else
3951 48228 : pertrans->numTransInputs = numAggTransFnArgs;
3952 :
3953 48480 : build_pertrans_for_aggref(pertrans, aggstate, estate,
3954 : aggref, transfn_oid, aggtranstype,
3955 : serialfn_oid, deserialfn_oid,
3956 : initValue, initValueIsNull,
3957 : aggTransFnInputTypes,
3958 : numAggTransFnArgs);
3959 :
3960 : /*
3961 : * If the transfn is strict and the initval is NULL, make sure
3962 : * input type and transtype are the same (or at least
3963 : * binary-compatible), so that it's OK to use the first
3964 : * aggregated input value as the initial transValue. This
3965 : * should have been checked at agg definition time, but we
3966 : * must check again in case the transfn's strictness property
3967 : * has been changed.
3968 : */
3969 48480 : if (pertrans->transfn.fn_strict && pertrans->initValueIsNull)
3970 : {
3971 4962 : if (numAggTransFnArgs <= numDirectArgs ||
3972 4962 : !IsBinaryCoercible(aggTransFnInputTypes[numDirectArgs],
3973 : aggtranstype))
3974 0 : ereport(ERROR,
3975 : (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
3976 : errmsg("aggregate %u needs to have compatible input type and transition type",
3977 : aggref->aggfnoid)));
3978 : }
3979 : }
3980 : }
3981 : else
3982 258 : pertrans->aggshared = true;
3983 50092 : ReleaseSysCache(aggTuple);
3984 : }
3985 :
3986 : /*
3987 : * Last, check whether any more aggregates got added onto the node while
3988 : * we processed the expressions for the aggregate arguments (including not
3989 : * only the regular arguments and FILTER expressions handled immediately
3990 : * above, but any direct arguments we might've handled earlier). If so,
3991 : * we have nested aggregate functions, which is semantically nonsensical,
3992 : * so complain. (This should have been caught by the parser, so we don't
3993 : * need to work hard on a helpful error message; but we defend against it
3994 : * here anyway, just to be sure.)
3995 : */
3996 45094 : if (numaggrefs != list_length(aggstate->aggs))
3997 0 : ereport(ERROR,
3998 : (errcode(ERRCODE_GROUPING_ERROR),
3999 : errmsg("aggregate function calls cannot be nested")));
4000 :
4001 : /*
4002 : * Build expressions doing all the transition work at once. We build a
4003 : * different one for each phase, as the number of transition function
4004 : * invocation can differ between phases. Note this'll work both for
4005 : * transition and combination functions (although there'll only be one
4006 : * phase in the latter case).
4007 : */
4008 130076 : for (phaseidx = 0; phaseidx < aggstate->numphases; phaseidx++)
4009 : {
4010 84982 : AggStatePerPhase phase = &aggstate->phases[phaseidx];
4011 84982 : bool dohash = false;
4012 84982 : bool dosort = false;
4013 :
4014 : /* phase 0 doesn't necessarily exist */
4015 84982 : if (!phase->aggnode)
4016 39428 : continue;
4017 :
4018 45554 : if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 1)
4019 : {
4020 : /*
4021 : * Phase one, and only phase one, in a mixed agg performs both
4022 : * sorting and aggregation.
4023 : */
4024 232 : dohash = true;
4025 232 : dosort = true;
4026 : }
4027 45322 : else if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 0)
4028 : {
4029 : /*
4030 : * No need to compute a transition function for an AGG_MIXED phase
4031 : * 0 - the contents of the hashtables will have been computed
4032 : * during phase 1.
4033 : */
4034 232 : continue;
4035 : }
4036 45090 : else if (phase->aggstrategy == AGG_PLAIN ||
4037 7642 : phase->aggstrategy == AGG_SORTED)
4038 : {
4039 39656 : dohash = false;
4040 39656 : dosort = true;
4041 : }
4042 5434 : else if (phase->aggstrategy == AGG_HASHED)
4043 : {
4044 5434 : dohash = true;
4045 5434 : dosort = false;
4046 : }
4047 : else
4048 : Assert(false);
4049 :
4050 45322 : phase->evaltrans = ExecBuildAggTrans(aggstate, phase, dosort, dohash,
4051 : false);
4052 :
4053 : /* cache compiled expression for outer slot without NULL check */
4054 45322 : phase->evaltrans_cache[0][0] = phase->evaltrans;
4055 : }
4056 :
4057 45094 : return aggstate;
4058 : }
4059 :
4060 : /*
4061 : * Build the state needed to calculate a state value for an aggregate.
4062 : *
4063 : * This initializes all the fields in 'pertrans'. 'aggref' is the aggregate
4064 : * to initialize the state for. 'transfn_oid', 'aggtranstype', and the rest
4065 : * of the arguments could be calculated from 'aggref', but the caller has
4066 : * calculated them already, so might as well pass them.
4067 : *
4068 : * 'transfn_oid' may be either the Oid of the aggtransfn or the aggcombinefn.
4069 : */
4070 : static void
4071 49834 : build_pertrans_for_aggref(AggStatePerTrans pertrans,
4072 : AggState *aggstate, EState *estate,
4073 : Aggref *aggref,
4074 : Oid transfn_oid, Oid aggtranstype,
4075 : Oid aggserialfn, Oid aggdeserialfn,
4076 : Datum initValue, bool initValueIsNull,
4077 : Oid *inputTypes, int numArguments)
4078 : {
4079 49834 : int numGroupingSets = Max(aggstate->maxsets, 1);
4080 : Expr *transfnexpr;
4081 : int numTransArgs;
4082 49834 : Expr *serialfnexpr = NULL;
4083 49834 : Expr *deserialfnexpr = NULL;
4084 : ListCell *lc;
4085 : int numInputs;
4086 : int numDirectArgs;
4087 : List *sortlist;
4088 : int numSortCols;
4089 : int numDistinctCols;
4090 : int i;
4091 :
4092 : /* Begin filling in the pertrans data */
4093 49834 : pertrans->aggref = aggref;
4094 49834 : pertrans->aggshared = false;
4095 49834 : pertrans->aggCollation = aggref->inputcollid;
4096 49834 : pertrans->transfn_oid = transfn_oid;
4097 49834 : pertrans->serialfn_oid = aggserialfn;
4098 49834 : pertrans->deserialfn_oid = aggdeserialfn;
4099 49834 : pertrans->initValue = initValue;
4100 49834 : pertrans->initValueIsNull = initValueIsNull;
4101 :
4102 : /* Count the "direct" arguments, if any */
4103 49834 : numDirectArgs = list_length(aggref->aggdirectargs);
4104 :
4105 : /* Count the number of aggregated input columns */
4106 49834 : pertrans->numInputs = numInputs = list_length(aggref->args);
4107 :
4108 49834 : pertrans->aggtranstype = aggtranstype;
4109 :
4110 : /* account for the current transition state */
4111 49834 : numTransArgs = pertrans->numTransInputs + 1;
4112 :
4113 : /*
4114 : * Set up infrastructure for calling the transfn. Note that invtransfn is
4115 : * not needed here.
4116 : */
4117 49834 : build_aggregate_transfn_expr(inputTypes,
4118 : numArguments,
4119 : numDirectArgs,
4120 49834 : aggref->aggvariadic,
4121 : aggtranstype,
4122 : aggref->inputcollid,
4123 : transfn_oid,
4124 : InvalidOid,
4125 : &transfnexpr,
4126 : NULL);
4127 :
4128 49834 : fmgr_info(transfn_oid, &pertrans->transfn);
4129 49834 : fmgr_info_set_expr((Node *) transfnexpr, &pertrans->transfn);
4130 :
4131 49834 : pertrans->transfn_fcinfo =
4132 49834 : (FunctionCallInfo) palloc(SizeForFunctionCallInfo(numTransArgs));
4133 49834 : InitFunctionCallInfoData(*pertrans->transfn_fcinfo,
4134 : &pertrans->transfn,
4135 : numTransArgs,
4136 : pertrans->aggCollation,
4137 : (Node *) aggstate, NULL);
4138 :
4139 : /* get info about the state value's datatype */
4140 49834 : get_typlenbyval(aggtranstype,
4141 : &pertrans->transtypeLen,
4142 : &pertrans->transtypeByVal);
4143 :
4144 49834 : if (OidIsValid(aggserialfn))
4145 : {
4146 336 : build_aggregate_serialfn_expr(aggserialfn,
4147 : &serialfnexpr);
4148 336 : fmgr_info(aggserialfn, &pertrans->serialfn);
4149 336 : fmgr_info_set_expr((Node *) serialfnexpr, &pertrans->serialfn);
4150 :
4151 336 : pertrans->serialfn_fcinfo =
4152 336 : (FunctionCallInfo) palloc(SizeForFunctionCallInfo(1));
4153 336 : InitFunctionCallInfoData(*pertrans->serialfn_fcinfo,
4154 : &pertrans->serialfn,
4155 : 1,
4156 : InvalidOid,
4157 : (Node *) aggstate, NULL);
4158 : }
4159 :
4160 49834 : if (OidIsValid(aggdeserialfn))
4161 : {
4162 120 : build_aggregate_deserialfn_expr(aggdeserialfn,
4163 : &deserialfnexpr);
4164 120 : fmgr_info(aggdeserialfn, &pertrans->deserialfn);
4165 120 : fmgr_info_set_expr((Node *) deserialfnexpr, &pertrans->deserialfn);
4166 :
4167 120 : pertrans->deserialfn_fcinfo =
4168 120 : (FunctionCallInfo) palloc(SizeForFunctionCallInfo(2));
4169 120 : InitFunctionCallInfoData(*pertrans->deserialfn_fcinfo,
4170 : &pertrans->deserialfn,
4171 : 2,
4172 : InvalidOid,
4173 : (Node *) aggstate, NULL);
4174 : }
4175 :
4176 : /*
4177 : * If we're doing either DISTINCT or ORDER BY for a plain agg, then we
4178 : * have a list of SortGroupClause nodes; fish out the data in them and
4179 : * stick them into arrays. We ignore ORDER BY for an ordered-set agg,
4180 : * however; the agg's transfn and finalfn are responsible for that.
4181 : *
4182 : * When the planner has set the aggpresorted flag, the input to the
4183 : * aggregate is already correctly sorted. For ORDER BY aggregates we can
4184 : * simply treat these as normal aggregates. For presorted DISTINCT
4185 : * aggregates an extra step must be added to remove duplicate consecutive
4186 : * inputs.
4187 : *
4188 : * Note that by construction, if there is a DISTINCT clause then the ORDER
4189 : * BY clause is a prefix of it (see transformDistinctClause).
4190 : */
4191 49834 : if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
4192 : {
4193 252 : sortlist = NIL;
4194 252 : numSortCols = numDistinctCols = 0;
4195 252 : pertrans->aggsortrequired = false;
4196 : }
4197 49582 : else if (aggref->aggpresorted && aggref->aggdistinct == NIL)
4198 : {
4199 1588 : sortlist = NIL;
4200 1588 : numSortCols = numDistinctCols = 0;
4201 1588 : pertrans->aggsortrequired = false;
4202 : }
4203 47994 : else if (aggref->aggdistinct)
4204 : {
4205 570 : sortlist = aggref->aggdistinct;
4206 570 : numSortCols = numDistinctCols = list_length(sortlist);
4207 : Assert(numSortCols >= list_length(aggref->aggorder));
4208 570 : pertrans->aggsortrequired = !aggref->aggpresorted;
4209 : }
4210 : else
4211 : {
4212 47424 : sortlist = aggref->aggorder;
4213 47424 : numSortCols = list_length(sortlist);
4214 47424 : numDistinctCols = 0;
4215 47424 : pertrans->aggsortrequired = (numSortCols > 0);
4216 : }
4217 :
4218 49834 : pertrans->numSortCols = numSortCols;
4219 49834 : pertrans->numDistinctCols = numDistinctCols;
4220 :
4221 : /*
4222 : * If we have either sorting or filtering to do, create a tupledesc and
4223 : * slot corresponding to the aggregated inputs (including sort
4224 : * expressions) of the agg.
4225 : */
4226 49834 : if (numSortCols > 0 || aggref->aggfilter)
4227 : {
4228 1404 : pertrans->sortdesc = ExecTypeFromTL(aggref->args);
4229 1404 : pertrans->sortslot =
4230 1404 : ExecInitExtraTupleSlot(estate, pertrans->sortdesc,
4231 : &TTSOpsMinimalTuple);
4232 : }
4233 :
4234 49834 : if (numSortCols > 0)
4235 : {
4236 : /*
4237 : * We don't implement DISTINCT or ORDER BY aggs in the HASHED case
4238 : * (yet)
4239 : */
4240 : Assert(aggstate->aggstrategy != AGG_HASHED && aggstate->aggstrategy != AGG_MIXED);
4241 :
4242 : /* ORDER BY aggregates are not supported with partial aggregation */
4243 : Assert(!DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
4244 :
4245 : /* If we have only one input, we need its len/byval info. */
4246 696 : if (numInputs == 1)
4247 : {
4248 570 : get_typlenbyval(inputTypes[numDirectArgs],
4249 : &pertrans->inputtypeLen,
4250 : &pertrans->inputtypeByVal);
4251 : }
4252 126 : else if (numDistinctCols > 0)
4253 : {
4254 : /* we will need an extra slot to store prior values */
4255 96 : pertrans->uniqslot =
4256 96 : ExecInitExtraTupleSlot(estate, pertrans->sortdesc,
4257 : &TTSOpsMinimalTuple);
4258 : }
4259 :
4260 : /* Extract the sort information for use later */
4261 696 : pertrans->sortColIdx =
4262 696 : (AttrNumber *) palloc(numSortCols * sizeof(AttrNumber));
4263 696 : pertrans->sortOperators =
4264 696 : (Oid *) palloc(numSortCols * sizeof(Oid));
4265 696 : pertrans->sortCollations =
4266 696 : (Oid *) palloc(numSortCols * sizeof(Oid));
4267 696 : pertrans->sortNullsFirst =
4268 696 : (bool *) palloc(numSortCols * sizeof(bool));
4269 :
4270 696 : i = 0;
4271 1578 : foreach(lc, sortlist)
4272 : {
4273 882 : SortGroupClause *sortcl = (SortGroupClause *) lfirst(lc);
4274 882 : TargetEntry *tle = get_sortgroupclause_tle(sortcl, aggref->args);
4275 :
4276 : /* the parser should have made sure of this */
4277 : Assert(OidIsValid(sortcl->sortop));
4278 :
4279 882 : pertrans->sortColIdx[i] = tle->resno;
4280 882 : pertrans->sortOperators[i] = sortcl->sortop;
4281 882 : pertrans->sortCollations[i] = exprCollation((Node *) tle->expr);
4282 882 : pertrans->sortNullsFirst[i] = sortcl->nulls_first;
4283 882 : i++;
4284 : }
4285 : Assert(i == numSortCols);
4286 : }
4287 :
4288 49834 : if (aggref->aggdistinct)
4289 : {
4290 : Oid *ops;
4291 :
4292 : Assert(numArguments > 0);
4293 : Assert(list_length(aggref->aggdistinct) == numDistinctCols);
4294 :
4295 570 : ops = palloc(numDistinctCols * sizeof(Oid));
4296 :
4297 570 : i = 0;
4298 1308 : foreach(lc, aggref->aggdistinct)
4299 738 : ops[i++] = ((SortGroupClause *) lfirst(lc))->eqop;
4300 :
4301 : /* lookup / build the necessary comparators */
4302 570 : if (numDistinctCols == 1)
4303 474 : fmgr_info(get_opcode(ops[0]), &pertrans->equalfnOne);
4304 : else
4305 96 : pertrans->equalfnMulti =
4306 96 : execTuplesMatchPrepare(pertrans->sortdesc,
4307 : numDistinctCols,
4308 96 : pertrans->sortColIdx,
4309 : ops,
4310 96 : pertrans->sortCollations,
4311 : &aggstate->ss.ps);
4312 570 : pfree(ops);
4313 : }
4314 :
4315 49834 : pertrans->sortstates = (Tuplesortstate **)
4316 49834 : palloc0(sizeof(Tuplesortstate *) * numGroupingSets);
4317 49834 : }
4318 :
4319 :
4320 : static Datum
4321 19770 : GetAggInitVal(Datum textInitVal, Oid transtype)
4322 : {
4323 : Oid typinput,
4324 : typioparam;
4325 : char *strInitVal;
4326 : Datum initVal;
4327 :
4328 19770 : getTypeInputInfo(transtype, &typinput, &typioparam);
4329 19770 : strInitVal = TextDatumGetCString(textInitVal);
4330 19770 : initVal = OidInputFunctionCall(typinput, strInitVal,
4331 : typioparam, -1);
4332 19770 : pfree(strInitVal);
4333 19770 : return initVal;
4334 : }
4335 :
4336 : void
4337 44962 : ExecEndAgg(AggState *node)
4338 : {
4339 : PlanState *outerPlan;
4340 : int transno;
4341 44962 : int numGroupingSets = Max(node->maxsets, 1);
4342 : int setno;
4343 :
4344 : /*
4345 : * When ending a parallel worker, copy the statistics gathered by the
4346 : * worker back into shared memory so that it can be picked up by the main
4347 : * process to report in EXPLAIN ANALYZE.
4348 : */
4349 44962 : if (node->shared_info && IsParallelWorker())
4350 : {
4351 : AggregateInstrumentation *si;
4352 :
4353 : Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
4354 168 : si = &node->shared_info->sinstrument[ParallelWorkerNumber];
4355 168 : si->hash_batches_used = node->hash_batches_used;
4356 168 : si->hash_disk_used = node->hash_disk_used;
4357 168 : si->hash_mem_peak = node->hash_mem_peak;
4358 : }
4359 :
4360 : /* Make sure we have closed any open tuplesorts */
4361 :
4362 44962 : if (node->sort_in)
4363 144 : tuplesort_end(node->sort_in);
4364 44962 : if (node->sort_out)
4365 42 : tuplesort_end(node->sort_out);
4366 :
4367 44962 : hashagg_reset_spill_state(node);
4368 :
4369 44962 : if (node->hash_metacxt != NULL)
4370 : {
4371 5658 : MemoryContextDelete(node->hash_metacxt);
4372 5658 : node->hash_metacxt = NULL;
4373 : }
4374 :
4375 94660 : for (transno = 0; transno < node->numtrans; transno++)
4376 : {
4377 49698 : AggStatePerTrans pertrans = &node->pertrans[transno];
4378 :
4379 100410 : for (setno = 0; setno < numGroupingSets; setno++)
4380 : {
4381 50712 : if (pertrans->sortstates[setno])
4382 0 : tuplesort_end(pertrans->sortstates[setno]);
4383 : }
4384 : }
4385 :
4386 : /* And ensure any agg shutdown callbacks have been called */
4387 90764 : for (setno = 0; setno < numGroupingSets; setno++)
4388 45802 : ReScanExprContext(node->aggcontexts[setno]);
4389 44962 : if (node->hashcontext)
4390 5658 : ReScanExprContext(node->hashcontext);
4391 :
4392 44962 : outerPlan = outerPlanState(node);
4393 44962 : ExecEndNode(outerPlan);
4394 44962 : }
4395 :
4396 : void
4397 51056 : ExecReScanAgg(AggState *node)
4398 : {
4399 51056 : ExprContext *econtext = node->ss.ps.ps_ExprContext;
4400 51056 : PlanState *outerPlan = outerPlanState(node);
4401 51056 : Agg *aggnode = (Agg *) node->ss.ps.plan;
4402 : int transno;
4403 51056 : int numGroupingSets = Max(node->maxsets, 1);
4404 : int setno;
4405 :
4406 51056 : node->agg_done = false;
4407 :
4408 51056 : if (node->aggstrategy == AGG_HASHED)
4409 : {
4410 : /*
4411 : * In the hashed case, if we haven't yet built the hash table then we
4412 : * can just return; nothing done yet, so nothing to undo. If subnode's
4413 : * chgParam is not NULL then it will be re-scanned by ExecProcNode,
4414 : * else no reason to re-scan it at all.
4415 : */
4416 12020 : if (!node->table_filled)
4417 132 : return;
4418 :
4419 : /*
4420 : * If we do have the hash table, and it never spilled, and the subplan
4421 : * does not have any parameter changes, and none of our own parameter
4422 : * changes affect input expressions of the aggregated functions, then
4423 : * we can just rescan the existing hash table; no need to build it
4424 : * again.
4425 : */
4426 11888 : if (outerPlan->chgParam == NULL && !node->hash_ever_spilled &&
4427 908 : !bms_overlap(node->ss.ps.chgParam, aggnode->aggParams))
4428 : {
4429 884 : ResetTupleHashIterator(node->perhash[0].hashtable,
4430 : &node->perhash[0].hashiter);
4431 884 : select_current_set(node, 0, true);
4432 884 : return;
4433 : }
4434 : }
4435 :
4436 : /* Make sure we have closed any open tuplesorts */
4437 116184 : for (transno = 0; transno < node->numtrans; transno++)
4438 : {
4439 132324 : for (setno = 0; setno < numGroupingSets; setno++)
4440 : {
4441 66180 : AggStatePerTrans pertrans = &node->pertrans[transno];
4442 :
4443 66180 : if (pertrans->sortstates[setno])
4444 : {
4445 0 : tuplesort_end(pertrans->sortstates[setno]);
4446 0 : pertrans->sortstates[setno] = NULL;
4447 : }
4448 : }
4449 : }
4450 :
4451 : /*
4452 : * We don't need to ReScanExprContext the output tuple context here;
4453 : * ExecReScan already did it. But we do need to reset our per-grouping-set
4454 : * contexts, which may have transvalues stored in them. (We use rescan
4455 : * rather than just reset because transfns may have registered callbacks
4456 : * that need to be run now.) For the AGG_HASHED case, see below.
4457 : */
4458 :
4459 100116 : for (setno = 0; setno < numGroupingSets; setno++)
4460 : {
4461 50076 : ReScanExprContext(node->aggcontexts[setno]);
4462 : }
4463 :
4464 : /* Release first tuple of group, if we have made a copy */
4465 50040 : if (node->grp_firstTuple != NULL)
4466 : {
4467 0 : heap_freetuple(node->grp_firstTuple);
4468 0 : node->grp_firstTuple = NULL;
4469 : }
4470 50040 : ExecClearTuple(node->ss.ss_ScanTupleSlot);
4471 :
4472 : /* Forget current agg values */
4473 116184 : MemSet(econtext->ecxt_aggvalues, 0, sizeof(Datum) * node->numaggs);
4474 50040 : MemSet(econtext->ecxt_aggnulls, 0, sizeof(bool) * node->numaggs);
4475 :
4476 : /*
4477 : * With AGG_HASHED/MIXED, the hash table is allocated in a sub-context of
4478 : * the hashcontext. This used to be an issue, but now, resetting a context
4479 : * automatically deletes sub-contexts too.
4480 : */
4481 50040 : if (node->aggstrategy == AGG_HASHED || node->aggstrategy == AGG_MIXED)
4482 : {
4483 11034 : hashagg_reset_spill_state(node);
4484 :
4485 11034 : node->hash_ever_spilled = false;
4486 11034 : node->hash_spill_mode = false;
4487 11034 : node->hash_ngroups_current = 0;
4488 :
4489 11034 : ReScanExprContext(node->hashcontext);
4490 : /* Rebuild an empty hash table */
4491 11034 : build_hash_tables(node);
4492 11034 : node->table_filled = false;
4493 : /* iterator will be reset when the table is filled */
4494 :
4495 11034 : hashagg_recompile_expressions(node, false, false);
4496 : }
4497 :
4498 50040 : if (node->aggstrategy != AGG_HASHED)
4499 : {
4500 : /*
4501 : * Reset the per-group state (in particular, mark transvalues null)
4502 : */
4503 78108 : for (setno = 0; setno < numGroupingSets; setno++)
4504 : {
4505 171336 : MemSet(node->pergroups[setno], 0,
4506 : sizeof(AggStatePerGroupData) * node->numaggs);
4507 : }
4508 :
4509 : /* reset to phase 1 */
4510 39036 : initialize_phase(node, 1);
4511 :
4512 39036 : node->input_done = false;
4513 39036 : node->projected_set = -1;
4514 : }
4515 :
4516 50040 : if (outerPlan->chgParam == NULL)
4517 188 : ExecReScan(outerPlan);
4518 : }
4519 :
4520 :
4521 : /***********************************************************************
4522 : * API exposed to aggregate functions
4523 : ***********************************************************************/
4524 :
4525 :
4526 : /*
4527 : * AggCheckCallContext - test if a SQL function is being called as an aggregate
4528 : *
4529 : * The transition and/or final functions of an aggregate may want to verify
4530 : * that they are being called as aggregates, rather than as plain SQL
4531 : * functions. They should use this function to do so. The return value
4532 : * is nonzero if being called as an aggregate, or zero if not. (Specific
4533 : * nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more
4534 : * values could conceivably appear in future.)
4535 : *
4536 : * If aggcontext isn't NULL, the function also stores at *aggcontext the
4537 : * identity of the memory context that aggregate transition values are being
4538 : * stored in. Note that the same aggregate call site (flinfo) may be called
4539 : * interleaved on different transition values in different contexts, so it's
4540 : * not kosher to cache aggcontext under fn_extra. It is, however, kosher to
4541 : * cache it in the transvalue itself (for internal-type transvalues).
4542 : */
4543 : int
4544 4726464 : AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
4545 : {
4546 4726464 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4547 : {
4548 4715078 : if (aggcontext)
4549 : {
4550 2097450 : AggState *aggstate = ((AggState *) fcinfo->context);
4551 2097450 : ExprContext *cxt = aggstate->curaggcontext;
4552 :
4553 2097450 : *aggcontext = cxt->ecxt_per_tuple_memory;
4554 : }
4555 4715078 : return AGG_CONTEXT_AGGREGATE;
4556 : }
4557 11386 : if (fcinfo->context && IsA(fcinfo->context, WindowAggState))
4558 : {
4559 9512 : if (aggcontext)
4560 710 : *aggcontext = ((WindowAggState *) fcinfo->context)->curaggcontext;
4561 9512 : return AGG_CONTEXT_WINDOW;
4562 : }
4563 :
4564 : /* this is just to prevent "uninitialized variable" warnings */
4565 1874 : if (aggcontext)
4566 1826 : *aggcontext = NULL;
4567 1874 : return 0;
4568 : }
4569 :
4570 : /*
4571 : * AggGetAggref - allow an aggregate support function to get its Aggref
4572 : *
4573 : * If the function is being called as an aggregate support function,
4574 : * return the Aggref node for the aggregate call. Otherwise, return NULL.
4575 : *
4576 : * Aggregates sharing the same inputs and transition functions can get
4577 : * merged into a single transition calculation. If the transition function
4578 : * calls AggGetAggref, it will get some one of the Aggrefs for which it is
4579 : * executing. It must therefore not pay attention to the Aggref fields that
4580 : * relate to the final function, as those are indeterminate. But if a final
4581 : * function calls AggGetAggref, it will get a precise result.
4582 : *
4583 : * Note that if an aggregate is being used as a window function, this will
4584 : * return NULL. We could provide a similar function to return the relevant
4585 : * WindowFunc node in such cases, but it's not needed yet.
4586 : */
4587 : Aggref *
4588 246 : AggGetAggref(FunctionCallInfo fcinfo)
4589 : {
4590 246 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4591 : {
4592 246 : AggState *aggstate = (AggState *) fcinfo->context;
4593 : AggStatePerAgg curperagg;
4594 : AggStatePerTrans curpertrans;
4595 :
4596 : /* check curperagg (valid when in a final function) */
4597 246 : curperagg = aggstate->curperagg;
4598 :
4599 246 : if (curperagg)
4600 0 : return curperagg->aggref;
4601 :
4602 : /* check curpertrans (valid when in a transition function) */
4603 246 : curpertrans = aggstate->curpertrans;
4604 :
4605 246 : if (curpertrans)
4606 246 : return curpertrans->aggref;
4607 : }
4608 0 : return NULL;
4609 : }
4610 :
4611 : /*
4612 : * AggGetTempMemoryContext - fetch short-term memory context for aggregates
4613 : *
4614 : * This is useful in agg final functions; the context returned is one that
4615 : * the final function can safely reset as desired. This isn't useful for
4616 : * transition functions, since the context returned MAY (we don't promise)
4617 : * be the same as the context those are called in.
4618 : *
4619 : * As above, this is currently not useful for aggs called as window functions.
4620 : */
4621 : MemoryContext
4622 0 : AggGetTempMemoryContext(FunctionCallInfo fcinfo)
4623 : {
4624 0 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4625 : {
4626 0 : AggState *aggstate = (AggState *) fcinfo->context;
4627 :
4628 0 : return aggstate->tmpcontext->ecxt_per_tuple_memory;
4629 : }
4630 0 : return NULL;
4631 : }
4632 :
4633 : /*
4634 : * AggStateIsShared - find out whether transition state is shared
4635 : *
4636 : * If the function is being called as an aggregate support function,
4637 : * return true if the aggregate's transition state is shared across
4638 : * multiple aggregates, false if it is not.
4639 : *
4640 : * Returns true if not called as an aggregate support function.
4641 : * This is intended as a conservative answer, ie "no you'd better not
4642 : * scribble on your input". In particular, will return true if the
4643 : * aggregate is being used as a window function, which is a scenario
4644 : * in which changing the transition state is a bad idea. We might
4645 : * want to refine the behavior for the window case in future.
4646 : */
4647 : bool
4648 246 : AggStateIsShared(FunctionCallInfo fcinfo)
4649 : {
4650 246 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4651 : {
4652 246 : AggState *aggstate = (AggState *) fcinfo->context;
4653 : AggStatePerAgg curperagg;
4654 : AggStatePerTrans curpertrans;
4655 :
4656 : /* check curperagg (valid when in a final function) */
4657 246 : curperagg = aggstate->curperagg;
4658 :
4659 246 : if (curperagg)
4660 0 : return aggstate->pertrans[curperagg->transno].aggshared;
4661 :
4662 : /* check curpertrans (valid when in a transition function) */
4663 246 : curpertrans = aggstate->curpertrans;
4664 :
4665 246 : if (curpertrans)
4666 246 : return curpertrans->aggshared;
4667 : }
4668 0 : return true;
4669 : }
4670 :
4671 : /*
4672 : * AggRegisterCallback - register a cleanup callback for an aggregate
4673 : *
4674 : * This is useful for aggs to register shutdown callbacks, which will ensure
4675 : * that non-memory resources are freed. The callback will occur just before
4676 : * the associated aggcontext (as returned by AggCheckCallContext) is reset,
4677 : * either between groups or as a result of rescanning the query. The callback
4678 : * will NOT be called on error paths. The typical use-case is for freeing of
4679 : * tuplestores or tuplesorts maintained in aggcontext, or pins held by slots
4680 : * created by the agg functions. (The callback will not be called until after
4681 : * the result of the finalfn is no longer needed, so it's safe for the finalfn
4682 : * to return data that will be freed by the callback.)
4683 : *
4684 : * As above, this is currently not useful for aggs called as window functions.
4685 : */
4686 : void
4687 660 : AggRegisterCallback(FunctionCallInfo fcinfo,
4688 : ExprContextCallbackFunction func,
4689 : Datum arg)
4690 : {
4691 660 : if (fcinfo->context && IsA(fcinfo->context, AggState))
4692 : {
4693 660 : AggState *aggstate = (AggState *) fcinfo->context;
4694 660 : ExprContext *cxt = aggstate->curaggcontext;
4695 :
4696 660 : RegisterExprContextCallback(cxt, func, arg);
4697 :
4698 660 : return;
4699 : }
4700 0 : elog(ERROR, "aggregate function cannot register a callback in this context");
4701 : }
4702 :
4703 :
4704 : /* ----------------------------------------------------------------
4705 : * Parallel Query Support
4706 : * ----------------------------------------------------------------
4707 : */
4708 :
4709 : /* ----------------------------------------------------------------
4710 : * ExecAggEstimate
4711 : *
4712 : * Estimate space required to propagate aggregate statistics.
4713 : * ----------------------------------------------------------------
4714 : */
4715 : void
4716 554 : ExecAggEstimate(AggState *node, ParallelContext *pcxt)
4717 : {
4718 : Size size;
4719 :
4720 : /* don't need this if not instrumenting or no workers */
4721 554 : if (!node->ss.ps.instrument || pcxt->nworkers == 0)
4722 452 : return;
4723 :
4724 102 : size = mul_size(pcxt->nworkers, sizeof(AggregateInstrumentation));
4725 102 : size = add_size(size, offsetof(SharedAggInfo, sinstrument));
4726 102 : shm_toc_estimate_chunk(&pcxt->estimator, size);
4727 102 : shm_toc_estimate_keys(&pcxt->estimator, 1);
4728 : }
4729 :
4730 : /* ----------------------------------------------------------------
4731 : * ExecAggInitializeDSM
4732 : *
4733 : * Initialize DSM space for aggregate statistics.
4734 : * ----------------------------------------------------------------
4735 : */
4736 : void
4737 554 : ExecAggInitializeDSM(AggState *node, ParallelContext *pcxt)
4738 : {
4739 : Size size;
4740 :
4741 : /* don't need this if not instrumenting or no workers */
4742 554 : if (!node->ss.ps.instrument || pcxt->nworkers == 0)
4743 452 : return;
4744 :
4745 102 : size = offsetof(SharedAggInfo, sinstrument)
4746 102 : + pcxt->nworkers * sizeof(AggregateInstrumentation);
4747 102 : node->shared_info = shm_toc_allocate(pcxt->toc, size);
4748 : /* ensure any unfilled slots will contain zeroes */
4749 102 : memset(node->shared_info, 0, size);
4750 102 : node->shared_info->num_workers = pcxt->nworkers;
4751 102 : shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
4752 102 : node->shared_info);
4753 : }
4754 :
4755 : /* ----------------------------------------------------------------
4756 : * ExecAggInitializeWorker
4757 : *
4758 : * Attach worker to DSM space for aggregate statistics.
4759 : * ----------------------------------------------------------------
4760 : */
4761 : void
4762 1548 : ExecAggInitializeWorker(AggState *node, ParallelWorkerContext *pwcxt)
4763 : {
4764 1548 : node->shared_info =
4765 1548 : shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
4766 1548 : }
4767 :
4768 : /* ----------------------------------------------------------------
4769 : * ExecAggRetrieveInstrumentation
4770 : *
4771 : * Transfer aggregate statistics from DSM to private memory.
4772 : * ----------------------------------------------------------------
4773 : */
4774 : void
4775 102 : ExecAggRetrieveInstrumentation(AggState *node)
4776 : {
4777 : Size size;
4778 : SharedAggInfo *si;
4779 :
4780 102 : if (node->shared_info == NULL)
4781 0 : return;
4782 :
4783 102 : size = offsetof(SharedAggInfo, sinstrument)
4784 102 : + node->shared_info->num_workers * sizeof(AggregateInstrumentation);
4785 102 : si = palloc(size);
4786 102 : memcpy(si, node->shared_info, size);
4787 102 : node->shared_info = si;
4788 : }
|