LCOV - code coverage report
Current view: top level - src/backend/executor - nodeAgg.c (source / functions)
Test:         PostgreSQL 19devel
Date:         2025-12-13 03:17:34

                 Hit     Total   Coverage
    Lines:       1420    1500    94.7 %
    Functions:   57      58      98.3 %

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeAgg.c
       4             :  *    Routines to handle aggregate nodes.
       5             :  *
       6             :  *    ExecAgg normally evaluates each aggregate in the following steps:
       7             :  *
       8             :  *       transvalue = initcond
       9             :  *       foreach input_tuple do
      10             :  *          transvalue = transfunc(transvalue, input_value(s))
      11             :  *       result = finalfunc(transvalue, direct_argument(s))
      12             :  *
      13             :  *    If a finalfunc is not supplied then the result is just the ending
      14             :  *    value of transvalue.
      15             :  *
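For illustration, a minimal standalone sketch of that loop for a hypothetical sum-like aggregate over int32 inputs with an int64 transition value; the function and parameter names are invented and this code is not part of nodeAgg.c:

    #include <stdbool.h>
    #include <stdint.h>

    static int64_t
    sketch_sum_int32(const int32_t *values, const bool *nulls, int ntuples)
    {
        int64_t     transvalue = 0;     /* initcond */

        for (int i = 0; i < ntuples; i++)
        {
            if (nulls[i])
                continue;               /* a strict transfunc skips NULLs */
            transvalue += values[i];    /* transfunc(transvalue, input) */
        }
        return transvalue;              /* no finalfunc: result is transvalue */
    }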
      16             :  *    Other behaviors can be selected by the "aggsplit" mode, which exists
      17             :  *    to support partial aggregation.  It is possible to:
      18             :  *    * Skip running the finalfunc, so that the output is always the
      19             :  *    final transvalue state.
      20             :  *    * Substitute the combinefunc for the transfunc, so that transvalue
      21             :  *    states (propagated up from a child partial-aggregation step) are merged
      22             :  *    rather than processing raw input rows.  (The statements below about
      23             :  *    the transfunc apply equally to the combinefunc, when it's selected.)
      24             :  *    * Apply the serializefunc to the output values (this only makes sense
      25             :  *    when skipping the finalfunc, since the serializefunc works on the
      26             :  *    transvalue data type).
      27             :  *    * Apply the deserializefunc to the input values (this only makes sense
      28             :  *    when using the combinefunc, for similar reasons).
      29             :  *    It is the planner's responsibility to connect up Agg nodes using these
      30             :  *    alternate behaviors in a way that makes sense, with partial aggregation
      31             :  *    results being fed to nodes that expect them.
      32             :  *
      33             :  *    If a normal aggregate call specifies DISTINCT or ORDER BY, we sort the
      34             :  *    input tuples and eliminate duplicates (if required) before performing
      35             :  *    the above-depicted process.  (However, we don't do that for ordered-set
      36             :  *    aggregates; their "ORDER BY" inputs are ordinary aggregate arguments
      37             :  *    so far as this module is concerned.)  Note that partial aggregation
      38             :  *    is not supported in these cases, since we couldn't ensure global
      39             :  *    ordering or distinctness of the inputs.
      40             :  *
      41             :  *    If transfunc is marked "strict" in pg_proc and initcond is NULL,
      42             :  *    then the first non-NULL input_value is assigned directly to transvalue,
      43             :  *    and transfunc isn't applied until the second non-NULL input_value.
      44             :  *    The agg's first input type and transtype must be the same in this case!
      45             :  *
      46             :  *    If transfunc is marked "strict" then NULL input_values are skipped,
      47             :  *    keeping the previous transvalue.  If transfunc is not strict then it
      48             :  *    is called for every input tuple and must deal with NULL initcond
      49             :  *    or NULL input_values for itself.
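A hedged sketch of that strict-transfunc bookkeeping as standalone C; the struct and function names are invented, but the rules follow the description above (the executor's real version is advance_transition_function(), further down in this file):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct SketchTransState
    {
        int64_t     transValue;
        bool        transValueIsNull;
        bool        noTransValue;   /* initcond was NULL; awaiting first input */
    } SketchTransState;

    static void
    sketch_advance_strict(SketchTransState *st, int64_t input, bool inputIsNull,
                          int64_t (*transfn) (int64_t, int64_t))
    {
        if (inputIsNull)
            return;                 /* strict: NULL input keeps prior value */

        if (st->noTransValue)
        {
            /* first non-NULL input is assigned directly to the transvalue */
            st->transValue = input;
            st->transValueIsNull = false;
            st->noTransValue = false;
            return;
        }

        if (st->transValueIsNull)
            return;                 /* a strict transfn returned NULL earlier */

        st->transValue = transfn(st->transValue, input);
    }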
      50             :  *
      51             :  *    If finalfunc is marked "strict" then it is not called when the
      52             :  *    ending transvalue is NULL, instead a NULL result is created
      53             :  *    automatically (this is just the usual handling of strict functions,
      54             :  *    of course).  A non-strict finalfunc can make its own choice of
      55             :  *    what to return for a NULL ending transvalue.
      56             :  *
      57             :  *    Ordered-set aggregates are treated specially in one other way: we
      58             :  *    evaluate any "direct" arguments and pass them to the finalfunc along
      59             :  *    with the transition value.
      60             :  *
      61             :  *    A finalfunc can have additional arguments beyond the transvalue and
      62             :  *    any "direct" arguments, corresponding to the input arguments of the
      63             :  *    aggregate.  These are always just passed as NULL.  Such arguments may be
      64             :  *    needed to allow resolution of a polymorphic aggregate's result type.
      65             :  *
      66             :  *    We compute aggregate input expressions and run the transition functions
      67             :  *    in a temporary econtext (aggstate->tmpcontext).  This is reset at least
      68             :  *    once per input tuple, so when the transvalue datatype is
      69             :  *    pass-by-reference, we have to be careful to copy it into a longer-lived
      70             :  *    memory context, and free the prior value to avoid memory leakage.  We
      71             :  *    store transvalues in another set of econtexts, aggstate->aggcontexts
      72             :  *    (one per grouping set, see below), which are also used for the hashtable
      73             :  *    structures in AGG_HASHED mode.  These econtexts are rescanned, not just
      74             :  *    reset, at group boundaries so that aggregate transition functions can
      75             :  *    register shutdown callbacks via AggRegisterCallback.
      76             :  *
      77             :  *    The node's regular econtext (aggstate->ss.ps.ps_ExprContext) is used to
      78             :  *    run finalize functions and compute the output tuple; this context can be
      79             :  *    reset once per output tuple.
      80             :  *
      81             :  *    The executor's AggState node is passed as the fmgr "context" value in
      82             :  *    all transfunc and finalfunc calls.  It is not recommended that the
      83             :  *    transition functions look at the AggState node directly, but they can
      84             :  *    use AggCheckCallContext() to verify that they are being called by
      85             :  *    nodeAgg.c (and not as ordinary SQL functions).  The main reason a
      86             :  *    transition function might want to know this is so that it can avoid
      87             :  *    palloc'ing a fixed-size pass-by-ref transition value on every call:
      88             :  *    it can instead just scribble on and return its left input.  Ordinarily
      89             :  *    it is completely forbidden for functions to modify pass-by-ref inputs,
      90             :  *    but in the aggregate case we know the left input is either the initial
      91             :  *    transition value or a previous function result, and in either case its
      92             :  *    value need not be preserved.  See int8inc() for an example.  Notice that
      93             :  *    the EEOP_AGG_PLAIN_TRANS step is coded to avoid a data copy step when
      94             :  *    the previous transition value pointer is returned.  It is also possible
      95             :  *    to avoid repeated data copying when the transition value is an expanded
      96             :  *    object: to do that, the transition function must take care to return
      97             :  *    an expanded object that is in a child context of the memory context
      98             :  *    returned by AggCheckCallContext().  Also, some transition functions want
      99             :  *    to store working state in addition to the nominal transition value; they
     100             :  *    can use the memory context returned by AggCheckCallContext() to do that.
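A hedged sketch of a pass-by-ref transition function written in that style. The aggregate, state struct, and function names are invented; AggCheckCallContext() and the fmgr macros are the standard backend APIs, and such a state would typically be declared with an "internal" transition type in CREATE AGGREGATE:

    #include "postgres.h"
    #include "fmgr.h"
    #include "utils/memutils.h"

    typedef struct SketchAvgState
    {
        int64       sum;
        int64       count;
    } SketchAvgState;

    PG_FUNCTION_INFO_V1(sketch_avg_accum);

    Datum
    sketch_avg_accum(PG_FUNCTION_ARGS)
    {
        MemoryContext aggcontext;
        SketchAvgState *state;

        /* insist on being called as an aggregate transition function */
        if (!AggCheckCallContext(fcinfo, &aggcontext))
            elog(ERROR, "sketch_avg_accum called in non-aggregate context");

        if (PG_ARGISNULL(0))
        {
            /* first call for this group: build the state in the agg context */
            state = (SketchAvgState *)
                MemoryContextAllocZero(aggcontext, sizeof(SketchAvgState));
        }
        else
            state = (SketchAvgState *) PG_GETARG_POINTER(0);

        if (!PG_ARGISNULL(1))
        {
            /* scribble on the existing state rather than copying it */
            state->sum += PG_GETARG_INT64(1);
            state->count++;
        }

        PG_RETURN_POINTER(state);
    }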
     101             :  *
     102             :  *    Note: AggCheckCallContext() is available as of PostgreSQL 9.0.  The
     103             :  *    AggState is available as context in earlier releases (back to 8.1),
     104             :  *    but direct examination of the node is needed to use it before 9.0.
     105             :  *
     106             :  *    As of 9.4, aggregate transition functions can also use AggGetAggref()
     107             :  *    to get hold of the Aggref expression node for their aggregate call.
     108             :  *    This is mainly intended for ordered-set aggregates, which are not
     109             :  *    supported as window functions.  (A regular aggregate function would
     110             :  *    need some fallback logic to use this, since there's no Aggref node
     111             :  *    for a window function.)
     112             :  *
     113             :  *    Grouping sets:
     114             :  *
     115             :  *    A list of grouping sets which is structurally equivalent to a ROLLUP
     116             :  *    clause (e.g. (a,b,c), (a,b), (a)) can be processed in a single pass over
     117             :  *    ordered data.  We do this by keeping a separate set of transition values
     118             :  *    for each grouping set being concurrently processed; for each input tuple
     119             :  *    we update them all, and on group boundaries we reset those states
     120             :  *    (starting at the front of the list) whose grouping values have changed
     121             :  *    (the list of grouping sets is ordered from most specific to least
     122             :  *    specific).
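A hedged sketch of that reset-on-boundary step; the callback names are invented stand-ins for the real finalize/initialize code:

    /*
     * The grouping sets are ordered most specific first, so the sets whose
     * grouping values changed form a prefix of the list: emit their results
     * and reset their transition states, while coarser sets keep going.
     */
    static void
    sketch_rollup_boundary(int num_changed_sets,
                           void (*emit_result) (int setno),
                           void (*reset_transvalues) (int setno))
    {
        for (int setno = 0; setno < num_changed_sets; setno++)
        {
            emit_result(setno);
            reset_transvalues(setno);
        }
    }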
     123             :  *
     124             :  *    Where more complex grouping sets are used, we break them down into
     125             :  *    "phases", where each phase has a different sort order (except phase 0
     126             :  *    which is reserved for hashing).  During each phase but the last, the
     127             :  *    input tuples are additionally stored in a tuplesort which is keyed to the
     128             :  *    next phase's sort order; during each phase but the first, the input
     129             :  *    tuples are drawn from the previously sorted data.  (The sorting of the
     130             :  *    data for the first phase is handled by the planner, as it might be
     131             :  *    satisfied by underlying nodes.)
     132             :  *
     133             :  *    Hashing can be mixed with sorted grouping.  To do this, we have an
     134             :  *    AGG_MIXED strategy that populates the hashtables during the first sorted
     135             :  *    phase, and switches to reading them out after completing all sort phases.
     136             :  *    We can also support AGG_HASHED with multiple hash tables and no sorting
     137             :  *    at all.
     138             :  *
     139             :  *    From the perspective of aggregate transition and final functions, the
     140             :  *    only issue regarding grouping sets is this: a single call site (flinfo)
     141             :  *    of an aggregate function may be used for updating several different
     142             :  *    transition values in turn. So the function must not cache in the flinfo
     143             :  *    anything which logically belongs as part of the transition value (most
     144             :  *    importantly, the memory context in which the transition value exists).
     145             :  *    The support API functions (AggCheckCallContext, AggRegisterCallback) are
     146             :  *    sensitive to the grouping set for which the aggregate function is
     147             :  *    currently being called.
     148             :  *
     149             :  *    Plan structure:
     150             :  *
     151             :  *    What we get from the planner is actually one "real" Agg node which is
     152             :  *    part of the plan tree proper, but which optionally has an additional list
     153             :  *    of Agg nodes hung off the side via the "chain" field.  This is because an
     154             :  *    Agg node happens to be a convenient representation of all the data we
     155             :  *    need for grouping sets.
     156             :  *
     157             :  *    For many purposes, we treat the "real" node as if it were just the first
     158             :  *    node in the chain.  The chain must be ordered such that hashed entries
     159             :  *    come before sorted/plain entries; the real node is marked AGG_MIXED if
     160             :  *    there are both types present (in which case the real node describes one
      161             :  *    of the hashed groupings; other AGG_HASHED nodes may optionally follow in
     162             :  *    the chain, followed in turn by AGG_SORTED or (one) AGG_PLAIN node).  If
     163             :  *    the real node is marked AGG_HASHED or AGG_SORTED, then all the chained
     164             :  *    nodes must be of the same type; if it is AGG_PLAIN, there can be no
     165             :  *    chained nodes.
     166             :  *
     167             :  *    We collect all hashed nodes into a single "phase", numbered 0, and create
     168             :  *    a sorted phase (numbered 1..n) for each AGG_SORTED or AGG_PLAIN node.
     169             :  *    Phase 0 is allocated even if there are no hashes, but remains unused in
     170             :  *    that case.
     171             :  *
     172             :  *    AGG_HASHED nodes actually refer to only a single grouping set each,
     173             :  *    because for each hashed grouping we need a separate grpColIdx and
     174             :  *    numGroups estimate.  AGG_SORTED nodes represent a "rollup", a list of
     175             :  *    grouping sets that share a sort order.  Each AGG_SORTED node other than
     176             :  *    the first one has an associated Sort node which describes the sort order
     177             :  *    to be used; the first sorted node takes its input from the outer subtree,
     178             :  *    which the planner has already arranged to provide ordered data.
     179             :  *
     180             :  *    Memory and ExprContext usage:
     181             :  *
     182             :  *    Because we're accumulating aggregate values across input rows, we need to
     183             :  *    use more memory contexts than just simple input/output tuple contexts.
     184             :  *    In fact, for a rollup, we need a separate context for each grouping set
     185             :  *    so that we can reset the inner (finer-grained) aggregates on their group
     186             :  *    boundaries while continuing to accumulate values for outer
     187             :  *    (coarser-grained) groupings.  On top of this, we might be simultaneously
     188             :  *    populating hashtables; however, we only need one context for all the
     189             :  *    hashtables.
     190             :  *
     191             :  *    So we create an array, aggcontexts, with an ExprContext for each grouping
     192             :  *    set in the largest rollup that we're going to process, and use the
     193             :  *    per-tuple memory context of those ExprContexts to store the aggregate
     194             :  *    transition values.  hashcontext is the single context created to support
     195             :  *    all hash tables.
     196             :  *
     197             :  *    Spilling To Disk
     198             :  *
     199             :  *    When performing hash aggregation, if the hash table memory exceeds the
     200             :  *    limit (see hash_agg_check_limits()), we enter "spill mode". In spill
     201             :  *    mode, we advance the transition states only for groups already in the
      202             :  *    hash table. For tuples that would need to create new hash table
     203             :  *    entries (and initialize new transition states), we instead spill them to
     204             :  *    disk to be processed later. The tuples are spilled in a partitioned
     205             :  *    manner, so that subsequent batches are smaller and less likely to exceed
     206             :  *    hash_mem (if a batch does exceed hash_mem, it must be spilled
     207             :  *    recursively).
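A hedged sketch of that per-tuple decision in spill mode; the callback names are invented stand-ins for the real hash-table lookup, entry initialization, and spill routines:

    #include <stdbool.h>
    #include <stdint.h>

    static void
    sketch_spillmode_tuple(bool found_in_table, bool spill_mode, uint32_t hash,
                           void (*advance_existing_group) (uint32_t hash),
                           void (*create_new_group) (uint32_t hash),
                           void (*spill_for_later) (uint32_t hash))
    {
        if (found_in_table)
            advance_existing_group(hash);   /* existing groups keep advancing */
        else if (!spill_mode)
            create_new_group(hash);         /* still within the memory limit */
        else
            spill_for_later(hash);          /* defer the tuple to a later batch */
    }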
     208             :  *
     209             :  *    Spilled data is written to logical tapes. These provide better control
     210             :  *    over memory usage, disk space, and the number of files than if we were
     211             :  *    to use a BufFile for each spill.  We don't know the number of tapes needed
     212             :  *    at the start of the algorithm (because it can recurse), so a tape set is
     213             :  *    allocated at the beginning, and individual tapes are created as needed.
     214             :  *    As a particular tape is read, logtape.c recycles its disk space. When a
     215             :  *    tape is read to completion, it is destroyed entirely.
     216             :  *
     217             :  *    Tapes' buffers can take up substantial memory when many tapes are open at
     218             :  *    once. We only need one tape open at a time in read mode (using a buffer
      219             :  *    that's a multiple of BLCKSZ); but we need one tape open in write mode for
      220             :  *    each partition, each requiring a buffer of size BLCKSZ.
     221             :  *
     222             :  *    Note that it's possible for transition states to start small but then
     223             :  *    grow very large; for instance in the case of ARRAY_AGG. In such cases,
     224             :  *    it's still possible to significantly exceed hash_mem. We try to avoid
     225             :  *    this situation by estimating what will fit in the available memory, and
     226             :  *    imposing a limit on the number of groups separately from the amount of
     227             :  *    memory consumed.
     228             :  *
     229             :  *    Transition / Combine function invocation:
     230             :  *
     231             :  *    For performance reasons transition functions, including combine
     232             :  *    functions, aren't invoked one-by-one from nodeAgg.c after computing
     233             :  *    arguments using the expression evaluation engine. Instead
     234             :  *    ExecBuildAggTrans() builds one large expression that does both argument
     235             :  *    evaluation and transition function invocation. That avoids performance
     236             :  *    issues due to repeated uses of expression evaluation, complications due
      237             :  *    to filter expressions having to be evaluated early, and allows the
      238             :  *    entire expression to be JIT-compiled into one native function.
     239             :  *
     240             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
     241             :  * Portions Copyright (c) 1994, Regents of the University of California
     242             :  *
     243             :  * IDENTIFICATION
     244             :  *    src/backend/executor/nodeAgg.c
     245             :  *
     246             :  *-------------------------------------------------------------------------
     247             :  */
     248             : 
     249             : #include "postgres.h"
     250             : 
     251             : #include "access/htup_details.h"
     252             : #include "access/parallel.h"
     253             : #include "catalog/objectaccess.h"
     254             : #include "catalog/pg_aggregate.h"
     255             : #include "catalog/pg_proc.h"
     256             : #include "catalog/pg_type.h"
     257             : #include "common/hashfn.h"
     258             : #include "executor/execExpr.h"
     259             : #include "executor/executor.h"
     260             : #include "executor/nodeAgg.h"
     261             : #include "lib/hyperloglog.h"
     262             : #include "miscadmin.h"
     263             : #include "nodes/nodeFuncs.h"
     264             : #include "optimizer/optimizer.h"
     265             : #include "parser/parse_agg.h"
     266             : #include "parser/parse_coerce.h"
     267             : #include "utils/acl.h"
     268             : #include "utils/builtins.h"
     269             : #include "utils/datum.h"
     270             : #include "utils/expandeddatum.h"
     271             : #include "utils/injection_point.h"
     272             : #include "utils/logtape.h"
     273             : #include "utils/lsyscache.h"
     274             : #include "utils/memutils.h"
     275             : #include "utils/memutils_memorychunk.h"
     276             : #include "utils/syscache.h"
     277             : #include "utils/tuplesort.h"
     278             : 
     279             : /*
     280             :  * Control how many partitions are created when spilling HashAgg to
     281             :  * disk.
     282             :  *
     283             :  * HASHAGG_PARTITION_FACTOR is multiplied by the estimated number of
     284             :  * partitions needed such that each partition will fit in memory. The factor
     285             :  * is set higher than one because there's not a high cost to having a few too
     286             :  * many partitions, and it makes it less likely that a partition will need to
     287             :  * be spilled recursively. Another benefit of having more, smaller partitions
     288             :  * is that small hash tables may perform better than large ones due to memory
     289             :  * caching effects.
     290             :  *
     291             :  * We also specify a min and max number of partitions per spill. Too few might
     292             :  * mean a lot of wasted I/O from repeated spilling of the same tuples. Too
     293             :  * many will result in lots of memory wasted buffering the spill files (which
     294             :  * could instead be spent on a larger hash table).
     295             :  */
     296             : #define HASHAGG_PARTITION_FACTOR 1.50
     297             : #define HASHAGG_MIN_PARTITIONS 4
     298             : #define HASHAGG_MAX_PARTITIONS 1024
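A hedged sketch of how these constants might combine. The real logic is in hash_choose_num_partitions(), which additionally rounds the result to a power of two and accounts for hash bits already used by earlier spill levels:

    static int
    sketch_choose_num_partitions(double input_groups, double hashentrysize,
                                 double hash_mem_bytes)
    {
        /* partitions needed so that each one is estimated to fit in memory */
        double      dpartitions = (input_groups * hashentrysize) / hash_mem_bytes;
        int         npartitions;

        /* pad by the factor, then clamp to the min/max bounds */
        dpartitions *= HASHAGG_PARTITION_FACTOR;
        npartitions = (int) dpartitions;

        if (npartitions < HASHAGG_MIN_PARTITIONS)
            npartitions = HASHAGG_MIN_PARTITIONS;
        if (npartitions > HASHAGG_MAX_PARTITIONS)
            npartitions = HASHAGG_MAX_PARTITIONS;

        return npartitions;
    }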
     299             : 
     300             : /*
     301             :  * For reading from tapes, the buffer size must be a multiple of
     302             :  * BLCKSZ. Larger values help when reading from multiple tapes concurrently,
     303             :  * but that doesn't happen in HashAgg, so we simply use BLCKSZ. Writing to a
     304             :  * tape always uses a buffer of size BLCKSZ.
     305             :  */
     306             : #define HASHAGG_READ_BUFFER_SIZE BLCKSZ
     307             : #define HASHAGG_WRITE_BUFFER_SIZE BLCKSZ
     308             : 
     309             : /*
     310             :  * HyperLogLog is used for estimating the cardinality of the spilled tuples in
     311             :  * a given partition. 5 bits corresponds to a size of about 32 bytes and a
     312             :  * worst-case error of around 18%. That's effective enough to choose a
     313             :  * reasonable number of partitions when recursing.
     314             :  */
     315             : #define HASHAGG_HLL_BIT_WIDTH 5
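As a rough check on those figures, assuming the usual HyperLogLog error estimate of about 1.04/sqrt(m) for m one-byte registers:

    m = 2^5 = 32 registers   ->  roughly 32 bytes of estimator state
    1.04 / sqrt(32) ~= 0.18  ->  roughly 18% relative error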
     316             : 
     317             : /*
     318             :  * Assume the palloc overhead always uses sizeof(MemoryChunk) bytes.
     319             :  */
     320             : #define CHUNKHDRSZ sizeof(MemoryChunk)
     321             : 
     322             : /*
     323             :  * Represents partitioned spill data for a single hashtable. Contains the
     324             :  * necessary information to route tuples to the correct partition, and to
     325             :  * transform the spilled data into new batches.
     326             :  *
     327             :  * The high bits are used for partition selection (when recursing, we ignore
     328             :  * the bits that have already been used for partition selection at an earlier
     329             :  * level).
     330             :  */
     331             : typedef struct HashAggSpill
     332             : {
     333             :     int         npartitions;    /* number of partitions */
     334             :     LogicalTape **partitions;   /* spill partition tapes */
     335             :     int64      *ntuples;        /* number of tuples in each partition */
     336             :     uint32      mask;           /* mask to find partition from hash value */
     337             :     int         shift;          /* after masking, shift by this amount */
     338             :     hyperLogLogState *hll_card; /* cardinality estimate for contents */
     339             : } HashAggSpill;
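A hedged sketch of how the mask and shift fields route a tuple's hash value to a partition, consistent with the comment above about using the not-yet-consumed high bits:

    static inline int
    sketch_spill_partition(const HashAggSpill *spill, uint32 hash)
    {
        return (int) ((hash & spill->mask) >> spill->shift);
    }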
     340             : 
     341             : /*
     342             :  * Represents work to be done for one pass of hash aggregation (with only one
     343             :  * grouping set).
     344             :  *
     345             :  * Also tracks the bits of the hash already used for partition selection by
     346             :  * earlier iterations, so that this batch can use new bits. If all bits have
     347             :  * already been used, no partitioning will be done (any spilled data will go
     348             :  * to a single output tape).
     349             :  */
     350             : typedef struct HashAggBatch
     351             : {
     352             :     int         setno;          /* grouping set */
     353             :     int         used_bits;      /* number of bits of hash already used */
     354             :     LogicalTape *input_tape;    /* input partition tape */
     355             :     int64       input_tuples;   /* number of tuples in this batch */
     356             :     double      input_card;     /* estimated group cardinality */
     357             : } HashAggBatch;
     358             : 
     359             : /* used to find referenced colnos */
     360             : typedef struct FindColsContext
     361             : {
     362             :     bool        is_aggref;      /* is under an aggref */
     363             :     Bitmapset  *aggregated;     /* column references under an aggref */
     364             :     Bitmapset  *unaggregated;   /* other column references */
     365             : } FindColsContext;
     366             : 
     367             : static void select_current_set(AggState *aggstate, int setno, bool is_hash);
     368             : static void initialize_phase(AggState *aggstate, int newphase);
     369             : static TupleTableSlot *fetch_input_tuple(AggState *aggstate);
     370             : static void initialize_aggregates(AggState *aggstate,
     371             :                                   AggStatePerGroup *pergroups,
     372             :                                   int numReset);
     373             : static void advance_transition_function(AggState *aggstate,
     374             :                                         AggStatePerTrans pertrans,
     375             :                                         AggStatePerGroup pergroupstate);
     376             : static void advance_aggregates(AggState *aggstate);
     377             : static void process_ordered_aggregate_single(AggState *aggstate,
     378             :                                              AggStatePerTrans pertrans,
     379             :                                              AggStatePerGroup pergroupstate);
     380             : static void process_ordered_aggregate_multi(AggState *aggstate,
     381             :                                             AggStatePerTrans pertrans,
     382             :                                             AggStatePerGroup pergroupstate);
     383             : static void finalize_aggregate(AggState *aggstate,
     384             :                                AggStatePerAgg peragg,
     385             :                                AggStatePerGroup pergroupstate,
     386             :                                Datum *resultVal, bool *resultIsNull);
     387             : static void finalize_partialaggregate(AggState *aggstate,
     388             :                                       AggStatePerAgg peragg,
     389             :                                       AggStatePerGroup pergroupstate,
     390             :                                       Datum *resultVal, bool *resultIsNull);
     391             : static inline void prepare_hash_slot(AggStatePerHash perhash,
     392             :                                      TupleTableSlot *inputslot,
     393             :                                      TupleTableSlot *hashslot);
     394             : static void prepare_projection_slot(AggState *aggstate,
     395             :                                     TupleTableSlot *slot,
     396             :                                     int currentSet);
     397             : static void finalize_aggregates(AggState *aggstate,
     398             :                                 AggStatePerAgg peraggs,
     399             :                                 AggStatePerGroup pergroup);
     400             : static TupleTableSlot *project_aggregates(AggState *aggstate);
     401             : static void find_cols(AggState *aggstate, Bitmapset **aggregated,
     402             :                       Bitmapset **unaggregated);
     403             : static bool find_cols_walker(Node *node, FindColsContext *context);
     404             : static void build_hash_tables(AggState *aggstate);
     405             : static void build_hash_table(AggState *aggstate, int setno, double nbuckets);
     406             : static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
     407             :                                           bool nullcheck);
     408             : static void hash_create_memory(AggState *aggstate);
     409             : static double hash_choose_num_buckets(double hashentrysize,
     410             :                                       double ngroups, Size memory);
     411             : static int  hash_choose_num_partitions(double input_groups,
     412             :                                        double hashentrysize,
     413             :                                        int used_bits,
     414             :                                        int *log2_npartitions);
     415             : static void initialize_hash_entry(AggState *aggstate,
     416             :                                   TupleHashTable hashtable,
     417             :                                   TupleHashEntry entry);
     418             : static void lookup_hash_entries(AggState *aggstate);
     419             : static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
     420             : static void agg_fill_hash_table(AggState *aggstate);
     421             : static bool agg_refill_hash_table(AggState *aggstate);
     422             : static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
     423             : static TupleTableSlot *agg_retrieve_hash_table_in_memory(AggState *aggstate);
     424             : static void hash_agg_check_limits(AggState *aggstate);
     425             : static void hash_agg_enter_spill_mode(AggState *aggstate);
     426             : static void hash_agg_update_metrics(AggState *aggstate, bool from_tape,
     427             :                                     int npartitions);
     428             : static void hashagg_finish_initial_spills(AggState *aggstate);
     429             : static void hashagg_reset_spill_state(AggState *aggstate);
     430             : static HashAggBatch *hashagg_batch_new(LogicalTape *input_tape, int setno,
     431             :                                        int64 input_tuples, double input_card,
     432             :                                        int used_bits);
     433             : static MinimalTuple hashagg_batch_read(HashAggBatch *batch, uint32 *hashp);
     434             : static void hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *tapeset,
     435             :                                int used_bits, double input_groups,
     436             :                                double hashentrysize);
     437             : static Size hashagg_spill_tuple(AggState *aggstate, HashAggSpill *spill,
     438             :                                 TupleTableSlot *inputslot, uint32 hash);
     439             : static void hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill,
     440             :                                  int setno);
     441             : static Datum GetAggInitVal(Datum textInitVal, Oid transtype);
     442             : static void build_pertrans_for_aggref(AggStatePerTrans pertrans,
     443             :                                       AggState *aggstate, EState *estate,
     444             :                                       Aggref *aggref, Oid transfn_oid,
     445             :                                       Oid aggtranstype, Oid aggserialfn,
     446             :                                       Oid aggdeserialfn, Datum initValue,
     447             :                                       bool initValueIsNull, Oid *inputTypes,
     448             :                                       int numArguments);
     449             : 
     450             : 
     451             : /*
     452             :  * Select the current grouping set; affects current_set and
     453             :  * curaggcontext.
     454             :  */
     455             : static void
     456     7922258 : select_current_set(AggState *aggstate, int setno, bool is_hash)
     457             : {
     458             :     /*
     459             :      * When changing this, also adapt ExecAggPlainTransByVal() and
     460             :      * ExecAggPlainTransByRef().
     461             :      */
     462     7922258 :     if (is_hash)
     463     7231990 :         aggstate->curaggcontext = aggstate->hashcontext;
     464             :     else
     465      690268 :         aggstate->curaggcontext = aggstate->aggcontexts[setno];
     466             : 
     467     7922258 :     aggstate->current_set = setno;
     468     7922258 : }
     469             : 
     470             : /*
     471             :  * Switch to phase "newphase", which must either be 0 or 1 (to reset) or
     472             :  * current_phase + 1. Juggle the tuplesorts accordingly.
     473             :  *
     474             :  * Phase 0 is for hashing, which we currently handle last in the AGG_MIXED
     475             :  * case, so when entering phase 0, all we need to do is drop open sorts.
     476             :  */
     477             : static void
     478       95444 : initialize_phase(AggState *aggstate, int newphase)
     479             : {
     480             :     Assert(newphase <= 1 || newphase == aggstate->current_phase + 1);
     481             : 
     482             :     /*
     483             :      * Whatever the previous state, we're now done with whatever input
     484             :      * tuplesort was in use.
     485             :      */
     486       95444 :     if (aggstate->sort_in)
     487             :     {
     488          42 :         tuplesort_end(aggstate->sort_in);
     489          42 :         aggstate->sort_in = NULL;
     490             :     }
     491             : 
     492       95444 :     if (newphase <= 1)
     493             :     {
     494             :         /*
     495             :          * Discard any existing output tuplesort.
     496             :          */
     497       95240 :         if (aggstate->sort_out)
     498             :         {
     499           6 :             tuplesort_end(aggstate->sort_out);
     500           6 :             aggstate->sort_out = NULL;
     501             :         }
     502             :     }
     503             :     else
     504             :     {
     505             :         /*
     506             :          * The old output tuplesort becomes the new input one, and this is the
     507             :          * right time to actually sort it.
     508             :          */
     509         204 :         aggstate->sort_in = aggstate->sort_out;
     510         204 :         aggstate->sort_out = NULL;
     511             :         Assert(aggstate->sort_in);
     512         204 :         tuplesort_performsort(aggstate->sort_in);
     513             :     }
     514             : 
     515             :     /*
     516             :      * If this isn't the last phase, we need to sort appropriately for the
     517             :      * next phase in sequence.
     518             :      */
     519       95444 :     if (newphase > 0 && newphase < aggstate->numphases - 1)
     520             :     {
     521         258 :         Sort       *sortnode = aggstate->phases[newphase + 1].sortnode;
     522         258 :         PlanState  *outerNode = outerPlanState(aggstate);
     523         258 :         TupleDesc   tupDesc = ExecGetResultType(outerNode);
     524             : 
     525         258 :         aggstate->sort_out = tuplesort_begin_heap(tupDesc,
     526             :                                                   sortnode->numCols,
     527             :                                                   sortnode->sortColIdx,
     528             :                                                   sortnode->sortOperators,
     529             :                                                   sortnode->collations,
     530             :                                                   sortnode->nullsFirst,
     531             :                                                   work_mem,
     532             :                                                   NULL, TUPLESORT_NONE);
     533             :     }
     534             : 
     535       95444 :     aggstate->current_phase = newphase;
     536       95444 :     aggstate->phase = &aggstate->phases[newphase];
     537       95444 : }
     538             : 
     539             : /*
     540             :  * Fetch a tuple from either the outer plan (for phase 1) or from the sorter
     541             :  * populated by the previous phase.  Copy it to the sorter for the next phase
     542             :  * if any.
     543             :  *
      544             :  * Callers cannot rely on the memory for the tuple in the returned slot
      545             :  * remaining valid past any subsequently fetched tuple.
     546             :  */
     547             : static TupleTableSlot *
     548    29038916 : fetch_input_tuple(AggState *aggstate)
     549             : {
     550             :     TupleTableSlot *slot;
     551             : 
     552    29038916 :     if (aggstate->sort_in)
     553             :     {
     554             :         /* make sure we check for interrupts in either path through here */
     555      294900 :         CHECK_FOR_INTERRUPTS();
     556      294900 :         if (!tuplesort_gettupleslot(aggstate->sort_in, true, false,
     557             :                                     aggstate->sort_slot, NULL))
     558         204 :             return NULL;
     559      294696 :         slot = aggstate->sort_slot;
     560             :     }
     561             :     else
     562    28744016 :         slot = ExecProcNode(outerPlanState(aggstate));
     563             : 
     564    29038622 :     if (!TupIsNull(slot) && aggstate->sort_out)
     565      294696 :         tuplesort_puttupleslot(aggstate->sort_out, slot);
     566             : 
     567    29038622 :     return slot;
     568             : }
     569             : 
     570             : /*
     571             :  * (Re)Initialize an individual aggregate.
     572             :  *
     573             :  * This function handles only one grouping set, already set in
     574             :  * aggstate->current_set.
     575             :  *
     576             :  * When called, CurrentMemoryContext should be the per-query context.
     577             :  */
     578             : static void
     579     1138700 : initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans,
     580             :                      AggStatePerGroup pergroupstate)
     581             : {
     582             :     /*
     583             :      * Start a fresh sort operation for each DISTINCT/ORDER BY aggregate.
     584             :      */
     585     1138700 :     if (pertrans->aggsortrequired)
     586             :     {
     587             :         /*
     588             :          * In case of rescan, maybe there could be an uncompleted sort
     589             :          * operation?  Clean it up if so.
     590             :          */
     591       53842 :         if (pertrans->sortstates[aggstate->current_set])
     592           0 :             tuplesort_end(pertrans->sortstates[aggstate->current_set]);
     593             : 
     594             : 
     595             :         /*
     596             :          * We use a plain Datum sorter when there's a single input column;
     597             :          * otherwise sort the full tuple.  (See comments for
     598             :          * process_ordered_aggregate_single.)
     599             :          */
     600       53842 :         if (pertrans->numInputs == 1)
     601             :         {
     602       53758 :             Form_pg_attribute attr = TupleDescAttr(pertrans->sortdesc, 0);
     603             : 
     604       53758 :             pertrans->sortstates[aggstate->current_set] =
     605       53758 :                 tuplesort_begin_datum(attr->atttypid,
     606       53758 :                                       pertrans->sortOperators[0],
     607       53758 :                                       pertrans->sortCollations[0],
     608       53758 :                                       pertrans->sortNullsFirst[0],
     609             :                                       work_mem, NULL, TUPLESORT_NONE);
     610             :         }
     611             :         else
     612          84 :             pertrans->sortstates[aggstate->current_set] =
     613          84 :                 tuplesort_begin_heap(pertrans->sortdesc,
     614             :                                      pertrans->numSortCols,
     615             :                                      pertrans->sortColIdx,
     616             :                                      pertrans->sortOperators,
     617             :                                      pertrans->sortCollations,
     618             :                                      pertrans->sortNullsFirst,
     619             :                                      work_mem, NULL, TUPLESORT_NONE);
     620             :     }
     621             : 
     622             :     /*
     623             :      * (Re)set transValue to the initial value.
     624             :      *
     625             :      * Note that when the initial value is pass-by-ref, we must copy it (into
     626             :      * the aggcontext) since we will pfree the transValue later.
     627             :      */
     628     1138700 :     if (pertrans->initValueIsNull)
     629      597800 :         pergroupstate->transValue = pertrans->initValue;
     630             :     else
     631             :     {
     632             :         MemoryContext oldContext;
     633             : 
     634      540900 :         oldContext = MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
     635     1081800 :         pergroupstate->transValue = datumCopy(pertrans->initValue,
     636      540900 :                                               pertrans->transtypeByVal,
     637      540900 :                                               pertrans->transtypeLen);
     638      540900 :         MemoryContextSwitchTo(oldContext);
     639             :     }
     640     1138700 :     pergroupstate->transValueIsNull = pertrans->initValueIsNull;
     641             : 
     642             :     /*
     643             :      * If the initial value for the transition state doesn't exist in the
     644             :      * pg_aggregate table then we will let the first non-NULL value returned
     645             :      * from the outer procNode become the initial value. (This is useful for
     646             :      * aggregates like max() and min().) The noTransValue flag signals that we
     647             :      * still need to do this.
     648             :      */
     649     1138700 :     pergroupstate->noTransValue = pertrans->initValueIsNull;
     650     1138700 : }
     651             : 
     652             : /*
     653             :  * Initialize all aggregate transition states for a new group of input values.
     654             :  *
     655             :  * If there are multiple grouping sets, we initialize only the first numReset
     656             :  * of them (the grouping sets are ordered so that the most specific one, which
     657             :  * is reset most often, is first). As a convenience, if numReset is 0, we
     658             :  * reinitialize all sets.
     659             :  *
     660             :  * NB: This cannot be used for hash aggregates, as for those the grouping set
     661             :  * number has to be specified from further up.
     662             :  *
     663             :  * When called, CurrentMemoryContext should be the per-query context.
     664             :  */
     665             : static void
     666      307438 : initialize_aggregates(AggState *aggstate,
     667             :                       AggStatePerGroup *pergroups,
     668             :                       int numReset)
     669             : {
     670             :     int         transno;
     671      307438 :     int         numGroupingSets = Max(aggstate->phase->numsets, 1);
     672      307438 :     int         setno = 0;
     673      307438 :     int         numTrans = aggstate->numtrans;
     674      307438 :     AggStatePerTrans transstates = aggstate->pertrans;
     675             : 
     676      307438 :     if (numReset == 0)
     677           0 :         numReset = numGroupingSets;
     678             : 
     679      629050 :     for (setno = 0; setno < numReset; setno++)
     680             :     {
     681      321612 :         AggStatePerGroup pergroup = pergroups[setno];
     682             : 
     683      321612 :         select_current_set(aggstate, setno, false);
     684             : 
     685      995202 :         for (transno = 0; transno < numTrans; transno++)
     686             :         {
     687      673590 :             AggStatePerTrans pertrans = &transstates[transno];
     688      673590 :             AggStatePerGroup pergroupstate = &pergroup[transno];
     689             : 
     690      673590 :             initialize_aggregate(aggstate, pertrans, pergroupstate);
     691             :         }
     692             :     }
     693      307438 : }
     694             : 
     695             : /*
     696             :  * Given new input value(s), advance the transition function of one aggregate
     697             :  * state within one grouping set only (already set in aggstate->current_set)
     698             :  *
     699             :  * The new values (and null flags) have been preloaded into argument positions
     700             :  * 1 and up in pertrans->transfn_fcinfo, so that we needn't copy them again to
     701             :  * pass to the transition function.  We also expect that the static fields of
     702             :  * the fcinfo are already initialized; that was done by ExecInitAgg().
     703             :  *
     704             :  * It doesn't matter which memory context this is called in.
     705             :  */
     706             : static void
     707      724272 : advance_transition_function(AggState *aggstate,
     708             :                             AggStatePerTrans pertrans,
     709             :                             AggStatePerGroup pergroupstate)
     710             : {
     711      724272 :     FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
     712             :     MemoryContext oldContext;
     713             :     Datum       newVal;
     714             : 
     715      724272 :     if (pertrans->transfn.fn_strict)
     716             :     {
     717             :         /*
     718             :          * For a strict transfn, nothing happens when there's a NULL input; we
     719             :          * just keep the prior transValue.
     720             :          */
     721      225000 :         int         numTransInputs = pertrans->numTransInputs;
     722             :         int         i;
     723             : 
     724      450000 :         for (i = 1; i <= numTransInputs; i++)
     725             :         {
     726      225000 :             if (fcinfo->args[i].isnull)
     727           0 :                 return;
     728             :         }
     729      225000 :         if (pergroupstate->noTransValue)
     730             :         {
     731             :             /*
     732             :              * transValue has not been initialized. This is the first non-NULL
     733             :              * input value. We use it as the initial value for transValue. (We
     734             :              * already checked that the agg's input type is binary-compatible
     735             :              * with its transtype, so straight copy here is OK.)
     736             :              *
     737             :              * We must copy the datum into aggcontext if it is pass-by-ref. We
     738             :              * do not need to pfree the old transValue, since it's NULL.
     739             :              */
     740           0 :             oldContext = MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
     741           0 :             pergroupstate->transValue = datumCopy(fcinfo->args[1].value,
     742           0 :                                                   pertrans->transtypeByVal,
     743           0 :                                                   pertrans->transtypeLen);
     744           0 :             pergroupstate->transValueIsNull = false;
     745           0 :             pergroupstate->noTransValue = false;
     746           0 :             MemoryContextSwitchTo(oldContext);
     747           0 :             return;
     748             :         }
     749      225000 :         if (pergroupstate->transValueIsNull)
     750             :         {
     751             :             /*
     752             :              * Don't call a strict function with NULL inputs.  Note it is
     753             :              * possible to get here despite the above tests, if the transfn is
     754             :              * strict *and* returned a NULL on a prior cycle. If that happens
     755             :              * we will propagate the NULL all the way to the end.
     756             :              */
     757           0 :             return;
     758             :         }
     759             :     }
     760             : 
     761             :     /* We run the transition functions in per-input-tuple memory context */
     762      724272 :     oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory);
     763             : 
     764             :     /* set up aggstate->curpertrans for AggGetAggref() */
     765      724272 :     aggstate->curpertrans = pertrans;
     766             : 
     767             :     /*
     768             :      * OK to call the transition function
     769             :      */
     770      724272 :     fcinfo->args[0].value = pergroupstate->transValue;
     771      724272 :     fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
     772      724272 :     fcinfo->isnull = false;      /* just in case transfn doesn't set it */
     773             : 
     774      724272 :     newVal = FunctionCallInvoke(fcinfo);
     775             : 
     776      724272 :     aggstate->curpertrans = NULL;
     777             : 
     778             :     /*
     779             :      * If pass-by-ref datatype, must copy the new value into aggcontext and
     780             :      * free the prior transValue.  But if transfn returned a pointer to its
     781             :      * first input, we don't need to do anything.
     782             :      *
     783             :      * It's safe to compare newVal with pergroup->transValue without regard
     784             :      * for either being NULL, because ExecAggCopyTransValue takes care to set
     785             :      * transValue to 0 when NULL. Otherwise we could end up accidentally not
     786             :      * reparenting, when the transValue has the same numerical value as
     787             :      * newValue, despite being NULL.  This is a somewhat hot path, making it
     788             :      * undesirable to instead solve this with another branch for the common
     789             :      * case of the transition function returning its (modified) input
     790             :      * argument.
     791             :      */
     792      724272 :     if (!pertrans->transtypeByVal &&
     793           0 :         DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
     794           0 :         newVal = ExecAggCopyTransValue(aggstate, pertrans,
     795           0 :                                        newVal, fcinfo->isnull,
     796             :                                        pergroupstate->transValue,
     797           0 :                                        pergroupstate->transValueIsNull);
     798             : 
     799      724272 :     pergroupstate->transValue = newVal;
     800      724272 :     pergroupstate->transValueIsNull = fcinfo->isnull;
     801             : 
     802      724272 :     MemoryContextSwitchTo(oldContext);
     803             : }
     804             : 
     805             : /*
     806             :  * Advance each aggregate transition state for one input tuple.  The input
     807             :  * tuple has been stored in tmpcontext->ecxt_outertuple, so that it is
     808             :  * accessible to ExecEvalExpr.
     809             :  *
     810             :  * We have two sets of transition states to handle: one for sorted aggregation
     811             :  * and one for hashed; we do them both here, to avoid multiple evaluation of
     812             :  * the inputs.
     813             :  *
     814             :  * When called, CurrentMemoryContext should be the per-query context.
     815             :  */
     816             : static void
     817    29713228 : advance_aggregates(AggState *aggstate)
     818             : {
     819    29713228 :     ExecEvalExprNoReturnSwitchContext(aggstate->phase->evaltrans,
     820             :                                       aggstate->tmpcontext);
     821    29713150 : }
     822             : 
     823             : /*
     824             :  * Run the transition function for a DISTINCT or ORDER BY aggregate
     825             :  * with only one input.  This is called after we have completed
     826             :  * entering all the input values into the sort object.  We complete the
     827             :  * sort, read out the values in sorted order, and run the transition
     828             :  * function on each value (applying DISTINCT if appropriate).
     829             :  *
     830             :  * Note that the strictness of the transition function was checked when
     831             :  * entering the values into the sort, so we don't check it again here;
     832             :  * we just apply standard SQL DISTINCT logic.
     833             :  *
     834             :  * The one-input case is handled separately from the multi-input case
     835             :  * for performance reasons: for single by-value inputs, such as the
     836             :  * common case of count(distinct id), the tuplesort_getdatum code path
     837             :  * is around 300% faster.  (The speedup for by-reference types is less
     838             :  * but still noticeable.)
     839             :  *
     840             :  * This function handles only one grouping set (already set in
     841             :  * aggstate->current_set).
     842             :  *
     843             :  * When called, CurrentMemoryContext should be the per-query context.
     844             :  */
     845             : static void
     846       53758 : process_ordered_aggregate_single(AggState *aggstate,
     847             :                                  AggStatePerTrans pertrans,
     848             :                                  AggStatePerGroup pergroupstate)
     849             : {
     850       53758 :     Datum       oldVal = (Datum) 0;
     851       53758 :     bool        oldIsNull = true;
     852       53758 :     bool        haveOldVal = false;
     853       53758 :     MemoryContext workcontext = aggstate->tmpcontext->ecxt_per_tuple_memory;
     854             :     MemoryContext oldContext;
     855       53758 :     bool        isDistinct = (pertrans->numDistinctCols > 0);
     856       53758 :     Datum       newAbbrevVal = (Datum) 0;
     857       53758 :     Datum       oldAbbrevVal = (Datum) 0;
     858       53758 :     FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
     859             :     Datum      *newVal;
     860             :     bool       *isNull;
     861             : 
     862             :     Assert(pertrans->numDistinctCols < 2);
     863             : 
     864       53758 :     tuplesort_performsort(pertrans->sortstates[aggstate->current_set]);
     865             : 
     866             :     /* Load the column into argument 1 (arg 0 will be transition value) */
     867       53758 :     newVal = &fcinfo->args[1].value;
     868       53758 :     isNull = &fcinfo->args[1].isnull;
     869             : 
     870             :     /*
     871             :      * Note: if input type is pass-by-ref, the datums returned by the sort are
     872             :      * freshly palloc'd in the per-query context, so we must be careful to
     873             :      * pfree them when they are no longer needed.
     874             :      */
     875             : 
     876      898276 :     while (tuplesort_getdatum(pertrans->sortstates[aggstate->current_set],
     877             :                               true, false, newVal, isNull, &newAbbrevVal))
     878             :     {
     879             :         /*
     880             :          * Clear and select the working context for evaluation of the equality
     881             :          * function and transition function.
     882             :          */
     883      844518 :         MemoryContextReset(workcontext);
     884      844518 :         oldContext = MemoryContextSwitchTo(workcontext);
     885             : 
     886             :         /*
     887             :          * If DISTINCT mode, and not distinct from prior, skip it.
     888             :          */
     889      844518 :         if (isDistinct &&
     890      310454 :             haveOldVal &&
     891           0 :             ((oldIsNull && *isNull) ||
     892      310454 :              (!oldIsNull && !*isNull &&
     893      605948 :               oldAbbrevVal == newAbbrevVal &&
     894      295494 :               DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne,
     895             :                                              pertrans->aggCollation,
     896             :                                              oldVal, *newVal)))))
     897             :         {
     898      120462 :             MemoryContextSwitchTo(oldContext);
     899      120462 :             continue;
     900             :         }
     901             :         else
     902             :         {
     903      724056 :             advance_transition_function(aggstate, pertrans, pergroupstate);
     904             : 
     905      724056 :             MemoryContextSwitchTo(oldContext);
     906             : 
     907             :             /*
     908             :              * Forget the old value, if any, and remember the new one for
     909             :              * subsequent equality checks.
     910             :              */
     911      724056 :             if (!pertrans->inputtypeByVal)
     912             :             {
     913      525288 :                 if (!oldIsNull)
     914      525108 :                     pfree(DatumGetPointer(oldVal));
     915      525288 :                 if (!*isNull)
     916      525228 :                     oldVal = datumCopy(*newVal, pertrans->inputtypeByVal,
     917      525228 :                                        pertrans->inputtypeLen);
     918             :             }
     919             :             else
     920      198768 :                 oldVal = *newVal;
     921      724056 :             oldAbbrevVal = newAbbrevVal;
     922      724056 :             oldIsNull = *isNull;
     923      724056 :             haveOldVal = true;
     924             :         }
     925             :     }
     926             : 
     927       53758 :     if (!oldIsNull && !pertrans->inputtypeByVal)
     928         120 :         pfree(DatumGetPointer(oldVal));
     929             : 
     930       53758 :     tuplesort_end(pertrans->sortstates[aggstate->current_set]);
     931       53758 :     pertrans->sortstates[aggstate->current_set] = NULL;
     932       53758 : }
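
/*
 * Illustrative sketch (not part of nodeAgg.c): the shape of the loop above
 * with plain arrays instead of a tuplesort.  Sort the inputs, walk them in
 * order, skip any value equal to the one just processed (the DISTINCT
 * case), and feed the survivors to a transition function.  All names are
 * hypothetical; the example behaves like count(DISTINCT id).
 */
#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
    int         ia = *(const int *) a;
    int         ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

/* toy transition function: count the values it is given */
static long
count_transfn(long trans, int value)
{
    (void) value;
    return trans + 1;
}

static long
toy_ordered_agg_single(int *values, size_t n, int distinct)
{
    long        trans = 0;      /* toy "initcond" */
    int         oldval = 0;
    int         have_old = 0;

    qsort(values, n, sizeof(int), cmp_int);

    for (size_t i = 0; i < n; i++)
    {
        if (distinct && have_old && values[i] == oldval)
            continue;           /* not distinct from the prior value: skip */

        trans = count_transfn(trans, values[i]);
        oldval = values[i];
        have_old = 1;
    }
    return trans;
}

int
main(void)
{
    int         ids[] = {3, 1, 3, 2, 1, 3};

    /* behaves like count(DISTINCT id): prints 3 */
    printf("%ld\n", toy_ordered_agg_single(ids, 6, 1));
    return 0;
}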
     933             : 
     934             : /*
     935             :  * Run the transition function for a DISTINCT or ORDER BY aggregate
     936             :  * with more than one input.  This is called after we have completed
     937             :  * entering all the input values into the sort object.  We complete the
     938             :  * sort, read out the values in sorted order, and run the transition
     939             :  * function on each value (applying DISTINCT if appropriate).
     940             :  *
     941             :  * This function handles only one grouping set (already set in
     942             :  * aggstate->current_set).
     943             :  *
     944             :  * When called, CurrentMemoryContext should be the per-query context.
     945             :  */
     946             : static void
     947          84 : process_ordered_aggregate_multi(AggState *aggstate,
     948             :                                 AggStatePerTrans pertrans,
     949             :                                 AggStatePerGroup pergroupstate)
     950             : {
     951          84 :     ExprContext *tmpcontext = aggstate->tmpcontext;
     952          84 :     FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
     953          84 :     TupleTableSlot *slot1 = pertrans->sortslot;
     954          84 :     TupleTableSlot *slot2 = pertrans->uniqslot;
     955          84 :     int         numTransInputs = pertrans->numTransInputs;
     956          84 :     int         numDistinctCols = pertrans->numDistinctCols;
     957          84 :     Datum       newAbbrevVal = (Datum) 0;
     958          84 :     Datum       oldAbbrevVal = (Datum) 0;
     959          84 :     bool        haveOldValue = false;
     960          84 :     TupleTableSlot *save = aggstate->tmpcontext->ecxt_outertuple;
     961             :     int         i;
     962             : 
     963          84 :     tuplesort_performsort(pertrans->sortstates[aggstate->current_set]);
     964             : 
     965          84 :     ExecClearTuple(slot1);
     966          84 :     if (slot2)
     967           0 :         ExecClearTuple(slot2);
     968             : 
     969         300 :     while (tuplesort_gettupleslot(pertrans->sortstates[aggstate->current_set],
     970             :                                   true, true, slot1, &newAbbrevVal))
     971             :     {
     972         216 :         CHECK_FOR_INTERRUPTS();
     973             : 
     974         216 :         tmpcontext->ecxt_outertuple = slot1;
     975         216 :         tmpcontext->ecxt_innertuple = slot2;
     976             : 
     977         216 :         if (numDistinctCols == 0 ||
     978           0 :             !haveOldValue ||
     979           0 :             newAbbrevVal != oldAbbrevVal ||
     980           0 :             !ExecQual(pertrans->equalfnMulti, tmpcontext))
     981             :         {
     982             :             /*
     983             :              * Extract the first numTransInputs columns as datums to pass to
     984             :              * the transfn.
     985             :              */
     986         216 :             slot_getsomeattrs(slot1, numTransInputs);
     987             : 
     988             :             /* Load values into fcinfo */
     989             :             /* Start from 1, since the 0th arg will be the transition value */
     990         612 :             for (i = 0; i < numTransInputs; i++)
     991             :             {
     992         396 :                 fcinfo->args[i + 1].value = slot1->tts_values[i];
     993         396 :                 fcinfo->args[i + 1].isnull = slot1->tts_isnull[i];
     994             :             }
     995             : 
     996         216 :             advance_transition_function(aggstate, pertrans, pergroupstate);
     997             : 
     998         216 :             if (numDistinctCols > 0)
     999             :             {
    1000             :                 /* swap the slot pointers to retain the current tuple */
    1001           0 :                 TupleTableSlot *tmpslot = slot2;
    1002             : 
    1003           0 :                 slot2 = slot1;
    1004           0 :                 slot1 = tmpslot;
    1005             :                 /* avoid ExecQual() calls by reusing abbreviated keys */
    1006           0 :                 oldAbbrevVal = newAbbrevVal;
    1007           0 :                 haveOldValue = true;
    1008             :             }
    1009             :         }
    1010             : 
    1011             :         /* Reset context each time */
    1012         216 :         ResetExprContext(tmpcontext);
    1013             : 
    1014         216 :         ExecClearTuple(slot1);
    1015             :     }
    1016             : 
    1017          84 :     if (slot2)
    1018           0 :         ExecClearTuple(slot2);
    1019             : 
    1020          84 :     tuplesort_end(pertrans->sortstates[aggstate->current_set]);
    1021          84 :     pertrans->sortstates[aggstate->current_set] = NULL;
    1022             : 
    1023             :     /* restore previous slot, potentially in use for grouping sets */
    1024          84 :     tmpcontext->ecxt_outertuple = save;
    1025          84 : }
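
/*
 * Illustrative sketch (not part of nodeAgg.c): the loop above remembers the
 * previous tuple for its DISTINCT comparison by swapping two pre-allocated
 * slots rather than copying the tuple.  The same idea with two plain row
 * buffers; all names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define NCOLS 2

typedef struct ToyRow
{
    int         cols[NCOLS];
} ToyRow;

static int
rows_equal(const ToyRow *a, const ToyRow *b)
{
    return memcmp(a->cols, b->cols, sizeof(a->cols)) == 0;
}

int
main(void)
{
    /* already sorted on both columns, as the tuplesort would deliver them */
    ToyRow      input[] = {{{1, 1}}, {{1, 1}}, {{1, 2}}, {{2, 2}}};
    ToyRow      bufA,
                bufB;
    ToyRow     *cur = &bufA;    /* receives each incoming row */
    ToyRow     *prev = &bufB;   /* holds the last row we processed */
    int         have_prev = 0;
    int         ndistinct = 0;

    for (size_t i = 0; i < sizeof(input) / sizeof(input[0]); i++)
    {
        *cur = input[i];

        if (!have_prev || !rows_equal(cur, prev))
        {
            ndistinct++;        /* "advance the transition function" */

            /* swap the buffer pointers to retain the current row cheaply */
            ToyRow     *tmp = prev;

            prev = cur;
            cur = tmp;
            have_prev = 1;
        }
    }
    printf("%d distinct rows\n", ndistinct);    /* prints 3 */
    return 0;
}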
    1026             : 
    1027             : /*
    1028             :  * Compute the final value of one aggregate.
    1029             :  *
    1030             :  * This function handles only one grouping set (already set in
    1031             :  * aggstate->current_set).
    1032             :  *
    1033             :  * The finalfn will be run, and the result delivered, in the
    1034             :  * output-tuple context; caller's CurrentMemoryContext does not matter.
    1035             :  * (But note that in some cases, such as when there is no finalfn, the
    1036             :  * result might be a pointer to or into the agg's transition value.)
    1037             :  *
     1038             :  * The finalfn uses the state identified by the transno.  That state might
     1039             :  * also be in use by another aggregate function, so it's important that we
     1040             :  * do nothing destructive here.  Moreover, the aggregate's final value might
    1041             :  * get used in multiple places, so we mustn't return a R/W expanded datum.
    1042             :  */
    1043             : static void
    1044     1124924 : finalize_aggregate(AggState *aggstate,
    1045             :                    AggStatePerAgg peragg,
    1046             :                    AggStatePerGroup pergroupstate,
    1047             :                    Datum *resultVal, bool *resultIsNull)
    1048             : {
    1049     1124924 :     LOCAL_FCINFO(fcinfo, FUNC_MAX_ARGS);
    1050     1124924 :     bool        anynull = false;
    1051             :     MemoryContext oldContext;
    1052             :     int         i;
    1053             :     ListCell   *lc;
    1054     1124924 :     AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
    1055             : 
    1056     1124924 :     oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
    1057             : 
    1058             :     /*
    1059             :      * Evaluate any direct arguments.  We do this even if there's no finalfn
    1060             :      * (which is unlikely anyway), so that side-effects happen as expected.
    1061             :      * The direct arguments go into arg positions 1 and up, leaving position 0
    1062             :      * for the transition state value.
    1063             :      */
    1064     1124924 :     i = 1;
    1065     1125898 :     foreach(lc, peragg->aggdirectargs)
    1066             :     {
    1067         974 :         ExprState  *expr = (ExprState *) lfirst(lc);
    1068             : 
    1069         974 :         fcinfo->args[i].value = ExecEvalExpr(expr,
    1070             :                                              aggstate->ss.ps.ps_ExprContext,
    1071             :                                              &fcinfo->args[i].isnull);
    1072         974 :         anynull |= fcinfo->args[i].isnull;
    1073         974 :         i++;
    1074             :     }
    1075             : 
    1076             :     /*
    1077             :      * Apply the agg's finalfn if one is provided, else return transValue.
    1078             :      */
    1079     1124924 :     if (OidIsValid(peragg->finalfn_oid))
    1080             :     {
    1081      339344 :         int         numFinalArgs = peragg->numFinalArgs;
    1082             : 
    1083             :         /* set up aggstate->curperagg for AggGetAggref() */
    1084      339344 :         aggstate->curperagg = peragg;
    1085             : 
    1086      339344 :         InitFunctionCallInfoData(*fcinfo, &peragg->finalfn,
    1087             :                                  numFinalArgs,
    1088             :                                  pertrans->aggCollation,
    1089             :                                  (Node *) aggstate, NULL);
    1090             : 
    1091             :         /* Fill in the transition state value */
    1092      339344 :         fcinfo->args[0].value =
    1093      339344 :             MakeExpandedObjectReadOnly(pergroupstate->transValue,
    1094             :                                        pergroupstate->transValueIsNull,
    1095             :                                        pertrans->transtypeLen);
    1096      339344 :         fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
    1097      339344 :         anynull |= pergroupstate->transValueIsNull;
    1098             : 
    1099             :         /* Fill any remaining argument positions with nulls */
    1100      492432 :         for (; i < numFinalArgs; i++)
    1101             :         {
    1102      153088 :             fcinfo->args[i].value = (Datum) 0;
    1103      153088 :             fcinfo->args[i].isnull = true;
    1104      153088 :             anynull = true;
    1105             :         }
    1106             : 
    1107      339344 :         if (fcinfo->flinfo->fn_strict && anynull)
    1108             :         {
    1109             :             /* don't call a strict function with NULL inputs */
    1110           0 :             *resultVal = (Datum) 0;
    1111           0 :             *resultIsNull = true;
    1112             :         }
    1113             :         else
    1114             :         {
    1115             :             Datum       result;
    1116             : 
    1117      339344 :             result = FunctionCallInvoke(fcinfo);
    1118      339332 :             *resultIsNull = fcinfo->isnull;
    1119      339332 :             *resultVal = MakeExpandedObjectReadOnly(result,
    1120             :                                                     fcinfo->isnull,
    1121             :                                                     peragg->resulttypeLen);
    1122             :         }
    1123      339332 :         aggstate->curperagg = NULL;
    1124             :     }
    1125             :     else
    1126             :     {
    1127      785580 :         *resultVal =
    1128      785580 :             MakeExpandedObjectReadOnly(pergroupstate->transValue,
    1129             :                                        pergroupstate->transValueIsNull,
    1130             :                                        pertrans->transtypeLen);
    1131      785580 :         *resultIsNull = pergroupstate->transValueIsNull;
    1132             :     }
    1133             : 
    1134     1124912 :     MemoryContextSwitchTo(oldContext);
    1135     1124912 : }
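
/*
 * Illustrative sketch (not part of nodeAgg.c): the transition-state /
 * final-function split that finalize_aggregate() works with, using avg()
 * as the classic example.  The transition state carries {sum, count}, and
 * the final function derives the result from it without modifying the
 * state, so the same state could also feed other aggregates.  All names
 * below are hypothetical.
 */
#include <stdio.h>

typedef struct AvgState
{
    double      sum;
    long        count;
} AvgState;

static void
avg_transfn(AvgState *state, double value)
{
    state->sum += value;
    state->count++;
}

/* final function: read-only with respect to the transition state */
static double
avg_finalfn(const AvgState *state, int *isnull)
{
    if (state->count == 0)
    {
        *isnull = 1;            /* avg of no rows is NULL */
        return 0.0;
    }
    *isnull = 0;
    return state->sum / (double) state->count;
}

int
main(void)
{
    AvgState    state = {0.0, 0};
    double      values[] = {1.0, 2.0, 6.0};
    int         isnull;

    for (int i = 0; i < 3; i++)
        avg_transfn(&state, values[i]);

    /* prints "avg = 3 (null=0)" */
    printf("avg = %g (null=%d)\n", avg_finalfn(&state, &isnull), isnull);
    return 0;
}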
    1136             : 
    1137             : /*
    1138             :  * Compute the output value of one partial aggregate.
    1139             :  *
    1140             :  * The serialization function will be run, and the result delivered, in the
    1141             :  * output-tuple context; caller's CurrentMemoryContext does not matter.
    1142             :  */
    1143             : static void
    1144       17388 : finalize_partialaggregate(AggState *aggstate,
    1145             :                           AggStatePerAgg peragg,
    1146             :                           AggStatePerGroup pergroupstate,
    1147             :                           Datum *resultVal, bool *resultIsNull)
    1148             : {
    1149       17388 :     AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
    1150             :     MemoryContext oldContext;
    1151             : 
    1152       17388 :     oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
    1153             : 
    1154             :     /*
    1155             :      * serialfn_oid will be set if we must serialize the transvalue before
    1156             :      * returning it
    1157             :      */
    1158       17388 :     if (OidIsValid(pertrans->serialfn_oid))
    1159             :     {
    1160             :         /* Don't call a strict serialization function with NULL input. */
    1161         666 :         if (pertrans->serialfn.fn_strict && pergroupstate->transValueIsNull)
    1162             :         {
    1163          84 :             *resultVal = (Datum) 0;
    1164          84 :             *resultIsNull = true;
    1165             :         }
    1166             :         else
    1167             :         {
    1168         582 :             FunctionCallInfo fcinfo = pertrans->serialfn_fcinfo;
    1169             :             Datum       result;
    1170             : 
    1171         582 :             fcinfo->args[0].value =
    1172         582 :                 MakeExpandedObjectReadOnly(pergroupstate->transValue,
    1173             :                                            pergroupstate->transValueIsNull,
    1174             :                                            pertrans->transtypeLen);
    1175         582 :             fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
    1176         582 :             fcinfo->isnull = false;
    1177             : 
    1178         582 :             result = FunctionCallInvoke(fcinfo);
    1179         582 :             *resultIsNull = fcinfo->isnull;
    1180         582 :             *resultVal = MakeExpandedObjectReadOnly(result,
    1181             :                                                     fcinfo->isnull,
    1182             :                                                     peragg->resulttypeLen);
    1183             :         }
    1184             :     }
    1185             :     else
    1186             :     {
    1187       16722 :         *resultVal =
    1188       16722 :             MakeExpandedObjectReadOnly(pergroupstate->transValue,
    1189             :                                        pergroupstate->transValueIsNull,
    1190             :                                        pertrans->transtypeLen);
    1191       16722 :         *resultIsNull = pergroupstate->transValueIsNull;
    1192             :     }
    1193             : 
    1194       17388 :     MemoryContextSwitchTo(oldContext);
    1195       17388 : }
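
/*
 * Illustrative sketch (not part of nodeAgg.c): a partial aggregate hands
 * its transition state to another process, so states whose representation
 * is internal need a serialization function.  Below, a toy {sum, count}
 * state is flattened to a fixed-width byte buffer and read back, the way a
 * combining worker would.  The layout and names are hypothetical, not
 * PostgreSQL's.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef struct PartialAvg
{
    double      sum;
    int64_t     count;
} PartialAvg;

/* toy "serialfn": copy the fields into a caller-supplied buffer */
static size_t
partial_avg_serialize(const PartialAvg *state, unsigned char *buf)
{
    memcpy(buf, &state->sum, sizeof(state->sum));
    memcpy(buf + sizeof(state->sum), &state->count, sizeof(state->count));
    return sizeof(state->sum) + sizeof(state->count);
}

/* toy "deserialfn": the inverse operation */
static PartialAvg
partial_avg_deserialize(const unsigned char *buf)
{
    PartialAvg  state;

    memcpy(&state.sum, buf, sizeof(state.sum));
    memcpy(&state.count, buf + sizeof(state.sum), sizeof(state.count));
    return state;
}

int
main(void)
{
    PartialAvg  state = {9.0, 3};
    unsigned char buf[sizeof(double) + sizeof(int64_t)];
    PartialAvg  back;

    partial_avg_serialize(&state, buf);
    back = partial_avg_deserialize(buf);
    printf("sum=%g count=%lld\n", back.sum, (long long) back.count);
    return 0;
}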
    1196             : 
    1197             : /*
    1198             :  * Extract the attributes that make up the grouping key into the
    1199             :  * hashslot. This is necessary to compute the hash or perform a lookup.
    1200             :  */
    1201             : static inline void
    1202     8269312 : prepare_hash_slot(AggStatePerHash perhash,
    1203             :                   TupleTableSlot *inputslot,
    1204             :                   TupleTableSlot *hashslot)
    1205             : {
    1206             :     int         i;
    1207             : 
    1208             :     /* transfer just the needed columns into hashslot */
    1209     8269312 :     slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
    1210     8269312 :     ExecClearTuple(hashslot);
    1211             : 
    1212    20488720 :     for (i = 0; i < perhash->numhashGrpCols; i++)
    1213             :     {
    1214    12219408 :         int         varNumber = perhash->hashGrpColIdxInput[i] - 1;
    1215             : 
    1216    12219408 :         hashslot->tts_values[i] = inputslot->tts_values[varNumber];
    1217    12219408 :         hashslot->tts_isnull[i] = inputslot->tts_isnull[varNumber];
    1218             :     }
    1219     8269312 :     ExecStoreVirtualTuple(hashslot);
    1220     8269312 : }
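
/*
 * Illustrative sketch (not part of nodeAgg.c): like prepare_hash_slot()
 * above, this copies only the grouping-key columns of a wide input row
 * into a narrow key buffer, using a 1-based column-index map, so that
 * hashing and comparison touch just the needed columns.  Names and layout
 * are hypothetical.
 */
#include <stdio.h>

int
main(void)
{
    /* a wide input row: columns 1..5 */
    int         input_row[5] = {10, 20, 30, 40, 50};

    /* 1-based indexes of the grouping columns, as in hashGrpColIdxInput */
    int         key_col_idx[2] = {4, 2};

    int         key_row[2];

    for (int i = 0; i < 2; i++)
    {
        int         varNumber = key_col_idx[i] - 1; /* back to 0-based */

        key_row[i] = input_row[varNumber];
    }

    printf("key = (%d, %d)\n", key_row[0], key_row[1]);     /* (40, 20) */
    return 0;
}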
    1221             : 
    1222             : /*
    1223             :  * Prepare to finalize and project based on the specified representative tuple
    1224             :  * slot and grouping set.
    1225             :  *
    1226             :  * In the specified tuple slot, force to null all attributes that should be
    1227             :  * read as null in the context of the current grouping set.  Also stash the
    1228             :  * current group bitmap where GroupingExpr can get at it.
    1229             :  *
    1230             :  * This relies on three conditions:
    1231             :  *
     1232             :  * 1) Nothing is ever going to try to extract the whole tuple from this slot;
     1233             :  * it is only referenced in evaluations, and those access only individual
     1234             :  * attributes.
    1235             :  *
    1236             :  * 2) No system columns are going to need to be nulled. (If a system column is
    1237             :  * referenced in a group clause, it is actually projected in the outer plan
    1238             :  * tlist.)
    1239             :  *
    1240             :  * 3) Within a given phase, we never need to recover the value of an attribute
    1241             :  * once it has been set to null.
    1242             :  *
    1243             :  * Poking into the slot this way is a bit ugly, but the consensus is that the
    1244             :  * alternative was worse.
    1245             :  */
    1246             : static void
    1247      853034 : prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet)
    1248             : {
    1249      853034 :     if (aggstate->phase->grouped_cols)
    1250             :     {
    1251      561002 :         Bitmapset  *grouped_cols = aggstate->phase->grouped_cols[currentSet];
    1252             : 
    1253      561002 :         aggstate->grouped_cols = grouped_cols;
    1254             : 
    1255      561002 :         if (TTS_EMPTY(slot))
    1256             :         {
    1257             :             /*
    1258             :              * Force all values to be NULL if working on an empty input tuple
    1259             :              * (i.e. an empty grouping set for which no input rows were
    1260             :              * supplied).
    1261             :              */
    1262          60 :             ExecStoreAllNullTuple(slot);
    1263             :         }
    1264      560942 :         else if (aggstate->all_grouped_cols)
    1265             :         {
    1266             :             ListCell   *lc;
    1267             : 
    1268             :             /* all_grouped_cols is arranged in desc order */
    1269      560894 :             slot_getsomeattrs(slot, linitial_int(aggstate->all_grouped_cols));
    1270             : 
    1271     1530204 :             foreach(lc, aggstate->all_grouped_cols)
    1272             :             {
    1273      969310 :                 int         attnum = lfirst_int(lc);
    1274             : 
    1275      969310 :                 if (!bms_is_member(attnum, grouped_cols))
    1276       57832 :                     slot->tts_isnull[attnum - 1] = true;
    1277             :             }
    1278             :         }
    1279             :     }
    1280      853034 : }
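
/*
 * Illustrative sketch (not part of nodeAgg.c): for a grouping set,
 * prepare_projection_slot() forces to NULL every grouped column that is
 * not part of the current set.  The same idea with a plain boolean null
 * mask and a bitmask standing in for the Bitmapset; names are
 * hypothetical.
 */
#include <stdio.h>
#include <stdbool.h>

#define NCOLS 4

int
main(void)
{
    int         values[NCOLS] = {7, 8, 9, 10};
    bool        isnull[NCOLS] = {false, false, false, false};

    /* columns referenced by any grouping set (1-based attnums 3, 2, 1) */
    int         all_grouped_cols[] = {3, 2, 1};

    /* the current grouping set groups only on attnums 1 and 3 */
    unsigned    grouped_cols_mask = (1u << 1) | (1u << 3);

    for (int i = 0; i < 3; i++)
    {
        int         attnum = all_grouped_cols[i];

        if ((grouped_cols_mask & (1u << attnum)) == 0)
            isnull[attnum - 1] = true;  /* read as NULL for this set */
    }

    for (int i = 0; i < NCOLS; i++)
    {
        if (isnull[i])
            printf("col%d: NULL\n", i + 1);
        else
            printf("col%d: %d\n", i + 1, values[i]);
    }
    return 0;
}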
    1281             : 
    1282             : /*
    1283             :  * Compute the final value of all aggregates for one group.
    1284             :  *
    1285             :  * This function handles only one grouping set at a time, which the caller must
    1286             :  * have selected.  It's also the caller's responsibility to adjust the supplied
    1287             :  * pergroup parameter to point to the current set's transvalues.
    1288             :  *
    1289             :  * Results are stored in the output econtext aggvalues/aggnulls.
    1290             :  */
    1291             : static void
    1292      853034 : finalize_aggregates(AggState *aggstate,
    1293             :                     AggStatePerAgg peraggs,
    1294             :                     AggStatePerGroup pergroup)
    1295             : {
    1296      853034 :     ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
    1297      853034 :     Datum      *aggvalues = econtext->ecxt_aggvalues;
    1298      853034 :     bool       *aggnulls = econtext->ecxt_aggnulls;
    1299             :     int         aggno;
    1300             : 
    1301             :     /*
    1302             :      * If there were any DISTINCT and/or ORDER BY aggregates, sort their
    1303             :      * inputs and run the transition functions.
    1304             :      */
    1305     1995064 :     for (int transno = 0; transno < aggstate->numtrans; transno++)
    1306             :     {
    1307     1142030 :         AggStatePerTrans pertrans = &aggstate->pertrans[transno];
    1308             :         AggStatePerGroup pergroupstate;
    1309             : 
    1310     1142030 :         pergroupstate = &pergroup[transno];
    1311             : 
    1312     1142030 :         if (pertrans->aggsortrequired)
    1313             :         {
    1314             :             Assert(aggstate->aggstrategy != AGG_HASHED &&
    1315             :                    aggstate->aggstrategy != AGG_MIXED);
    1316             : 
    1317       53842 :             if (pertrans->numInputs == 1)
    1318       53758 :                 process_ordered_aggregate_single(aggstate,
    1319             :                                                  pertrans,
    1320             :                                                  pergroupstate);
    1321             :             else
    1322          84 :                 process_ordered_aggregate_multi(aggstate,
    1323             :                                                 pertrans,
    1324             :                                                 pergroupstate);
    1325             :         }
    1326     1088188 :         else if (pertrans->numDistinctCols > 0 && pertrans->haslast)
    1327             :         {
    1328       18358 :             pertrans->haslast = false;
    1329             : 
    1330       18358 :             if (pertrans->numDistinctCols == 1)
    1331             :             {
    1332       18262 :                 if (!pertrans->inputtypeByVal && !pertrans->lastisnull)
    1333         262 :                     pfree(DatumGetPointer(pertrans->lastdatum));
    1334             : 
    1335       18262 :                 pertrans->lastisnull = false;
    1336       18262 :                 pertrans->lastdatum = (Datum) 0;
    1337             :             }
    1338             :             else
    1339          96 :                 ExecClearTuple(pertrans->uniqslot);
    1340             :         }
    1341             :     }
    1342             : 
    1343             :     /*
    1344             :      * Run the final functions.
    1345             :      */
    1346     1995334 :     for (aggno = 0; aggno < aggstate->numaggs; aggno++)
    1347             :     {
    1348     1142312 :         AggStatePerAgg peragg = &peraggs[aggno];
    1349     1142312 :         int         transno = peragg->transno;
    1350             :         AggStatePerGroup pergroupstate;
    1351             : 
    1352     1142312 :         pergroupstate = &pergroup[transno];
    1353             : 
    1354     1142312 :         if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
    1355       17388 :             finalize_partialaggregate(aggstate, peragg, pergroupstate,
    1356       17388 :                                       &aggvalues[aggno], &aggnulls[aggno]);
    1357             :         else
    1358     1124924 :             finalize_aggregate(aggstate, peragg, pergroupstate,
    1359     1124924 :                                &aggvalues[aggno], &aggnulls[aggno]);
    1360             :     }
    1361      853022 : }
    1362             : 
    1363             : /*
    1364             :  * Project the result of a group (whose aggs have already been calculated by
    1365             :  * finalize_aggregates). Returns the result slot, or NULL if no row is
    1366             :  * projected (suppressed by qual).
    1367             :  */
    1368             : static TupleTableSlot *
    1369      853022 : project_aggregates(AggState *aggstate)
    1370             : {
    1371      853022 :     ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
    1372             : 
    1373             :     /*
    1374             :      * Check the qual (HAVING clause); if the group does not match, ignore it.
    1375             :      */
    1376      853022 :     if (ExecQual(aggstate->ss.ps.qual, econtext))
    1377             :     {
    1378             :         /*
    1379             :          * Form and return projection tuple using the aggregate results and
    1380             :          * the representative input tuple.
    1381             :          */
    1382      746546 :         return ExecProject(aggstate->ss.ps.ps_ProjInfo);
    1383             :     }
    1384             :     else
    1385      106476 :         InstrCountFiltered1(aggstate, 1);
    1386             : 
    1387      106476 :     return NULL;
    1388             : }
    1389             : 
    1390             : /*
    1391             :  * Find input-tuple columns that are needed, dividing them into
    1392             :  * aggregated and unaggregated sets.
    1393             :  */
    1394             : static void
    1395        7038 : find_cols(AggState *aggstate, Bitmapset **aggregated, Bitmapset **unaggregated)
    1396             : {
    1397        7038 :     Agg        *agg = (Agg *) aggstate->ss.ps.plan;
    1398             :     FindColsContext context;
    1399             : 
    1400        7038 :     context.is_aggref = false;
    1401        7038 :     context.aggregated = NULL;
    1402        7038 :     context.unaggregated = NULL;
    1403             : 
    1404             :     /* Examine tlist and quals */
    1405        7038 :     (void) find_cols_walker((Node *) agg->plan.targetlist, &context);
    1406        7038 :     (void) find_cols_walker((Node *) agg->plan.qual, &context);
    1407             : 
    1408             :     /* In some cases, grouping columns will not appear in the tlist */
    1409       17924 :     for (int i = 0; i < agg->numCols; i++)
    1410       10886 :         context.unaggregated = bms_add_member(context.unaggregated,
    1411       10886 :                                               agg->grpColIdx[i]);
    1412             : 
    1413        7038 :     *aggregated = context.aggregated;
    1414        7038 :     *unaggregated = context.unaggregated;
    1415        7038 : }
    1416             : 
    1417             : static bool
    1418       83970 : find_cols_walker(Node *node, FindColsContext *context)
    1419             : {
    1420       83970 :     if (node == NULL)
    1421       15114 :         return false;
    1422       68856 :     if (IsA(node, Var))
    1423             :     {
    1424       18976 :         Var        *var = (Var *) node;
    1425             : 
    1426             :         /* setrefs.c should have set the varno to OUTER_VAR */
    1427             :         Assert(var->varno == OUTER_VAR);
    1428             :         Assert(var->varlevelsup == 0);
    1429       18976 :         if (context->is_aggref)
    1430        6094 :             context->aggregated = bms_add_member(context->aggregated,
    1431        6094 :                                                  var->varattno);
    1432             :         else
    1433       12882 :             context->unaggregated = bms_add_member(context->unaggregated,
    1434       12882 :                                                    var->varattno);
    1435       18976 :         return false;
    1436             :     }
    1437       49880 :     if (IsA(node, Aggref))
    1438             :     {
    1439             :         Assert(!context->is_aggref);
    1440        8576 :         context->is_aggref = true;
    1441        8576 :         expression_tree_walker(node, find_cols_walker, context);
    1442        8576 :         context->is_aggref = false;
    1443        8576 :         return false;
    1444             :     }
    1445       41304 :     return expression_tree_walker(node, find_cols_walker, context);
    1446             : }
    1447             : 
    1448             : /*
    1449             :  * (Re-)initialize the hash table(s) to empty.
    1450             :  *
    1451             :  * To implement hashed aggregation, we need a hashtable that stores a
    1452             :  * representative tuple and an array of AggStatePerGroup structs for each
    1453             :  * distinct set of GROUP BY column values.  We compute the hash key from the
    1454             :  * GROUP BY columns.  The per-group data is allocated in initialize_hash_entry(),
    1455             :  * for each entry.
    1456             :  *
    1457             :  * We have a separate hashtable and associated perhash data structure for each
    1458             :  * grouping set for which we're doing hashing.
    1459             :  *
    1460             :  * The contents of the hash tables live in the aggstate's hash_tuplescxt
    1461             :  * memory context (there is only one of these for all tables together, since
    1462             :  * they are all reset at the same time).
    1463             :  */
    1464             : static void
    1465       17838 : build_hash_tables(AggState *aggstate)
    1466             : {
    1467             :     int         setno;
    1468             : 
    1469       36020 :     for (setno = 0; setno < aggstate->num_hashes; ++setno)
    1470             :     {
    1471       18182 :         AggStatePerHash perhash = &aggstate->perhash[setno];
    1472             :         double      nbuckets;
    1473             :         Size        memory;
    1474             : 
    1475       18182 :         if (perhash->hashtable != NULL)
    1476             :         {
    1477       12718 :             ResetTupleHashTable(perhash->hashtable);
    1478       12718 :             continue;
    1479             :         }
    1480             : 
    1481        5464 :         memory = aggstate->hash_mem_limit / aggstate->num_hashes;
    1482             : 
    1483             :         /* choose reasonable number of buckets per hashtable */
    1484        5464 :         nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
    1485        5464 :                                            perhash->aggnode->numGroups,
    1486             :                                            memory);
    1487             : 
    1488             : #ifdef USE_INJECTION_POINTS
    1489        5464 :         if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-oversize-table"))
    1490             :         {
    1491           0 :             nbuckets = memory / TupleHashEntrySize();
    1492           0 :             INJECTION_POINT_CACHED("hash-aggregate-oversize-table", NULL);
    1493             :         }
    1494             : #endif
    1495             : 
    1496        5464 :         build_hash_table(aggstate, setno, nbuckets);
    1497             :     }
    1498             : 
    1499       17838 :     aggstate->hash_ngroups_current = 0;
    1500       17838 : }
    1501             : 
    1502             : /*
    1503             :  * Build a single hashtable for this grouping set.
    1504             :  */
    1505             : static void
    1506        5464 : build_hash_table(AggState *aggstate, int setno, double nbuckets)
    1507             : {
    1508        5464 :     AggStatePerHash perhash = &aggstate->perhash[setno];
    1509        5464 :     MemoryContext metacxt = aggstate->hash_metacxt;
    1510        5464 :     MemoryContext tuplescxt = aggstate->hash_tuplescxt;
    1511        5464 :     MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
    1512             :     Size        additionalsize;
    1513             : 
    1514             :     Assert(aggstate->aggstrategy == AGG_HASHED ||
    1515             :            aggstate->aggstrategy == AGG_MIXED);
    1516             : 
    1517             :     /*
    1518             :      * Used to make sure initial hash table allocation does not exceed
    1519             :      * hash_mem. Note that the estimate does not include space for
    1520             :      * pass-by-reference transition data values, nor for the representative
    1521             :      * tuple of each group.
    1522             :      */
    1523        5464 :     additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
    1524             : 
    1525       10928 :     perhash->hashtable = BuildTupleHashTable(&aggstate->ss.ps,
    1526        5464 :                                              perhash->hashslot->tts_tupleDescriptor,
    1527        5464 :                                              perhash->hashslot->tts_ops,
    1528             :                                              perhash->numCols,
    1529             :                                              perhash->hashGrpColIdxHash,
    1530        5464 :                                              perhash->eqfuncoids,
    1531             :                                              perhash->hashfunctions,
    1532        5464 :                                              perhash->aggnode->grpCollations,
    1533             :                                              nbuckets,
    1534             :                                              additionalsize,
    1535             :                                              metacxt,
    1536             :                                              tuplescxt,
    1537             :                                              tmpcxt,
    1538        5464 :                                              DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
    1539        5464 : }
    1540             : 
    1541             : /*
    1542             :  * Compute columns that actually need to be stored in hashtable entries.  The
    1543             :  * incoming tuples from the child plan node will contain grouping columns,
    1544             :  * other columns referenced in our targetlist and qual, columns used to
    1545             :  * compute the aggregate functions, and perhaps just junk columns we don't use
    1546             :  * at all.  Only columns of the first two types need to be stored in the
    1547             :  * hashtable, and getting rid of the others can make the table entries
    1548             :  * significantly smaller.  The hashtable only contains the relevant columns,
    1549             :  * and is packed/unpacked in lookup_hash_entries() / agg_retrieve_hash_table()
    1550             :  * into the format of the normal input descriptor.
    1551             :  *
     1552             :  * Columns needed beyond the ones we group by come from two sources: first,
     1553             :  * functionally dependent columns that we don't need to group by themselves;
     1554             :  * and second, ctids for row-marks.
    1555             :  *
    1556             :  * To eliminate duplicates, we build a bitmapset of the needed columns, and
    1557             :  * then build an array of the columns included in the hashtable. We might
    1558             :  * still have duplicates if the passed-in grpColIdx has them, which can happen
    1559             :  * in edge cases from semijoins/distinct; these can't always be removed,
    1560             :  * because it's not certain that the duplicate cols will be using the same
    1561             :  * hash function.
    1562             :  *
    1563             :  * Note that the array is preserved over ExecReScanAgg, so we allocate it in
    1564             :  * the per-query context (unlike the hash table itself).
    1565             :  */
    1566             : static void
    1567        7038 : find_hash_columns(AggState *aggstate)
    1568             : {
    1569             :     Bitmapset  *base_colnos;
    1570             :     Bitmapset  *aggregated_colnos;
    1571        7038 :     TupleDesc   scanDesc = aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor;
    1572        7038 :     List       *outerTlist = outerPlanState(aggstate)->plan->targetlist;
    1573        7038 :     int         numHashes = aggstate->num_hashes;
    1574        7038 :     EState     *estate = aggstate->ss.ps.state;
    1575             :     int         j;
    1576             : 
    1577             :     /* Find Vars that will be needed in tlist and qual */
    1578        7038 :     find_cols(aggstate, &aggregated_colnos, &base_colnos);
    1579        7038 :     aggstate->colnos_needed = bms_union(base_colnos, aggregated_colnos);
    1580        7038 :     aggstate->max_colno_needed = 0;
    1581        7038 :     aggstate->all_cols_needed = true;
    1582             : 
    1583       29426 :     for (int i = 0; i < scanDesc->natts; i++)
    1584             :     {
    1585       22388 :         int         colno = i + 1;
    1586             : 
    1587       22388 :         if (bms_is_member(colno, aggstate->colnos_needed))
    1588       16300 :             aggstate->max_colno_needed = colno;
    1589             :         else
    1590        6088 :             aggstate->all_cols_needed = false;
    1591             :     }
    1592             : 
    1593       14590 :     for (j = 0; j < numHashes; ++j)
    1594             :     {
    1595        7552 :         AggStatePerHash perhash = &aggstate->perhash[j];
    1596        7552 :         Bitmapset  *colnos = bms_copy(base_colnos);
    1597        7552 :         AttrNumber *grpColIdx = perhash->aggnode->grpColIdx;
    1598        7552 :         List       *hashTlist = NIL;
    1599             :         TupleDesc   hashDesc;
    1600             :         int         maxCols;
    1601             :         int         i;
    1602             : 
    1603        7552 :         perhash->largestGrpColIdx = 0;
    1604             : 
    1605             :         /*
    1606             :          * If we're doing grouping sets, then some Vars might be referenced in
    1607             :          * tlist/qual for the benefit of other grouping sets, but not needed
    1608             :          * when hashing; i.e. prepare_projection_slot will null them out, so
    1609             :          * there'd be no point storing them.  Use prepare_projection_slot's
    1610             :          * logic to determine which.
    1611             :          */
    1612        7552 :         if (aggstate->phases[0].grouped_cols)
    1613             :         {
    1614        7552 :             Bitmapset  *grouped_cols = aggstate->phases[0].grouped_cols[j];
    1615             :             ListCell   *lc;
    1616             : 
    1617       20404 :             foreach(lc, aggstate->all_grouped_cols)
    1618             :             {
    1619       12852 :                 int         attnum = lfirst_int(lc);
    1620             : 
    1621       12852 :                 if (!bms_is_member(attnum, grouped_cols))
    1622        1344 :                     colnos = bms_del_member(colnos, attnum);
    1623             :             }
    1624             :         }
    1625             : 
    1626             :         /*
    1627             :          * Compute maximum number of input columns accounting for possible
    1628             :          * duplications in the grpColIdx array, which can happen in some edge
    1629             :          * cases where HashAggregate was generated as part of a semijoin or a
    1630             :          * DISTINCT.
    1631             :          */
    1632        7552 :         maxCols = bms_num_members(colnos) + perhash->numCols;
    1633             : 
    1634        7552 :         perhash->hashGrpColIdxInput =
    1635        7552 :             palloc(maxCols * sizeof(AttrNumber));
    1636        7552 :         perhash->hashGrpColIdxHash =
    1637        7552 :             palloc(perhash->numCols * sizeof(AttrNumber));
    1638             : 
    1639             :         /* Add all the grouping columns to colnos */
    1640       19060 :         for (i = 0; i < perhash->numCols; i++)
    1641       11508 :             colnos = bms_add_member(colnos, grpColIdx[i]);
    1642             : 
    1643             :         /*
    1644             :          * First build mapping for columns directly hashed. These are the
    1645             :          * first, because they'll be accessed when computing hash values and
    1646             :          * comparing tuples for exact matches. We also build simple mapping
    1647             :          * for execGrouping, so it knows where to find the to-be-hashed /
    1648             :          * compared columns in the input.
    1649             :          */
    1650       19060 :         for (i = 0; i < perhash->numCols; i++)
    1651             :         {
    1652       11508 :             perhash->hashGrpColIdxInput[i] = grpColIdx[i];
    1653       11508 :             perhash->hashGrpColIdxHash[i] = i + 1;
    1654       11508 :             perhash->numhashGrpCols++;
    1655             :             /* delete already mapped columns */
    1656       11508 :             colnos = bms_del_member(colnos, grpColIdx[i]);
    1657             :         }
    1658             : 
    1659             :         /* and add the remaining columns */
    1660        7552 :         i = -1;
    1661        8826 :         while ((i = bms_next_member(colnos, i)) >= 0)
    1662             :         {
    1663        1274 :             perhash->hashGrpColIdxInput[perhash->numhashGrpCols] = i;
    1664        1274 :             perhash->numhashGrpCols++;
    1665             :         }
    1666             : 
    1667             :         /* and build a tuple descriptor for the hashtable */
    1668       20334 :         for (i = 0; i < perhash->numhashGrpCols; i++)
    1669             :         {
    1670       12782 :             int         varNumber = perhash->hashGrpColIdxInput[i] - 1;
    1671             : 
    1672       12782 :             hashTlist = lappend(hashTlist, list_nth(outerTlist, varNumber));
    1673       12782 :             perhash->largestGrpColIdx =
    1674       12782 :                 Max(varNumber + 1, perhash->largestGrpColIdx);
    1675             :         }
    1676             : 
    1677        7552 :         hashDesc = ExecTypeFromTL(hashTlist);
    1678             : 
    1679        7552 :         execTuplesHashPrepare(perhash->numCols,
    1680        7552 :                               perhash->aggnode->grpOperators,
    1681             :                               &perhash->eqfuncoids,
    1682             :                               &perhash->hashfunctions);
    1683        7552 :         perhash->hashslot =
    1684        7552 :             ExecAllocTableSlot(&estate->es_tupleTable, hashDesc,
    1685             :                                &TTSOpsMinimalTuple);
    1686             : 
    1687        7552 :         list_free(hashTlist);
    1688        7552 :         bms_free(colnos);
    1689             :     }
    1690             : 
    1691        7038 :     bms_free(base_colnos);
    1692        7038 : }
    1693             : 
    1694             : /*
    1695             :  * Estimate per-hash-table-entry overhead.
    1696             :  */
    1697             : Size
    1698       41776 : hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
    1699             : {
    1700             :     Size        tupleChunkSize;
    1701             :     Size        pergroupChunkSize;
    1702             :     Size        transitionChunkSize;
    1703       41776 :     Size        tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
    1704             :                              tupleWidth);
    1705       41776 :     Size        pergroupSize = numTrans * sizeof(AggStatePerGroupData);
    1706             : 
    1707             :     /*
    1708             :      * Entries use the Bump allocator, so the chunk sizes are the same as the
    1709             :      * requested sizes.
    1710             :      */
    1711       41776 :     tupleChunkSize = MAXALIGN(tupleSize);
    1712       41776 :     pergroupChunkSize = pergroupSize;
    1713             : 
    1714             :     /*
    1715             :      * Transition values use AllocSet, which has a chunk header and also uses
    1716             :      * power-of-two allocations.
    1717             :      */
    1718       41776 :     if (transitionSpace > 0)
    1719        5396 :         transitionChunkSize = CHUNKHDRSZ + pg_nextpower2_size_t(transitionSpace);
    1720             :     else
    1721       36380 :         transitionChunkSize = 0;
    1722             : 
    1723             :     return
    1724       41776 :         TupleHashEntrySize() +
    1725       41776 :         tupleChunkSize +
    1726       41776 :         pergroupChunkSize +
    1727             :         transitionChunkSize;
    1728             : }
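
/*
 * Illustrative sketch (not part of nodeAgg.c): the arithmetic of
 * hash_agg_entry_size() redone for one concrete case.  The constants below
 * (entry header, minimal-tuple header, chunk header, alignment) are
 * hypothetical stand-ins for the PostgreSQL values, so the exact numbers
 * are assumptions; only the shape of the formula matches the code above.
 */
#include <stdio.h>
#include <stddef.h>

#define TOY_MAXALIGN(x)     (((x) + 7) & ~((size_t) 7))
#define TOY_ENTRY_HDR       24  /* stand-in for TupleHashEntrySize() */
#define TOY_MINTUP_HDR      16  /* stand-in for SizeofMinimalTupleHeader */
#define TOY_CHUNKHDRSZ      16  /* stand-in for CHUNKHDRSZ */
#define TOY_PERGROUP_SZ     16  /* stand-in for sizeof(AggStatePerGroupData) */

static size_t
next_pow2(size_t v)
{
    size_t      p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

static size_t
toy_hash_agg_entry_size(int numTrans, size_t tupleWidth, size_t transitionSpace)
{
    size_t      tupleChunkSize;
    size_t      pergroupChunkSize;
    size_t      transitionChunkSize = 0;

    tupleChunkSize = TOY_MAXALIGN(TOY_MAXALIGN(TOY_MINTUP_HDR) + tupleWidth);
    pergroupChunkSize = numTrans * TOY_PERGROUP_SZ;

    if (transitionSpace > 0)
        transitionChunkSize = TOY_CHUNKHDRSZ + next_pow2(transitionSpace);

    return TOY_ENTRY_HDR + tupleChunkSize + pergroupChunkSize + transitionChunkSize;
}

int
main(void)
{
    /* two transition states, 40-byte grouping tuple, 100-byte growable state */
    printf("%zu bytes per entry\n", toy_hash_agg_entry_size(2, 40, 100));
    return 0;
}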
    1729             : 
    1730             : /*
    1731             :  * hashagg_recompile_expressions()
    1732             :  *
    1733             :  * Identifies the right phase, compiles the right expression given the
     1734             :  * arguments, and then sets phase->evaltrans to that expression.
    1735             :  *
    1736             :  * Different versions of the compiled expression are needed depending on
    1737             :  * whether hash aggregation has spilled or not, and whether it's reading from
    1738             :  * the outer plan or a tape. Before spilling to disk, the expression reads
    1739             :  * from the outer plan and does not need to perform a NULL check. After
    1740             :  * HashAgg begins to spill, new groups will not be created in the hash table,
    1741             :  * and the AggStatePerGroup array may be NULL; therefore we need to add a null
    1742             :  * pointer check to the expression. Then, when reading spilled data from a
    1743             :  * tape, we change the outer slot type to be a fixed minimal tuple slot.
    1744             :  *
    1745             :  * It would be wasteful to recompile every time, so cache the compiled
    1746             :  * expressions in the AggStatePerPhase, and reuse when appropriate.
    1747             :  */
    1748             : static void
    1749       66060 : hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
    1750             : {
    1751             :     AggStatePerPhase phase;
    1752       66060 :     int         i = minslot ? 1 : 0;
    1753       66060 :     int         j = nullcheck ? 1 : 0;
    1754             : 
    1755             :     Assert(aggstate->aggstrategy == AGG_HASHED ||
    1756             :            aggstate->aggstrategy == AGG_MIXED);
    1757             : 
    1758       66060 :     if (aggstate->aggstrategy == AGG_HASHED)
    1759       13488 :         phase = &aggstate->phases[0];
    1760             :     else                        /* AGG_MIXED */
    1761       52572 :         phase = &aggstate->phases[1];
    1762             : 
    1763       66060 :     if (phase->evaltrans_cache[i][j] == NULL)
    1764             :     {
    1765          88 :         const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
    1766          88 :         bool        outerfixed = aggstate->ss.ps.outeropsfixed;
    1767          88 :         bool        dohash = true;
    1768          88 :         bool        dosort = false;
    1769             : 
    1770             :         /*
    1771             :          * If minslot is true, that means we are processing a spilled batch
    1772             :          * (inside agg_refill_hash_table()), and we must not advance the
    1773             :          * sorted grouping sets.
    1774             :          */
    1775          88 :         if (aggstate->aggstrategy == AGG_MIXED && !minslot)
    1776          12 :             dosort = true;
    1777             : 
    1778             :         /* temporarily change the outerops while compiling the expression */
    1779          88 :         if (minslot)
    1780             :         {
    1781          44 :             aggstate->ss.ps.outerops = &TTSOpsMinimalTuple;
    1782          44 :             aggstate->ss.ps.outeropsfixed = true;
    1783             :         }
    1784             : 
    1785          88 :         phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
    1786             :                                                          dosort, dohash,
    1787             :                                                          nullcheck);
    1788             : 
    1789             :         /* change back */
    1790          88 :         aggstate->ss.ps.outerops = outerops;
    1791          88 :         aggstate->ss.ps.outeropsfixed = outerfixed;
    1792             :     }
    1793             : 
    1794       66060 :     phase->evaltrans = phase->evaltrans_cache[i][j];
    1795       66060 : }
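
/*
 * Illustrative sketch (not part of nodeAgg.c): the function above memoizes
 * up to four compiled variants of the transition expression, keyed by two
 * booleans (reading from a spill tape?  need a NULL check?).  The same
 * caching shape with a toy "compile" step; names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

typedef struct CompiledExpr
{
    bool        minslot;
    bool        nullcheck;
} CompiledExpr;

static CompiledExpr *cache[2][2];   /* indexed by [minslot][nullcheck] */

/* toy stand-in for the expensive compilation step */
static CompiledExpr *
toy_compile(bool minslot, bool nullcheck)
{
    CompiledExpr *e = malloc(sizeof(CompiledExpr));

    e->minslot = minslot;
    e->nullcheck = nullcheck;
    printf("compiling variant (minslot=%d, nullcheck=%d)\n", minslot, nullcheck);
    return e;
}

static CompiledExpr *
toy_recompile(bool minslot, bool nullcheck)
{
    int         i = minslot ? 1 : 0;
    int         j = nullcheck ? 1 : 0;

    if (cache[i][j] == NULL)
        cache[i][j] = toy_compile(minslot, nullcheck);
    return cache[i][j];
}

int
main(void)
{
    toy_recompile(false, false);    /* compiles */
    toy_recompile(false, true);     /* compiles */
    toy_recompile(false, false);    /* cache hit: no message */
    return 0;
}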
    1796             : 
    1797             : /*
    1798             :  * Set limits that trigger spilling to avoid exceeding hash_mem. Consider the
    1799             :  * number of partitions we expect to create (if we do spill).
    1800             :  *
    1801             :  * There are two limits: a memory limit, and also an ngroups limit. The
    1802             :  * ngroups limit becomes important when we expect transition values to grow
    1803             :  * substantially larger than the initial value.
    1804             :  */
    1805             : void
    1806       66274 : hash_agg_set_limits(double hashentrysize, double input_groups, int used_bits,
    1807             :                     Size *mem_limit, uint64 *ngroups_limit,
    1808             :                     int *num_partitions)
    1809             : {
    1810             :     int         npartitions;
    1811             :     Size        partition_mem;
    1812       66274 :     Size        hash_mem_limit = get_hash_memory_limit();
    1813             : 
    1814             :     /* if not expected to spill, use all of hash_mem */
    1815       66274 :     if (input_groups * hashentrysize <= hash_mem_limit)
    1816             :     {
    1817       63844 :         if (num_partitions != NULL)
    1818       39234 :             *num_partitions = 0;
    1819       63844 :         *mem_limit = hash_mem_limit;
    1820       63844 :         *ngroups_limit = hash_mem_limit / hashentrysize;
    1821       63844 :         return;
    1822             :     }
    1823             : 
    1824             :     /*
    1825             :      * Calculate expected memory requirements for spilling, which is the size
    1826             :      * of the buffers needed for all the tapes that need to be open at once.
    1827             :      * Then, subtract that from the memory available for holding hash tables.
    1828             :      */
    1829        2430 :     npartitions = hash_choose_num_partitions(input_groups,
    1830             :                                              hashentrysize,
    1831             :                                              used_bits,
    1832             :                                              NULL);
    1833        2430 :     if (num_partitions != NULL)
    1834          96 :         *num_partitions = npartitions;
    1835             : 
    1836        2430 :     partition_mem =
    1837        2430 :         HASHAGG_READ_BUFFER_SIZE +
    1838             :         HASHAGG_WRITE_BUFFER_SIZE * npartitions;
    1839             : 
    1840             :     /*
    1841             :      * Don't set the limit below 3/4 of hash_mem. In that case, we are at the
    1842             :      * minimum number of partitions, so we aren't going to dramatically exceed
    1843             :      * hash_mem anyway.
    1844             :      */
    1845        2430 :     if (hash_mem_limit > 4 * partition_mem)
    1846           0 :         *mem_limit = hash_mem_limit - partition_mem;
    1847             :     else
    1848        2430 :         *mem_limit = hash_mem_limit * 0.75;
    1849             : 
    1850        2430 :     if (*mem_limit > hashentrysize)
    1851        2430 :         *ngroups_limit = *mem_limit / hashentrysize;
    1852             :     else
    1853           0 :         *ngroups_limit = 1;
    1854             : }
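
As a worked example of the limit arithmetic, the sketch below reproduces the two cases (no spill expected vs. spill expected). The buffer sizes and the 4MB hash_mem budget are illustrative constants, and npartitions is taken as a parameter here, whereas the real code derives these from nodeAgg.c's defines, get_hash_memory_limit() and hash_choose_num_partitions().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative buffer sizes; the real values come from nodeAgg.c. */
#define READ_BUFFER_SIZE   (8 * 8192)
#define WRITE_BUFFER_SIZE  (8 * 8192)

/*
 * Mirror of the mem_limit/ngroups_limit computation: either the whole
 * hash_mem budget (if no spill is expected), or hash_mem minus the tape
 * buffers, but never less than 3/4 of hash_mem.
 */
static void
set_limits(double hashentrysize, double input_groups, int npartitions,
           size_t hash_mem_limit, size_t *mem_limit, uint64_t *ngroups_limit)
{
    size_t      partition_mem;

    if (input_groups * hashentrysize <= hash_mem_limit)
    {
        *mem_limit = hash_mem_limit;
        *ngroups_limit = (uint64_t) (hash_mem_limit / hashentrysize);
        return;
    }

    partition_mem = READ_BUFFER_SIZE + WRITE_BUFFER_SIZE * npartitions;

    if (hash_mem_limit > 4 * partition_mem)
        *mem_limit = hash_mem_limit - partition_mem;
    else
        *mem_limit = (size_t) (hash_mem_limit * 0.75);

    *ngroups_limit = (*mem_limit > hashentrysize) ?
        (uint64_t) (*mem_limit / hashentrysize) : 1;
}

int
main(void)
{
    size_t      mem_limit;
    uint64_t    ngroups_limit;

    /* 10 million expected groups of ~64 bytes against a 4MB hash_mem budget */
    set_limits(64.0, 10e6, 32, 4 * 1024 * 1024, &mem_limit, &ngroups_limit);
    printf("mem_limit=%zu ngroups_limit=%llu\n",
           mem_limit, (unsigned long long) ngroups_limit);
    return 0;
}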
    1855             : 
    1856             : /*
    1857             :  * hash_agg_check_limits
    1858             :  *
    1859             :  * After adding a new group to the hash table, check whether we need to enter
    1860             :  * spill mode. Allocations may happen without adding new groups (for instance,
    1861             :  * if the transition state size grows), so this check is imperfect.
    1862             :  */
    1863             : static void
    1864      529632 : hash_agg_check_limits(AggState *aggstate)
    1865             : {
    1866      529632 :     uint64      ngroups = aggstate->hash_ngroups_current;
    1867      529632 :     Size        meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
    1868             :                                                      true);
    1869      529632 :     Size        entry_mem = MemoryContextMemAllocated(aggstate->hash_tuplescxt,
    1870             :                                                       true);
    1871      529632 :     Size        tval_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
    1872             :                                                      true);
    1873      529632 :     Size        total_mem = meta_mem + entry_mem + tval_mem;
    1874      529632 :     bool        do_spill = false;
    1875             : 
    1876             : #ifdef USE_INJECTION_POINTS
    1877      529632 :     if (ngroups >= 1000)
    1878             :     {
    1879       95650 :         if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-spill-1000"))
    1880             :         {
    1881          10 :             do_spill = true;
    1882          10 :             INJECTION_POINT_CACHED("hash-aggregate-spill-1000", NULL);
    1883             :         }
    1884             :     }
    1885             : #endif
    1886             : 
    1887             :     /*
    1888             :      * Don't spill unless there's at least one group in the hash table so we
    1889             :      * can be sure to make progress even in edge cases.
    1890             :      */
    1891      529632 :     if (aggstate->hash_ngroups_current > 0 &&
    1892      529632 :         (total_mem > aggstate->hash_mem_limit ||
    1893      503220 :          ngroups > aggstate->hash_ngroups_limit))
    1894             :     {
    1895       26448 :         do_spill = true;
    1896             :     }
    1897             : 
    1898      529632 :     if (do_spill)
    1899       26458 :         hash_agg_enter_spill_mode(aggstate);
    1900      529632 : }
    1901             : 
    1902             : /*
    1903             :  * Enter "spill mode", meaning that no new groups are added to any of the hash
    1904             :  * tables. Tuples that would create a new group are instead spilled, and
    1905             :  * processed later.
    1906             :  */
    1907             : static void
    1908       26458 : hash_agg_enter_spill_mode(AggState *aggstate)
    1909             : {
    1910       26458 :     INJECTION_POINT("hash-aggregate-enter-spill-mode", NULL);
    1911       26458 :     aggstate->hash_spill_mode = true;
    1912       26458 :     hashagg_recompile_expressions(aggstate, aggstate->table_filled, true);
    1913             : 
    1914       26458 :     if (!aggstate->hash_ever_spilled)
    1915             :     {
    1916             :         Assert(aggstate->hash_tapeset == NULL);
    1917             :         Assert(aggstate->hash_spills == NULL);
    1918             : 
    1919          62 :         aggstate->hash_ever_spilled = true;
    1920             : 
    1921          62 :         aggstate->hash_tapeset = LogicalTapeSetCreate(true, NULL, -1);
    1922             : 
    1923          62 :         aggstate->hash_spills = palloc_array(HashAggSpill, aggstate->num_hashes);
    1924             : 
    1925         184 :         for (int setno = 0; setno < aggstate->num_hashes; setno++)
    1926             :         {
    1927         122 :             AggStatePerHash perhash = &aggstate->perhash[setno];
    1928         122 :             HashAggSpill *spill = &aggstate->hash_spills[setno];
    1929             : 
    1930         122 :             hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
    1931         122 :                                perhash->aggnode->numGroups,
    1932             :                                aggstate->hashentrysize);
    1933             :         }
    1934             :     }
    1935       26458 : }
    1936             : 
    1937             : /*
    1938             :  * Update metrics after filling the hash table.
    1939             :  *
    1940             :  * If reading from the outer plan, from_tape should be false; if reading from
    1941             :  * another tape, from_tape should be true.
    1942             :  */
    1943             : static void
    1944       44522 : hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
    1945             : {
    1946             :     Size        meta_mem;
    1947             :     Size        entry_mem;
    1948             :     Size        hashkey_mem;
    1949             :     Size        buffer_mem;
    1950             :     Size        total_mem;
    1951             : 
    1952       44522 :     if (aggstate->aggstrategy != AGG_MIXED &&
    1953       18116 :         aggstate->aggstrategy != AGG_HASHED)
    1954           0 :         return;
    1955             : 
    1956             :     /* memory for the hash table itself */
    1957       44522 :     meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
    1958             : 
    1959             :     /* memory for hash entries */
    1960       44522 :     entry_mem = MemoryContextMemAllocated(aggstate->hash_tuplescxt, true);
    1961             : 
    1962             :     /* memory for byref transition states */
    1963       44522 :     hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
    1964             : 
    1965             :     /* memory for read/write tape buffers, if spilled */
    1966       44522 :     buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
    1967       44522 :     if (from_tape)
    1968       26944 :         buffer_mem += HASHAGG_READ_BUFFER_SIZE;
    1969             : 
    1970             :     /* update peak mem */
    1971       44522 :     total_mem = meta_mem + entry_mem + hashkey_mem + buffer_mem;
    1972       44522 :     if (total_mem > aggstate->hash_mem_peak)
    1973        4970 :         aggstate->hash_mem_peak = total_mem;
    1974             : 
    1975             :     /* update disk usage */
    1976       44522 :     if (aggstate->hash_tapeset != NULL)
    1977             :     {
    1978       27006 :         uint64      disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeset) * (BLCKSZ / 1024);
    1979             : 
    1980       27006 :         if (aggstate->hash_disk_used < disk_used)
    1981          52 :             aggstate->hash_disk_used = disk_used;
    1982             :     }
    1983             : 
    1984             :     /* update hashentrysize estimate based on contents */
    1985       44522 :     if (aggstate->hash_ngroups_current > 0)
    1986             :     {
    1987       43946 :         aggstate->hashentrysize =
    1988       43946 :             TupleHashEntrySize() +
    1989       43946 :             (hashkey_mem / (double) aggstate->hash_ngroups_current);
    1990             :     }
    1991             : }
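
The interesting part above is the feedback loop: hashentrysize is re-estimated from the memory actually consumed per group, which feeds later spill-partitioning decisions. Below is a minimal sketch of that bookkeeping with made-up memory numbers; ENTRY_OVERHEAD is a hypothetical stand-in for TupleHashEntrySize().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative fixed per-entry overhead; stands in for TupleHashEntrySize(). */
#define ENTRY_OVERHEAD 40.0

typedef struct Metrics
{
    size_t      mem_peak;       /* high-water mark of total memory */
    double      entrysize;      /* running estimate of bytes per group */
} Metrics;

/*
 * Update the peak and re-derive the per-entry size estimate from what is
 * actually in the table, as hash_agg_update_metrics() does.
 */
static void
update_metrics(Metrics *m, size_t meta_mem, size_t entry_mem,
               size_t hashkey_mem, size_t buffer_mem, uint64_t ngroups)
{
    size_t      total = meta_mem + entry_mem + hashkey_mem + buffer_mem;

    if (total > m->mem_peak)
        m->mem_peak = total;

    if (ngroups > 0)
        m->entrysize = ENTRY_OVERHEAD + hashkey_mem / (double) ngroups;
}

int
main(void)
{
    Metrics     m = {0, ENTRY_OVERHEAD};

    update_metrics(&m, 64 * 1024, 512 * 1024, 256 * 1024, 0, 10000);
    printf("peak=%zu bytes, estimated entry size=%.1f bytes\n",
           m.mem_peak, m.entrysize);
    return 0;
}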
    1992             : 
    1993             : /*
    1994             :  * Create memory contexts used for hash aggregation.
    1995             :  */
    1996             : static void
    1997        7038 : hash_create_memory(AggState *aggstate)
    1998             : {
    1999        7038 :     Size        maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
    2000             : 
    2001             :     /*
    2002             :      * The hashcontext's per-tuple memory will be used for byref transition
    2003             :      * values and returned by AggCheckCallContext().
    2004             :      */
    2005        7038 :     aggstate->hashcontext = CreateWorkExprContext(aggstate->ss.ps.state);
    2006             : 
    2007             :     /*
    2008             :      * The meta context will be used for the bucket array of
    2009             :      * TupleHashEntryData (or arrays, in the case of grouping sets). As the
    2010             :      * hash table grows, the bucket array will double in size and the old one
    2011             :      * will be freed, so an AllocSet is appropriate. For large bucket arrays,
    2012             :      * the large allocation path will be used, so it's not worth worrying
    2013             :      * about wasting space due to power-of-two allocations.
    2014             :      */
    2015        7038 :     aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
    2016             :                                                    "HashAgg meta context",
    2017             :                                                    ALLOCSET_DEFAULT_SIZES);
    2018             : 
    2019             :     /*
    2020             :      * The hash entries themselves, which include the grouping key
    2021             :      * (firstTuple) and pergroup data, are stored in the table context. The
    2022             :      * bump allocator can be used because the entries are not freed until the
    2023             :      * entire hash table is reset. The bump allocator is faster for
    2024             :      * allocations and avoids wasting space on the chunk header or
    2025             :      * power-of-two allocations.
    2026             :      *
    2027             :      * Like CreateWorkExprContext(), use smaller sizings for smaller work_mem,
    2028             :      * to avoid large jumps in memory usage.
    2029             :      */
    2035        7038 :     maxBlockSize = pg_prevpower2_size_t(work_mem * (Size) 1024 / 16);
    2036             : 
    2037             :     /* But no bigger than ALLOCSET_DEFAULT_MAXSIZE */
    2038        7038 :     maxBlockSize = Min(maxBlockSize, ALLOCSET_DEFAULT_MAXSIZE);
    2039             : 
    2040             :     /* and no smaller than ALLOCSET_DEFAULT_INITSIZE */
    2041        7038 :     maxBlockSize = Max(maxBlockSize, ALLOCSET_DEFAULT_INITSIZE);
    2042             : 
    2043        7038 :     aggstate->hash_tuplescxt = BumpContextCreate(aggstate->ss.ps.state->es_query_cxt,
    2044             :                                                  "HashAgg hashed tuples",
    2045             :                                                  ALLOCSET_DEFAULT_MINSIZE,
    2046             :                                                  ALLOCSET_DEFAULT_INITSIZE,
    2047             :                                                  maxBlockSize);
    2048             : 
    2049        7038 : }
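
The block-size derivation above scales the bump context's maximum block size with work_mem and clamps it to the AllocSet defaults. A self-contained sketch of that calculation follows; prevpower2() is a portable stand-in for pg_prevpower2_size_t(), and the 8kB/8MB clamp values are stated here as assumptions matching the usual AllocSet defaults.

#include <stddef.h>
#include <stdio.h>

#define ALLOCSET_DEFAULT_INITSIZE  (8 * 1024)
#define ALLOCSET_DEFAULT_MAXSIZE   (8 * 1024 * 1024)

/* Portable stand-in for pg_prevpower2_size_t(): largest power of 2 <= x. */
static size_t
prevpower2(size_t x)
{
    size_t      p = 1;

    while (p <= x / 2)
        p <<= 1;
    return p;
}

/*
 * Derive the bump context's max block size from work_mem (in kB), clamped
 * to the AllocSet defaults, mirroring hash_create_memory().
 */
static size_t
choose_max_block_size(size_t work_mem_kb)
{
    size_t      maxBlockSize = prevpower2(work_mem_kb * 1024 / 16);

    if (maxBlockSize > ALLOCSET_DEFAULT_MAXSIZE)
        maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
    if (maxBlockSize < ALLOCSET_DEFAULT_INITSIZE)
        maxBlockSize = ALLOCSET_DEFAULT_INITSIZE;
    return maxBlockSize;
}

int
main(void)
{
    printf("work_mem=64kB -> %zu\n", choose_max_block_size(64));
    printf("work_mem=4MB  -> %zu\n", choose_max_block_size(4 * 1024));
    printf("work_mem=1GB  -> %zu\n", choose_max_block_size(1024 * 1024));
    return 0;
}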
    2050             : 
    2051             : /*
    2052             :  * Choose a reasonable number of buckets for the initial hash table size.
    2053             :  */
    2054             : static double
    2055        5464 : hash_choose_num_buckets(double hashentrysize, double ngroups, Size memory)
    2056             : {
    2057             :     double      max_nbuckets;
    2058        5464 :     double      nbuckets = ngroups;
    2059             : 
    2060        5464 :     max_nbuckets = memory / hashentrysize;
    2061             : 
    2062             :     /*
    2063             :      * Underestimating is better than overestimating. Too many buckets crowd
    2064             :      * out space for group keys and transition state values.
    2065             :      */
    2066        5464 :     max_nbuckets /= 2;
    2067             : 
    2068        5464 :     if (nbuckets > max_nbuckets)
    2069          72 :         nbuckets = max_nbuckets;
    2070             : 
    2071             :     /*
    2072             :      * BuildTupleHashTable will clamp any obviously-insane result, so we don't
    2073             :      * need to be too careful here.
    2074             :      */
    2075        5464 :     return nbuckets;
    2076             : }
    2077             : 
    2078             : /*
    2079             :  * Determine the number of partitions to create when spilling, which will
    2080             :  * always be a power of two. If log2_npartitions is non-NULL, set
    2081             :  * *log2_npartitions to the log2() of the number of partitions.
    2082             :  */
    2083             : static int
    2084       15052 : hash_choose_num_partitions(double input_groups, double hashentrysize,
    2085             :                            int used_bits, int *log2_npartitions)
    2086             : {
    2087       15052 :     Size        hash_mem_limit = get_hash_memory_limit();
    2088             :     double      partition_limit;
    2089             :     double      mem_wanted;
    2090             :     double      dpartitions;
    2091             :     int         npartitions;
    2092             :     int         partition_bits;
    2093             : 
    2094             :     /*
    2095             :      * Avoid creating so many partitions that the memory requirements of the
    2096             :      * open partition files are greater than 1/4 of hash_mem.
    2097             :      */
    2098       15052 :     partition_limit =
    2099       15052 :         (hash_mem_limit * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
    2100             :         HASHAGG_WRITE_BUFFER_SIZE;
    2101             : 
    2102       15052 :     mem_wanted = HASHAGG_PARTITION_FACTOR * input_groups * hashentrysize;
    2103             : 
    2104             :     /* make enough partitions so that each one is likely to fit in memory */
    2105       15052 :     dpartitions = 1 + (mem_wanted / hash_mem_limit);
    2106             : 
    2107       15052 :     if (dpartitions > partition_limit)
    2108       14988 :         dpartitions = partition_limit;
    2109             : 
    2110       15052 :     if (dpartitions < HASHAGG_MIN_PARTITIONS)
    2111       15052 :         dpartitions = HASHAGG_MIN_PARTITIONS;
    2112       15052 :     if (dpartitions > HASHAGG_MAX_PARTITIONS)
    2113           0 :         dpartitions = HASHAGG_MAX_PARTITIONS;
    2114             : 
    2115             :     /* HASHAGG_MAX_PARTITIONS limit makes this safe */
    2116       15052 :     npartitions = (int) dpartitions;
    2117             : 
    2118             :     /* ceil(log2(npartitions)) */
    2119       15052 :     partition_bits = pg_ceil_log2_32(npartitions);
    2120             : 
    2121             :     /* make sure that we don't exhaust the hash bits */
    2122       15052 :     if (partition_bits + used_bits >= 32)
    2123           0 :         partition_bits = 32 - used_bits;
    2124             : 
    2125       15052 :     if (log2_npartitions != NULL)
    2126       12622 :         *log2_npartitions = partition_bits;
    2127             : 
    2128             :     /* number of partitions will be a power of two */
    2129       15052 :     npartitions = 1 << partition_bits;
    2130             : 
    2131       15052 :     return npartitions;
    2132             : }
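
To make the clamping and power-of-two rounding concrete, the sketch below re-derives a partition count for one illustrative input. The MIN/MAX/factor/buffer constants are illustrative stand-ins for the HASHAGG_* defines, and ceil_log2() stands in for pg_ceil_log2_32().

#include <stddef.h>
#include <stdio.h>

#define MIN_PARTITIONS      4
#define MAX_PARTITIONS   1024
#define PARTITION_FACTOR 1.50
#define READ_BUFFER_SIZE   (8 * 8192)
#define WRITE_BUFFER_SIZE  (8 * 8192)

/* ceil(log2(x)) for x >= 1 */
static int
ceil_log2(unsigned int x)
{
    int         bits = 0;
    unsigned int p = 1;

    while (p < x)
    {
        p <<= 1;
        bits++;
    }
    return bits;
}

/*
 * Sketch of the partition-count choice: enough partitions that each one is
 * likely to fit in hash_mem, capped so the write buffers stay under 1/4 of
 * hash_mem, clamped to [MIN, MAX], then rounded up to a power of two.
 */
static int
choose_num_partitions(double input_groups, double hashentrysize,
                      int used_bits, size_t hash_mem_limit)
{
    double      partition_limit =
        (hash_mem_limit * 0.25 - READ_BUFFER_SIZE) / WRITE_BUFFER_SIZE;
    double      mem_wanted = PARTITION_FACTOR * input_groups * hashentrysize;
    double      dpartitions = 1 + (mem_wanted / hash_mem_limit);
    int         partition_bits;

    if (dpartitions > partition_limit)
        dpartitions = partition_limit;
    if (dpartitions < MIN_PARTITIONS)
        dpartitions = MIN_PARTITIONS;
    if (dpartitions > MAX_PARTITIONS)
        dpartitions = MAX_PARTITIONS;

    partition_bits = ceil_log2((unsigned int) dpartitions);
    if (partition_bits + used_bits >= 32)
        partition_bits = 32 - used_bits;

    return 1 << partition_bits;
}

int
main(void)
{
    /* 10 million groups of ~64 bytes against a 4MB budget */
    printf("npartitions=%d\n",
           choose_num_partitions(10e6, 64.0, 0, 4 * 1024 * 1024));
    return 0;
}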
    2133             : 
    2134             : /*
    2135             :  * Initialize a freshly-created TupleHashEntry.
    2136             :  */
    2137             : static void
    2138      529632 : initialize_hash_entry(AggState *aggstate, TupleHashTable hashtable,
    2139             :                       TupleHashEntry entry)
    2140             : {
    2141             :     AggStatePerGroup pergroup;
    2142             :     int         transno;
    2143             : 
    2144      529632 :     aggstate->hash_ngroups_current++;
    2145      529632 :     hash_agg_check_limits(aggstate);
    2146             : 
    2147             :     /* no need to allocate or initialize per-group state */
    2148      529632 :     if (aggstate->numtrans == 0)
    2149      214056 :         return;
    2150             : 
    2151      315576 :     pergroup = (AggStatePerGroup) TupleHashEntryGetAdditional(hashtable, entry);
    2152             : 
    2153             :     /*
    2154             :      * Initialize aggregates for the new tuple group; lookup_hash_entries()
    2155             :      * has already selected the relevant grouping set.
    2156             :      */
    2157      780686 :     for (transno = 0; transno < aggstate->numtrans; transno++)
    2158             :     {
    2159      465110 :         AggStatePerTrans pertrans = &aggstate->pertrans[transno];
    2160      465110 :         AggStatePerGroup pergroupstate = &pergroup[transno];
    2161             : 
    2162      465110 :         initialize_aggregate(aggstate, pertrans, pergroupstate);
    2163             :     }
    2164             : }
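
Conceptually, each new hash entry carries an array of per-transition states that must be reset before the first advance. The toy sketch below shows that shape with hypothetical types (PerGroupState, GroupEntry); in the real code the per-group array lives in the entry's "additional" space and each element is reset by initialize_aggregate().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-transition running state (greatly simplified). */
typedef struct PerGroupState
{
    long        transValue;
    bool        transValueIsNull;
    bool        noTransValue;   /* true until the first input is seen */
} PerGroupState;

/* Hypothetical hash entry: grouping key plus per-transition states. */
typedef struct GroupEntry
{
    long        key;
    PerGroupState *pergroup;    /* array of numtrans states */
} GroupEntry;

/*
 * Initialize the per-transition states of a freshly created entry, in the
 * spirit of initialize_hash_entry() looping over initialize_aggregate().
 */
static void
init_group_entry(GroupEntry *entry, long key, int numtrans)
{
    entry->key = key;
    entry->pergroup = calloc(numtrans, sizeof(PerGroupState));
    for (int transno = 0; transno < numtrans; transno++)
    {
        entry->pergroup[transno].transValueIsNull = true;
        entry->pergroup[transno].noTransValue = true;
    }
}

int
main(void)
{
    GroupEntry  entry;

    init_group_entry(&entry, 42, 2);
    printf("key=%ld, trans 0 noTransValue=%d\n",
           entry.key, entry.pergroup[0].noTransValue);
    free(entry.pergroup);
    return 0;
}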
    2165             : 
    2166             : /*
    2167             :  * Look up hash entries for the current tuple in all hashed grouping sets.
    2168             :  *
    2169             :  * Some entries may be left NULL if we are in "spill mode". The same tuple
    2170             :  * will belong to different groups for each grouping set, so it may match a
    2171             :  * group already in memory for one set and a group not in memory for another
    2172             :  * set. When in "spill mode", the tuple will be spilled for each grouping set
    2173             :  * where it doesn't match a group in memory.
    2174             :  *
    2175             :  * NB: It's possible to spill the same tuple for several different grouping
    2176             :  * sets. This may seem wasteful, but it's actually a trade-off: if we spill
    2177             :  * the tuple multiple times for multiple grouping sets, it can be partitioned
    2178             :  * for each grouping set, making the refilling of the hash table very
    2179             :  * efficient.
    2180             :  */
    2181             : static void
    2182     6918084 : lookup_hash_entries(AggState *aggstate)
    2183             : {
    2184     6918084 :     AggStatePerGroup *pergroup = aggstate->hash_pergroup;
    2185     6918084 :     TupleTableSlot *outerslot = aggstate->tmpcontext->ecxt_outertuple;
    2186             :     int         setno;
    2187             : 
    2188    13970620 :     for (setno = 0; setno < aggstate->num_hashes; setno++)
    2189             :     {
    2190     7052536 :         AggStatePerHash perhash = &aggstate->perhash[setno];
    2191     7052536 :         TupleHashTable hashtable = perhash->hashtable;
    2192     7052536 :         TupleTableSlot *hashslot = perhash->hashslot;
    2193             :         TupleHashEntry entry;
    2194             :         uint32      hash;
    2195     7052536 :         bool        isnew = false;
    2196             :         bool       *p_isnew;
    2197             : 
    2198             :         /* if hash table already spilled, don't create new entries */
    2199     7052536 :         p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
    2200             : 
    2201     7052536 :         select_current_set(aggstate, setno, true);
    2202     7052536 :         prepare_hash_slot(perhash,
    2203             :                           outerslot,
    2204             :                           hashslot);
    2205             : 
    2206     7052536 :         entry = LookupTupleHashEntry(hashtable, hashslot,
    2207             :                                      p_isnew, &hash);
    2208             : 
    2209     7052536 :         if (entry != NULL)
    2210             :         {
    2211     6285300 :             if (isnew)
    2212      373952 :                 initialize_hash_entry(aggstate, hashtable, entry);
    2213     6285300 :             pergroup[setno] = TupleHashEntryGetAdditional(hashtable, entry);
    2214             :         }
    2215             :         else
    2216             :         {
    2217      767236 :             HashAggSpill *spill = &aggstate->hash_spills[setno];
    2218      767236 :             TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
    2219             : 
    2220      767236 :             if (spill->partitions == NULL)
    2221           0 :                 hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
    2222           0 :                                    perhash->aggnode->numGroups,
    2223             :                                    aggstate->hashentrysize);
    2224             : 
    2225      767236 :             hashagg_spill_tuple(aggstate, spill, slot, hash);
    2226      767236 :             pergroup[setno] = NULL;
    2227             :         }
    2228             :     }
    2229     6918084 : }
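
The key convention above is that passing a NULL isnew pointer to LookupTupleHashEntry() makes the lookup read-only, which is how spill mode avoids creating new groups: a miss returns no entry and the caller spills the tuple instead. The toy table below (open addressing over long keys, not PostgreSQL's simplehash-based TupleHashTable) demonstrates the same calling convention.

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE 8

/* Tiny fixed-size table of long keys; 0 marks an empty slot. */
static long table[TABLE_SIZE];

/*
 * Lookup-or-insert with the same convention as LookupTupleHashEntry():
 * if isnew is NULL the table is read-only and misses return -1, which is
 * how spill mode prevents new groups from being created.
 */
static int
lookup(long key, bool *isnew)
{
    int         idx = (int) (key % TABLE_SIZE);

    for (int probe = 0; probe < TABLE_SIZE; probe++)
    {
        int         slot = (idx + probe) % TABLE_SIZE;

        if (table[slot] == key)
        {
            if (isnew)
                *isnew = false;
            return slot;
        }
        if (table[slot] == 0)
        {
            if (isnew == NULL)
                return -1;      /* read-only: caller must spill the tuple */
            table[slot] = key;
            *isnew = true;
            return slot;
        }
    }
    return -1;                  /* table full: caller must spill */
}

int
main(void)
{
    bool        isnew;
    bool        spill_mode;
    int         slot;

    lookup(17, &isnew);         /* creates a group */
    spill_mode = true;
    slot = lookup(25, spill_mode ? NULL : &isnew);
    printf("lookup(25) in spill mode -> %d (spill if -1)\n", slot);
    return 0;
}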
    2230             : 
    2231             : /*
    2232             :  * ExecAgg -
    2233             :  *
    2234             :  *    ExecAgg receives tuples from its outer subplan and aggregates over
    2235             :  *    the appropriate attribute for each aggregate function use (Aggref
    2236             :  *    node) appearing in the targetlist or qual of the node.  The number
    2237             :  *    of tuples to aggregate over depends on whether grouped or plain
    2238             :  *    aggregation is selected.  In grouped aggregation, we produce a result
    2239             :  *    row for each group; in plain aggregation there's a single result row
    2240             :  *    for the whole query.  In either case, the value of each aggregate is
    2241             :  *    stored in the expression context to be used when ExecProject evaluates
    2242             :  *    the result tuple.
    2243             :  */
    2244             : static TupleTableSlot *
    2245      835410 : ExecAgg(PlanState *pstate)
    2246             : {
    2247      835410 :     AggState   *node = castNode(AggState, pstate);
    2248      835410 :     TupleTableSlot *result = NULL;
    2249             : 
    2250      835410 :     CHECK_FOR_INTERRUPTS();
    2251             : 
    2252      835410 :     if (!node->agg_done)
    2253             :     {
    2254             :         /* Dispatch based on strategy */
    2255      765920 :         switch (node->phase->aggstrategy)
    2256             :         {
    2257      486764 :             case AGG_HASHED:
    2258      486764 :                 if (!node->table_filled)
    2259       17434 :                     agg_fill_hash_table(node);
    2260             :                 /* FALLTHROUGH */
    2261             :             case AGG_MIXED:
    2262      514126 :                 result = agg_retrieve_hash_table(node);
    2263      514126 :                 break;
    2264      251794 :             case AGG_PLAIN:
    2265             :             case AGG_SORTED:
    2266      251794 :                 result = agg_retrieve_direct(node);
    2267      251602 :                 break;
    2268             :         }
    2269             : 
    2270      765728 :         if (!TupIsNull(result))
    2271      746534 :             return result;
    2272             :     }
    2273             : 
    2274       88684 :     return NULL;
    2275             : }
    2276             : 
    2277             : /*
    2278             :  * ExecAgg for non-hashed case
    2279             :  */
    2280             : static TupleTableSlot *
    2281      251794 : agg_retrieve_direct(AggState *aggstate)
    2282             : {
    2283      251794 :     Agg        *node = aggstate->phase->aggnode;
    2284             :     ExprContext *econtext;
    2285             :     ExprContext *tmpcontext;
    2286             :     AggStatePerAgg peragg;
    2287             :     AggStatePerGroup *pergroups;
    2288             :     TupleTableSlot *outerslot;
    2289             :     TupleTableSlot *firstSlot;
    2290             :     TupleTableSlot *result;
    2291      251794 :     bool        hasGroupingSets = aggstate->phase->numsets > 0;
    2292      251794 :     int         numGroupingSets = Max(aggstate->phase->numsets, 1);
    2293             :     int         currentSet;
    2294             :     int         nextSetSize;
    2295             :     int         numReset;
    2296             :     int         i;
    2297             : 
    2298             :     /*
    2299             :      * get state info from node
    2300             :      *
    2301             :      * econtext is the per-output-tuple expression context
    2302             :      *
    2303             :      * tmpcontext is the per-input-tuple expression context
    2304             :      */
    2305      251794 :     econtext = aggstate->ss.ps.ps_ExprContext;
    2306      251794 :     tmpcontext = aggstate->tmpcontext;
    2307             : 
    2308      251794 :     peragg = aggstate->peragg;
    2309      251794 :     pergroups = aggstate->pergroups;
    2310      251794 :     firstSlot = aggstate->ss.ss_ScanTupleSlot;
    2311             : 
    2312             :     /*
    2313             :      * We loop retrieving groups until we find one matching
    2314             :      * aggstate->ss.ps.qual
    2315             :      *
    2316             :      * For grouping sets, we have the invariant that aggstate->projected_set
    2317             :      * is either -1 (initial call) or the index (starting from 0) in
    2318             :      * gset_lengths for the group we just completed (either by projecting a
    2319             :      * row or by discarding it in the qual).
    2320             :      */
    2321      322626 :     while (!aggstate->agg_done)
    2322             :     {
    2323             :         /*
    2324             :          * Clear the per-output-tuple context for each group, as well as
    2325             :          * aggcontext (which contains any pass-by-ref transvalues of the old
    2326             :          * group).  Some aggregate functions store working state in child
    2327             :          * contexts; those now get reset automatically without us needing to
    2328             :          * do anything special.
    2329             :          *
    2330             :          * We use ReScanExprContext not just ResetExprContext because we want
    2331             :          * any registered shutdown callbacks to be called.  That allows
    2332             :          * aggregate functions to ensure they've cleaned up any non-memory
    2333             :          * resources.
    2334             :          */
    2335      322416 :         ReScanExprContext(econtext);
    2336             : 
    2337             :         /*
    2338             :          * Determine how many grouping sets need to be reset at this boundary.
    2339             :          */
    2340      322416 :         if (aggstate->projected_set >= 0 &&
    2341      247052 :             aggstate->projected_set < numGroupingSets)
    2342      247034 :             numReset = aggstate->projected_set + 1;
    2343             :         else
    2344       75382 :             numReset = numGroupingSets;
    2345             : 
    2346             :         /*
    2347             :          * numReset can change on a phase boundary, but that's OK; we want to
    2348             :          * reset the contexts used in _this_ phase, and later, after possibly
    2349             :          * changing phase, initialize the right number of aggregates for the
    2350             :          * _new_ phase.
    2351             :          */
    2352             : 
    2353      667110 :         for (i = 0; i < numReset; i++)
    2354             :         {
    2355      344694 :             ReScanExprContext(aggstate->aggcontexts[i]);
    2356             :         }
    2357             : 
    2358             :         /*
    2359             :          * Check if input is complete and there are no more groups to project
    2360             :          * in this phase; move to next phase or mark as done.
    2361             :          */
    2362      322416 :         if (aggstate->input_done == true &&
    2363        1614 :             aggstate->projected_set >= (numGroupingSets - 1))
    2364             :         {
    2365         798 :             if (aggstate->current_phase < aggstate->numphases - 1)
    2366             :             {
    2367         204 :                 initialize_phase(aggstate, aggstate->current_phase + 1);
    2368         204 :                 aggstate->input_done = false;
    2369         204 :                 aggstate->projected_set = -1;
    2370         204 :                 numGroupingSets = Max(aggstate->phase->numsets, 1);
    2371         204 :                 node = aggstate->phase->aggnode;
    2372         204 :                 numReset = numGroupingSets;
    2373             :             }
    2374         594 :             else if (aggstate->aggstrategy == AGG_MIXED)
    2375             :             {
    2376             :                 /*
    2377             :                  * Mixed mode; we've output all the grouped stuff and have
    2378             :                  * full hashtables, so switch to outputting those.
    2379             :                  */
    2380         156 :                 initialize_phase(aggstate, 0);
    2381         156 :                 aggstate->table_filled = true;
    2382         156 :                 ResetTupleHashIterator(aggstate->perhash[0].hashtable,
    2383             :                                        &aggstate->perhash[0].hashiter);
    2384         156 :                 select_current_set(aggstate, 0, true);
    2385         156 :                 return agg_retrieve_hash_table(aggstate);
    2386             :             }
    2387             :             else
    2388             :             {
    2389         438 :                 aggstate->agg_done = true;
    2390         438 :                 break;
    2391             :             }
    2392             :         }
    2393             : 
    2394             :         /*
    2395             :          * Get the number of columns in the next grouping set after the last
    2396             :          * projected one (if any). This is the number of columns to compare to
    2397             :          * see if we reached the boundary of that set too.
    2398             :          */
    2399      321822 :         if (aggstate->projected_set >= 0 &&
    2400      246254 :             aggstate->projected_set < (numGroupingSets - 1))
    2401       27294 :             nextSetSize = aggstate->phase->gset_lengths[aggstate->projected_set + 1];
    2402             :         else
    2403      294528 :             nextSetSize = 0;
    2404             : 
    2405             :         /*----------
    2406             :          * If a subgroup for the current grouping set is present, project it.
    2407             :          *
    2408             :          * We have a new group if:
    2409             :          *  - we're out of input but haven't projected all grouping sets
    2410             :          *    (checked above)
    2411             :          * OR
    2412             :          *    - we already projected a row that wasn't from the last grouping
    2413             :          *      set
    2414             :          *    AND
    2415             :          *    - the next grouping set has at least one grouping column (since
    2416             :          *      empty grouping sets project only once input is exhausted)
    2417             :          *    AND
    2418             :          *    - the previous and pending rows differ on the grouping columns
    2419             :          *      of the next grouping set
    2420             :          *----------
    2421             :          */
    2422      321822 :         tmpcontext->ecxt_innertuple = econtext->ecxt_outertuple;
    2423      321822 :         if (aggstate->input_done ||
    2424      321006 :             (node->aggstrategy != AGG_PLAIN &&
    2425      247400 :              aggstate->projected_set != -1 &&
    2426      245438 :              aggstate->projected_set < (numGroupingSets - 1) &&
    2427       19946 :              nextSetSize > 0 &&
    2428       19946 :              !ExecQualAndReset(aggstate->phase->eqfunctions[nextSetSize - 1],
    2429             :                                tmpcontext)))
    2430             :         {
    2431       14150 :             aggstate->projected_set += 1;
    2432             : 
    2433             :             Assert(aggstate->projected_set < numGroupingSets);
    2434       14150 :             Assert(nextSetSize > 0 || aggstate->input_done);
    2435             :         }
    2436             :         else
    2437             :         {
    2438             :             /*
    2439             :              * We no longer care what group we just projected; the next
    2440             :              * projection will always be the first (or only) grouping set
    2441             :              * (unless the input proves to be empty).
    2442             :              */
    2443      307672 :             aggstate->projected_set = 0;
    2444             : 
    2445             :             /*
    2446             :              * If we don't already have the first tuple of the new group,
    2447             :              * fetch it from the outer plan.
    2448             :              */
    2449      307672 :             if (aggstate->grp_firstTuple == NULL)
    2450             :             {
    2451       75568 :                 outerslot = fetch_input_tuple(aggstate);
    2452       75508 :                 if (!TupIsNull(outerslot))
    2453             :                 {
    2454             :                     /*
    2455             :                      * Make a copy of the first input tuple; we will use this
    2456             :                      * for comparisons (in group mode) and for projection.
    2457             :                      */
    2458       62018 :                     aggstate->grp_firstTuple = ExecCopySlotHeapTuple(outerslot);
    2459             :                 }
    2460             :                 else
    2461             :                 {
    2462             :                     /* outer plan produced no tuples at all */
    2463       13490 :                     if (hasGroupingSets)
    2464             :                     {
    2465             :                         /*
    2466             :                          * If there was no input at all, we need to project
    2467             :                          * rows only if there are grouping sets of size 0.
    2468             :                          * Note that this implies that there can't be any
    2469             :                          * references to ungrouped Vars, which would otherwise
    2470             :                          * cause issues with the empty output slot.
    2471             :                          *
    2472             :                          * XXX: This is no longer true; we currently deal with
    2473             :                          * this in finalize_aggregates().
    2474             :                          */
    2475          78 :                         aggstate->input_done = true;
    2476             : 
    2477         108 :                         while (aggstate->phase->gset_lengths[aggstate->projected_set] > 0)
    2478             :                         {
    2479          48 :                             aggstate->projected_set += 1;
    2480          48 :                             if (aggstate->projected_set >= numGroupingSets)
    2481             :                             {
    2482             :                                 /*
    2483             :                                  * We can't set agg_done here because we might
    2484             :                                  * have more phases to do, even though the
    2485             :                                  * input is empty. So we need to restart the
    2486             :                                  * whole outer loop.
    2487             :                                  */
    2488          18 :                                 break;
    2489             :                             }
    2490             :                         }
    2491             : 
    2492          78 :                         if (aggstate->projected_set >= numGroupingSets)
    2493          18 :                             continue;
    2494             :                     }
    2495             :                     else
    2496             :                     {
    2497       13412 :                         aggstate->agg_done = true;
    2498             :                         /* If we are grouping, we should produce no tuples too */
    2499       13412 :                         if (node->aggstrategy != AGG_PLAIN)
    2500         156 :                             return NULL;
    2501             :                     }
    2502             :                 }
    2503             :             }
    2504             : 
    2505             :             /*
    2506             :              * Initialize working state for a new input tuple group.
    2507             :              */
    2508      307438 :             initialize_aggregates(aggstate, pergroups, numReset);
    2509             : 
    2510      307438 :             if (aggstate->grp_firstTuple != NULL)
    2511             :             {
    2512             :                 /*
    2513             :                  * Store the copied first input tuple in the tuple table slot
    2514             :                  * reserved for it.  The tuple will be deleted when it is
    2515             :                  * cleared from the slot.
    2516             :                  */
    2517      294122 :                 ExecForceStoreHeapTuple(aggstate->grp_firstTuple,
    2518             :                                         firstSlot, true);
    2519      294122 :                 aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
    2520             : 
    2521             :                 /* set up for first advance_aggregates call */
    2522      294122 :                 tmpcontext->ecxt_outertuple = firstSlot;
    2523             : 
    2524             :                 /*
    2525             :                  * Process each outer-plan tuple, and then fetch the next one,
    2526             :                  * until we exhaust the outer plan or cross a group boundary.
    2527             :                  */
    2528             :                 for (;;)
    2529             :                 {
    2530             :                     /*
    2531             :                      * During phase 1 only of a mixed agg, we need to update
    2532             :                      * hashtables as well in advance_aggregates.
    2533             :                      */
    2534    22065970 :                     if (aggstate->aggstrategy == AGG_MIXED &&
    2535       38062 :                         aggstate->current_phase == 1)
    2536             :                     {
    2537       38062 :                         lookup_hash_entries(aggstate);
    2538             :                     }
    2539             : 
    2540             :                     /* Advance the aggregates (or combine functions) */
    2541    22065970 :                     advance_aggregates(aggstate);
    2542             : 
    2543             :                     /* Reset per-input-tuple context after each tuple */
    2544    22065892 :                     ResetExprContext(tmpcontext);
    2545             : 
    2546    22065892 :                     outerslot = fetch_input_tuple(aggstate);
    2547    22065862 :                     if (TupIsNull(outerslot))
    2548             :                     {
    2549             :                         /* no more outer-plan tuples available */
    2550             : 
    2551             :                         /* if we built hash tables, finalize any spills */
    2552       61886 :                         if (aggstate->aggstrategy == AGG_MIXED &&
    2553         144 :                             aggstate->current_phase == 1)
    2554         144 :                             hashagg_finish_initial_spills(aggstate);
    2555             : 
    2556       61886 :                         if (hasGroupingSets)
    2557             :                         {
    2558         720 :                             aggstate->input_done = true;
    2559         720 :                             break;
    2560             :                         }
    2561             :                         else
    2562             :                         {
    2563       61166 :                             aggstate->agg_done = true;
    2564       61166 :                             break;
    2565             :                         }
    2566             :                     }
    2567             :                     /* set up for next advance_aggregates call */
    2568    22003976 :                     tmpcontext->ecxt_outertuple = outerslot;
    2569             : 
    2570             :                     /*
    2571             :                      * If we are grouping, check whether we've crossed a group
    2572             :                      * boundary.
    2573             :                      */
    2574    22003976 :                     if (node->aggstrategy != AGG_PLAIN && node->numCols > 0)
    2575             :                     {
    2576     2471888 :                         tmpcontext->ecxt_innertuple = firstSlot;
    2577     2471888 :                         if (!ExecQual(aggstate->phase->eqfunctions[node->numCols - 1],
    2578             :                                       tmpcontext))
    2579             :                         {
    2580      232128 :                             aggstate->grp_firstTuple = ExecCopySlotHeapTuple(outerslot);
    2581      232128 :                             break;
    2582             :                         }
    2583             :                     }
    2584             :                 }
    2585             :             }
    2586             : 
    2587             :             /*
    2588             :              * Use the representative input tuple for any references to
    2589             :              * non-aggregated input columns in aggregate direct args, the node
    2590             :              * qual, and the tlist.  (If we are not grouping, and there are no
    2591             :              * input rows at all, we will come here with an empty firstSlot
    2592             :              * ... but if not grouping, there can't be any references to
    2593             :              * non-aggregated input columns, so no problem.)
    2594             :              */
    2595      307330 :             econtext->ecxt_outertuple = firstSlot;
    2596             :         }
    2597             : 
    2598             :         Assert(aggstate->projected_set >= 0);
    2599             : 
    2600      321480 :         currentSet = aggstate->projected_set;
    2601             : 
    2602      321480 :         prepare_projection_slot(aggstate, econtext->ecxt_outertuple, currentSet);
    2603             : 
    2604      321480 :         select_current_set(aggstate, currentSet, false);
    2605             : 
    2606      321480 :         finalize_aggregates(aggstate,
    2607             :                             peragg,
    2608      321480 :                             pergroups[currentSet]);
    2609             : 
    2610             :         /*
    2611             :          * If there's no row to project right now, we must continue rather
    2612             :          * than returning a null since there might be more groups.
    2613             :          */
    2614      321468 :         result = project_aggregates(aggstate);
    2615      321456 :         if (result)
    2616      250642 :             return result;
    2617             :     }
    2618             : 
    2619             :     /* No more groups */
    2620         648 :     return NULL;
    2621             : }
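
Stripped of grouping sets, phases, and quals, the sorted path above boils down to: remember the first tuple of a group, advance the transition state until an input row differs on the grouping columns, then finalize and project one result row. A miniature version of that loop, with a plain integer key standing in for the grouping columns and a running sum standing in for the transition state:

#include <stdio.h>

typedef struct Row
{
    int         key;
    int         value;
} Row;

/*
 * Sorted aggregation in miniature: input is ordered by key, so a group
 * ends whenever the incoming key differs from the group's first row,
 * mirroring the boundary check agg_retrieve_direct() performs with
 * ExecQual on the grouping columns.
 */
int
main(void)
{
    Row         input[] = {{1, 10}, {1, 5}, {2, 7}, {2, 1}, {2, 2}, {3, 4}};
    int         ninput = sizeof(input) / sizeof(input[0]);
    int         i = 0;

    while (i < ninput)
    {
        int         groupkey = input[i].key;    /* "first tuple" of the group */
        long        sum = 0;

        /* advance the transition state until we cross a group boundary */
        while (i < ninput && input[i].key == groupkey)
        {
            sum += input[i].value;
            i++;
        }

        /* "finalize" and project one result row per group */
        printf("key=%d sum=%ld\n", groupkey, sum);
    }
    return 0;
}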
    2622             : 
    2623             : /*
    2624             :  * ExecAgg for hashed case: read input and build hash table
    2625             :  */
    2626             : static void
    2627       17434 : agg_fill_hash_table(AggState *aggstate)
    2628             : {
    2629             :     TupleTableSlot *outerslot;
    2630       17434 :     ExprContext *tmpcontext = aggstate->tmpcontext;
    2631             : 
    2632             :     /*
    2633             :      * Process each outer-plan tuple, and then fetch the next one, until we
    2634             :      * exhaust the outer plan.
    2635             :      */
    2636             :     for (;;)
    2637             :     {
    2638     6897456 :         outerslot = fetch_input_tuple(aggstate);
    2639     6897456 :         if (TupIsNull(outerslot))
    2640             :             break;
    2641             : 
    2642             :         /* set up for lookup_hash_entries and advance_aggregates */
    2643     6880022 :         tmpcontext->ecxt_outertuple = outerslot;
    2644             : 
    2645             :         /* Find or build hashtable entries */
    2646     6880022 :         lookup_hash_entries(aggstate);
    2647             : 
    2648             :         /* Advance the aggregates (or combine functions) */
    2649     6880022 :         advance_aggregates(aggstate);
    2650             : 
    2651             :         /*
    2652             :          * Reset per-input-tuple context after each tuple, but note that the
    2653             :          * hash lookups do this too
    2654             :          */
    2655     6880022 :         ResetExprContext(aggstate->tmpcontext);
    2656             :     }
    2657             : 
    2658             :     /* finalize spills, if any */
    2659       17434 :     hashagg_finish_initial_spills(aggstate);
    2660             : 
    2661       17434 :     aggstate->table_filled = true;
    2662             :     /* Initialize to walk the first hash table */
    2663       17434 :     select_current_set(aggstate, 0, true);
    2664       17434 :     ResetTupleHashIterator(aggstate->perhash[0].hashtable,
    2665             :                            &aggstate->perhash[0].hashiter);
    2666       17434 : }
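
The hashed path has the opposite shape: consume the entire input first, keeping one transition state per group in a hash table, and only then start returning rows. Below is a miniature count-per-key version of that fill-then-retrieve loop, using a toy open-addressing table and no spilling; lookup_group() plays the role of lookup_hash_entries() plus advance_aggregates().

#include <stdio.h>

#define NBUCKETS 16

typedef struct Group
{
    int         used;
    int         key;
    long        count;          /* the "transition state": count per key */
} Group;

static Group htable[NBUCKETS];

/* Find or create the group for a key (open addressing, toy-sized). */
static Group *
lookup_group(int key)
{
    for (int probe = 0; probe < NBUCKETS; probe++)
    {
        Group      *g = &htable[(key + probe) % NBUCKETS];

        if (!g->used)
        {
            g->used = 1;
            g->key = key;
            return g;
        }
        if (g->key == key)
            return g;
    }
    return NULL;                /* full; the real code would spill here */
}

/*
 * Hash aggregation in miniature: consume the whole input, advancing one
 * transition state per group, then emit the table's contents -- the same
 * fill-then-retrieve shape as agg_fill_hash_table().
 */
int
main(void)
{
    int         input[] = {3, 1, 3, 2, 3, 1};

    for (int i = 0; i < (int) (sizeof(input) / sizeof(input[0])); i++)
    {
        Group      *g = lookup_group(input[i]);

        if (g != NULL)
            g->count++;         /* advance_aggregates() analogue */
    }

    for (int b = 0; b < NBUCKETS; b++)
        if (htable[b].used)
            printf("key=%d count=%ld\n", htable[b].key, htable[b].count);
    return 0;
}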
    2667             : 
    2668             : /*
    2669             :  * If any data was spilled during hash aggregation, reset the hash table and
    2670             :  * reprocess one batch of spilled data. After reprocessing a batch, the hash
    2671             :  * table will again contain data, ready to be consumed by
    2672             :  * agg_retrieve_hash_table_in_memory().
    2673             :  *
    2674             :  * Should only be called after all in-memory hash table entries have been
    2675             :  * finalized and emitted.
    2676             :  *
    2677             :  * Return false when input is exhausted and there's no more work to be done;
    2678             :  * otherwise return true.
    2679             :  */
    2680             : static bool
    2681       45334 : agg_refill_hash_table(AggState *aggstate)
    2682             : {
    2683             :     HashAggBatch *batch;
    2684             :     AggStatePerHash perhash;
    2685             :     HashAggSpill spill;
    2686       45334 :     LogicalTapeSet *tapeset = aggstate->hash_tapeset;
    2687       45334 :     bool        spill_initialized = false;
    2688             : 
    2689       45334 :     if (aggstate->hash_batches == NIL)
    2690       18390 :         return false;
    2691             : 
    2692             :     /* hash_batches is a stack, with the top item at the end of the list */
    2693       26944 :     batch = llast(aggstate->hash_batches);
    2694       26944 :     aggstate->hash_batches = list_delete_last(aggstate->hash_batches);
    2695             : 
    2696       26944 :     hash_agg_set_limits(aggstate->hashentrysize, batch->input_card,
    2697             :                         batch->used_bits, &aggstate->hash_mem_limit,
    2698             :                         &aggstate->hash_ngroups_limit, NULL);
    2699             : 
    2700             :     /*
    2701             :      * Each batch only processes one grouping set; set the rest to NULL so
    2702             :      * that advance_aggregates() knows to ignore them. We don't touch
    2703             :      * pergroups for sorted grouping sets here, because they will be needed if
    2704             :      * we rescan later. The expressions for sorted grouping sets will not be
    2705             :      * evaluated after we recompile anyway.
    2706             :      */
    2707      207428 :     MemSet(aggstate->hash_pergroup, 0,
    2708             :            sizeof(AggStatePerGroup) * aggstate->num_hashes);
    2709             : 
    2710             :     /* free memory and reset hash tables */
    2711       26944 :     ReScanExprContext(aggstate->hashcontext);
    2712      207428 :     for (int setno = 0; setno < aggstate->num_hashes; setno++)
    2713      180484 :         ResetTupleHashTable(aggstate->perhash[setno].hashtable);
    2714             : 
    2715       26944 :     aggstate->hash_ngroups_current = 0;
    2716             : 
    2717             :     /*
    2718             :      * In AGG_MIXED mode, hash aggregation happens in phase 1 and the output
    2719             :      * happens in phase 0. So, we switch to phase 1 when processing a batch,
    2720             :      * and back to phase 0 after the batch is done.
    2721             :      */
    2722             :     Assert(aggstate->current_phase == 0);
    2723       26944 :     if (aggstate->phase->aggstrategy == AGG_MIXED)
    2724             :     {
    2725       26262 :         aggstate->current_phase = 1;
    2726       26262 :         aggstate->phase = &aggstate->phases[aggstate->current_phase];
    2727             :     }
    2728             : 
    2729       26944 :     select_current_set(aggstate, batch->setno, true);
    2730             : 
    2731       26944 :     perhash = &aggstate->perhash[aggstate->current_set];
    2732             : 
    2733             :     /*
    2734             :      * Spilled tuples are always read back as MinimalTuples, not necessarily
    2735             :      * the outer plan's slot type, so recompile the aggregate expressions.
    2736             :      *
    2737             :      * We still need the NULL check, because we are only processing one
    2738             :      * grouping set at a time and the rest will be NULL.
    2739             :      */
    2740       26944 :     hashagg_recompile_expressions(aggstate, true, true);
    2741             : 
    2742       26944 :     INJECTION_POINT("hash-aggregate-process-batch", NULL);
    2743             :     for (;;)
    2744     1216776 :     {
    2745     1243720 :         TupleTableSlot *spillslot = aggstate->hash_spill_rslot;
    2746     1243720 :         TupleTableSlot *hashslot = perhash->hashslot;
    2747     1243720 :         TupleHashTable hashtable = perhash->hashtable;
    2748             :         TupleHashEntry entry;
    2749             :         MinimalTuple tuple;
    2750             :         uint32      hash;
    2751     1243720 :         bool        isnew = false;
    2752     1243720 :         bool       *p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
    2753             : 
    2754     1243720 :         CHECK_FOR_INTERRUPTS();
    2755             : 
    2756     1243720 :         tuple = hashagg_batch_read(batch, &hash);
    2757     1243720 :         if (tuple == NULL)
    2758       26944 :             break;
    2759             : 
    2760     1216776 :         ExecStoreMinimalTuple(tuple, spillslot, true);
    2761     1216776 :         aggstate->tmpcontext->ecxt_outertuple = spillslot;
    2762             : 
    2763     1216776 :         prepare_hash_slot(perhash,
    2764     1216776 :                           aggstate->tmpcontext->ecxt_outertuple,
    2765             :                           hashslot);
    2766     1216776 :         entry = LookupTupleHashEntryHash(hashtable, hashslot,
    2767             :                                          p_isnew, hash);
    2768             : 
    2769     1216776 :         if (entry != NULL)
    2770             :         {
    2771      767236 :             if (isnew)
    2772      155680 :                 initialize_hash_entry(aggstate, hashtable, entry);
    2773      767236 :             aggstate->hash_pergroup[batch->setno] = TupleHashEntryGetAdditional(hashtable, entry);
    2774      767236 :             advance_aggregates(aggstate);
    2775             :         }
    2776             :         else
    2777             :         {
    2778      449540 :             if (!spill_initialized)
    2779             :             {
    2780             :                 /*
    2781             :                  * Avoid initializing the spill until we actually need it so
    2782             :                  * that we don't assign tapes that will never be used.
    2783             :                  */
    2784       12500 :                 spill_initialized = true;
    2785       12500 :                 hashagg_spill_init(&spill, tapeset, batch->used_bits,
    2786             :                                    batch->input_card, aggstate->hashentrysize);
    2787             :             }
    2788             :             /* no memory for a new group, spill */
    2789      449540 :             hashagg_spill_tuple(aggstate, &spill, spillslot, hash);
    2790             : 
    2791      449540 :             aggstate->hash_pergroup[batch->setno] = NULL;
    2792             :         }
    2793             : 
    2794             :         /*
    2795             :          * Reset per-input-tuple context after each tuple, but note that the
    2796             :          * hash lookups do this too
    2797             :          */
    2798     1216776 :         ResetExprContext(aggstate->tmpcontext);
    2799             :     }
    2800             : 
    2801       26944 :     LogicalTapeClose(batch->input_tape);
    2802             : 
    2803             :     /* change back to phase 0 */
    2804       26944 :     aggstate->current_phase = 0;
    2805       26944 :     aggstate->phase = &aggstate->phases[aggstate->current_phase];
    2806             : 
    2807       26944 :     if (spill_initialized)
    2808             :     {
    2809       12500 :         hashagg_spill_finish(aggstate, &spill, batch->setno);
    2810       12500 :         hash_agg_update_metrics(aggstate, true, spill.npartitions);
    2811             :     }
    2812             :     else
    2813       14444 :         hash_agg_update_metrics(aggstate, true, 0);
    2814             : 
    2815       26944 :     aggstate->hash_spill_mode = false;
    2816             : 
    2817             :     /* prepare to walk the first hash table */
    2818       26944 :     select_current_set(aggstate, batch->setno, true);
    2819       26944 :     ResetTupleHashIterator(aggstate->perhash[batch->setno].hashtable,
    2820             :                            &aggstate->perhash[batch->setno].hashiter);
    2821             : 
    2822       26944 :     pfree(batch);
    2823             : 
    2824       26944 :     return true;
    2825             : }
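
The batch-processing loop above reads back one spilled partition, advances any group that is (or still fits) in the hash table, and re-spills everything else. A minimal, self-contained sketch of that flow (not PostgreSQL code): a fixed-capacity array stands in for the memory-bounded hash table, spill_mode plays the role of aggstate->hash_spill_mode, and key % NPARTS stands in for the hash/mask/shift partition selection.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define TABLE_CAP 4             /* pretend the memory limit allows 4 groups */
    #define NPARTS    2             /* overflow partitions for the next pass */

    typedef struct { uint32_t key; long count; bool used; } Group;

    static Group table_[TABLE_CAP];
    static bool  spill_mode = false;

    /* Find or create a group; return NULL when the key is not resident and no
     * new groups may be created (the caller must re-spill the row). */
    static Group *lookup(uint32_t key)
    {
        int free_slot = -1;

        for (int i = 0; i < TABLE_CAP; i++)
        {
            if (table_[i].used && table_[i].key == key)
                return &table_[i];
            if (!table_[i].used && free_slot < 0)
                free_slot = i;
        }
        if (spill_mode || free_slot < 0)
        {
            spill_mode = true;      /* table is full: stop creating groups */
            return NULL;
        }
        table_[free_slot] = (Group) {.key = key, .count = 0, .used = true};
        return &table_[free_slot];
    }

    int main(void)
    {
        uint32_t batch[] = {1, 2, 3, 4, 5, 1, 6, 2};
        long     respilled[NPARTS] = {0, 0};

        for (size_t i = 0; i < sizeof(batch) / sizeof(batch[0]); i++)
        {
            Group *g = lookup(batch[i]);

            if (g != NULL)
                g->count++;                         /* advance the aggregate */
            else
                respilled[batch[i] % NPARTS]++;     /* save for a later batch */
        }
        for (int p = 0; p < NPARTS; p++)
            printf("partition %d: %ld re-spilled rows\n", p, respilled[p]);
        return 0;
    }
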
    2826             : 
    2827             : /*
    2828             :  * ExecAgg for hashed case: retrieving groups from hash table
    2829             :  *
    2830             :  * After exhausting in-memory tuples, also try refilling the hash table using
    2831             :  * previously-spilled tuples. Only returns NULL after all in-memory and
    2832             :  * spilled tuples are exhausted.
    2833             :  */
    2834             : static TupleTableSlot *
    2835      514282 : agg_retrieve_hash_table(AggState *aggstate)
    2836             : {
    2837      514282 :     TupleTableSlot *result = NULL;
    2838             : 
    2839     1037118 :     while (result == NULL)
    2840             :     {
    2841      541226 :         result = agg_retrieve_hash_table_in_memory(aggstate);
    2842      541226 :         if (result == NULL)
    2843             :         {
    2844       45334 :             if (!agg_refill_hash_table(aggstate))
    2845             :             {
    2846       18390 :                 aggstate->agg_done = true;
    2847       18390 :                 break;
    2848             :             }
    2849             :         }
    2850             :     }
    2851             : 
    2852      514282 :     return result;
    2853             : }
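
The wrapper above has a simple contract: keep draining the in-memory tables, refill from a pending spilled batch when they run dry, and report end-of-data only once both sources are exhausted. A standalone sketch of that contract, with ordinary arrays standing in for the hash tables and batch list (hypothetical names, not PostgreSQL code):

    #include <stdio.h>

    /* Two pending "batches"; each refills the in-memory list when drained. */
    static const char *batches[][2] = {{"c", "d"}, {"e", NULL}};
    static int   next_batch = 0;

    static const char *mem[3] = {"a", "b", NULL};   /* groups currently in memory */
    static int   mem_pos = 0;

    static int refill(void)                 /* returns 0 when no batches remain */
    {
        if (next_batch >= 2)
            return 0;
        mem[0] = batches[next_batch][0];
        mem[1] = batches[next_batch][1];
        mem[2] = NULL;
        mem_pos = 0;
        next_batch++;
        return 1;
    }

    static const char *next_group(void)
    {
        const char *result = NULL;

        while (result == NULL)
        {
            if (mem_pos < 3 && mem[mem_pos] != NULL)
                result = mem[mem_pos++];    /* an in-memory group is available */
            else if (!refill())
                break;                      /* no spilled batches left: all done */
        }
        return result;
    }

    int main(void)
    {
        for (const char *g = next_group(); g != NULL; g = next_group())
            printf("group %s\n", g);
        return 0;
    }
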
    2854             : 
    2855             : /*
    2856             :  * Retrieve the groups from the in-memory hash tables without considering any
    2857             :  * spilled tuples.
    2858             :  */
    2859             : static TupleTableSlot *
    2860      541226 : agg_retrieve_hash_table_in_memory(AggState *aggstate)
    2861             : {
    2862             :     ExprContext *econtext;
    2863             :     AggStatePerAgg peragg;
    2864             :     AggStatePerGroup pergroup;
    2865             :     TupleHashEntry entry;
    2866             :     TupleTableSlot *firstSlot;
    2867             :     TupleTableSlot *result;
    2868             :     AggStatePerHash perhash;
    2869             : 
    2870             :     /*
    2871             :      * get state info from node.
    2872             :      *
    2873             :      * econtext is the per-output-tuple expression context.
    2874             :      */
    2875      541226 :     econtext = aggstate->ss.ps.ps_ExprContext;
    2876      541226 :     peragg = aggstate->peragg;
    2877      541226 :     firstSlot = aggstate->ss.ss_ScanTupleSlot;
    2878             : 
    2879             :     /*
    2880             :      * Note that perhash (and therefore anything accessed through it) can
    2881             :      * change inside the loop, as we change between grouping sets.
    2882             :      */
    2883      541226 :     perhash = &aggstate->perhash[aggstate->current_set];
    2884             : 
    2885             :     /*
    2886             :      * We loop retrieving groups until we find one satisfying
    2887             :      * aggstate->ss.ps.qual
    2888             :      */
    2889             :     for (;;)
    2890      135954 :     {
    2891      677180 :         TupleTableSlot *hashslot = perhash->hashslot;
    2892      677180 :         TupleHashTable hashtable = perhash->hashtable;
    2893             :         int         i;
    2894             : 
    2895      677180 :         CHECK_FOR_INTERRUPTS();
    2896             : 
    2897             :         /*
    2898             :          * Find the next entry in the hash table
    2899             :          */
    2900      677180 :         entry = ScanTupleHashTable(hashtable, &perhash->hashiter);
    2901      677180 :         if (entry == NULL)
    2902             :         {
    2903      145626 :             int         nextset = aggstate->current_set + 1;
    2904             : 
    2905      145626 :             if (nextset < aggstate->num_hashes)
    2906             :             {
    2907             :                 /*
    2908             :                  * Switch to next grouping set, reinitialize, and restart the
    2909             :                  * loop.
    2910             :                  */
    2911      100292 :                 select_current_set(aggstate, nextset, true);
    2912             : 
    2913      100292 :                 perhash = &aggstate->perhash[aggstate->current_set];
    2914             : 
    2915      100292 :                 ResetTupleHashIterator(perhash->hashtable, &perhash->hashiter);
    2916             : 
    2917      100292 :                 continue;
    2918             :             }
    2919             :             else
    2920             :             {
    2921       45334 :                 return NULL;
    2922             :             }
    2923             :         }
    2924             : 
    2925             :         /*
    2926             :          * Clear the per-output-tuple context for each group
    2927             :          *
    2928             :          * We intentionally don't use ReScanExprContext here; if any aggs have
    2929             :          * registered shutdown callbacks, they mustn't be called yet, since we
    2930             :          * might not be done with that agg.
    2931             :          */
    2932      531554 :         ResetExprContext(econtext);
    2933             : 
    2934             :         /*
    2935             :          * Transform representative tuple back into one with the right
    2936             :          * columns.
    2937             :          */
    2938      531554 :         ExecStoreMinimalTuple(TupleHashEntryGetTuple(entry), hashslot, false);
    2939      531554 :         slot_getallattrs(hashslot);
    2940             : 
    2941      531554 :         ExecClearTuple(firstSlot);
    2942      531554 :         memset(firstSlot->tts_isnull, true,
    2943      531554 :                firstSlot->tts_tupleDescriptor->natts * sizeof(bool));
    2944             : 
    2945     1396654 :         for (i = 0; i < perhash->numhashGrpCols; i++)
    2946             :         {
    2947      865100 :             int         varNumber = perhash->hashGrpColIdxInput[i] - 1;
    2948             : 
    2949      865100 :             firstSlot->tts_values[varNumber] = hashslot->tts_values[i];
    2950      865100 :             firstSlot->tts_isnull[varNumber] = hashslot->tts_isnull[i];
    2951             :         }
    2952      531554 :         ExecStoreVirtualTuple(firstSlot);
    2953             : 
    2954      531554 :         pergroup = (AggStatePerGroup) TupleHashEntryGetAdditional(hashtable, entry);
    2955             : 
    2956             :         /*
    2957             :          * Use the representative input tuple for any references to
    2958             :          * non-aggregated input columns in the qual and tlist.
    2959             :          */
    2960      531554 :         econtext->ecxt_outertuple = firstSlot;
    2961             : 
    2962      531554 :         prepare_projection_slot(aggstate,
    2963             :                                 econtext->ecxt_outertuple,
    2964             :                                 aggstate->current_set);
    2965             : 
    2966      531554 :         finalize_aggregates(aggstate, peragg, pergroup);
    2967             : 
    2968      531554 :         result = project_aggregates(aggstate);
    2969      531554 :         if (result)
    2970      495892 :             return result;
    2971             :     }
    2972             : 
    2973             :     /* No more groups */
    2974             :     return NULL;
    2975             : }
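
The slot-rebuilding step above scatters the compact representative tuple (which holds only the grouping columns, in hash-column order) back into an all-NULL scan-width row using perhash->hashGrpColIdxInput[]. A small standalone sketch of that scatter, with made-up column numbers:

    #include <stdio.h>
    #include <stdbool.h>

    #define NATTS       5           /* width of the scan tuple */
    #define NUM_GRPCOLS 2           /* grouping columns kept in the hash table */

    int main(void)
    {
        /* compact representative values, in hash-column order */
        int  hashvals[NUM_GRPCOLS] = {42, 7};
        bool hashnull[NUM_GRPCOLS] = {false, false};
        /* 1-based positions of those columns in the full input row */
        int  grpColIdxInput[NUM_GRPCOLS] = {4, 2};

        int  values[NATTS] = {0};
        bool isnull[NATTS];

        /* start with an all-NULL row, then put the grouping columns back */
        for (int i = 0; i < NATTS; i++)
            isnull[i] = true;
        for (int i = 0; i < NUM_GRPCOLS; i++)
        {
            int varNumber = grpColIdxInput[i] - 1;

            values[varNumber] = hashvals[i];
            isnull[varNumber] = hashnull[i];
        }

        for (int i = 0; i < NATTS; i++)
        {
            if (isnull[i])
                printf("att %d: NULL\n", i + 1);
            else
                printf("att %d: %d\n", i + 1, values[i]);
        }
        return 0;
    }
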
    2976             : 
    2977             : /*
    2978             :  * hashagg_spill_init
    2979             :  *
    2980             :  * Called after we determined that spilling is necessary. Chooses the number
    2981             :  * of partitions to create, and initializes them.
    2982             :  */
    2983             : static void
    2984       12622 : hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *tapeset, int used_bits,
    2985             :                    double input_groups, double hashentrysize)
    2986             : {
    2987             :     int         npartitions;
    2988             :     int         partition_bits;
    2989             : 
    2990       12622 :     npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
    2991             :                                              used_bits, &partition_bits);
    2992             : 
    2993             : #ifdef USE_INJECTION_POINTS
    2994       12622 :     if (IS_INJECTION_POINT_ATTACHED("hash-aggregate-single-partition"))
    2995             :     {
    2996          10 :         npartitions = 1;
    2997          10 :         partition_bits = 0;
    2998          10 :         INJECTION_POINT_CACHED("hash-aggregate-single-partition", NULL);
    2999             :     }
    3000             : #endif
    3001             : 
    3002       12622 :     spill->partitions = palloc0_array(LogicalTape *, npartitions);
    3003       12622 :     spill->ntuples = palloc0_array(int64, npartitions);
    3004       12622 :     spill->hll_card = palloc0_array(hyperLogLogState, npartitions);
    3005             : 
    3006       63080 :     for (int i = 0; i < npartitions; i++)
    3007       50458 :         spill->partitions[i] = LogicalTapeCreate(tapeset);
    3008             : 
    3009       12622 :     spill->shift = 32 - used_bits - partition_bits;
    3010       12622 :     if (spill->shift < 32)
    3011       12612 :         spill->mask = (npartitions - 1) << spill->shift;
    3012             :     else
    3013          10 :         spill->mask = 0;
    3014       12622 :     spill->npartitions = npartitions;
    3015             : 
    3016       63080 :     for (int i = 0; i < npartitions; i++)
    3017       50458 :         initHyperLogLog(&spill->hll_card[i], HASHAGG_HLL_BIT_WIDTH);
    3018       12622 : }
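
A worked example of the shift/mask arithmetic above, with the partition count fixed at 4 instead of asking hash_choose_num_partitions (so used_bits = 0 and partition_bits = 2): the high bits of the 32-bit hash select the partition, and a batch built from that partition later reports used_bits = 32 - shift, so a second-level spill consumes the next-lower bits.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int      used_bits = 0;         /* first spill: no bits consumed yet */
        int      partition_bits = 2;    /* fixed here; normally based on the estimates */
        int      npartitions = 1 << partition_bits;
        int      shift = 32 - used_bits - partition_bits;          /* 30 */
        uint32_t mask = (uint32_t) (npartitions - 1) << shift;     /* 0xC0000000 */

        uint32_t hash = 0xDEADBEEFu;
        int      partition = (int) ((hash & mask) >> shift);       /* top 2 bits: 3 */

        printf("npartitions=%d shift=%d mask=0x%08X\n",
               npartitions, shift, (unsigned) mask);
        printf("hash 0x%08X -> partition %d (next level sees used_bits=%d)\n",
               (unsigned) hash, partition, 32 - shift);
        return 0;
    }
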
    3019             : 
    3020             : /*
    3021             :  * hashagg_spill_tuple
    3022             :  *
    3023             :  * No room for new groups in the hash table. Save for later in the appropriate
    3024             :  * partition.
    3025             :  */
    3026             : static Size
    3027     1216776 : hashagg_spill_tuple(AggState *aggstate, HashAggSpill *spill,
    3028             :                     TupleTableSlot *inputslot, uint32 hash)
    3029             : {
    3030             :     TupleTableSlot *spillslot;
    3031             :     int         partition;
    3032             :     MinimalTuple tuple;
    3033             :     LogicalTape *tape;
    3034     1216776 :     int         total_written = 0;
    3035             :     bool        shouldFree;
    3036             : 
    3037             :     Assert(spill->partitions != NULL);
    3038             : 
    3039             :     /* spill only attributes that we actually need */
    3040     1216776 :     if (!aggstate->all_cols_needed)
    3041             :     {
    3042        1572 :         spillslot = aggstate->hash_spill_wslot;
    3043        1572 :         slot_getsomeattrs(inputslot, aggstate->max_colno_needed);
    3044        1572 :         ExecClearTuple(spillslot);
    3045        4716 :         for (int i = 0; i < spillslot->tts_tupleDescriptor->natts; i++)
    3046             :         {
    3047        3144 :             if (bms_is_member(i + 1, aggstate->colnos_needed))
    3048             :             {
    3049        1572 :                 spillslot->tts_values[i] = inputslot->tts_values[i];
    3050        1572 :                 spillslot->tts_isnull[i] = inputslot->tts_isnull[i];
    3051             :             }
    3052             :             else
    3053        1572 :                 spillslot->tts_isnull[i] = true;
    3054             :         }
    3055        1572 :         ExecStoreVirtualTuple(spillslot);
    3056             :     }
    3057             :     else
    3058     1215204 :         spillslot = inputslot;
    3059             : 
    3060     1216776 :     tuple = ExecFetchSlotMinimalTuple(spillslot, &shouldFree);
    3061             : 
    3062     1216776 :     if (spill->shift < 32)
    3063     1195776 :         partition = (hash & spill->mask) >> spill->shift;
    3064             :     else
    3065       21000 :         partition = 0;
    3066             : 
    3067     1216776 :     spill->ntuples[partition]++;
    3068             : 
    3069             :     /*
    3070             :      * All hash values destined for a given partition have some bits in
    3071             :      * common, which causes bad HLL cardinality estimates. Hash the hash to
    3072             :      * get a more uniform distribution.
    3073             :      */
    3074     1216776 :     addHyperLogLog(&spill->hll_card[partition], hash_bytes_uint32(hash));
    3075             : 
    3076     1216776 :     tape = spill->partitions[partition];
    3077             : 
    3078     1216776 :     LogicalTapeWrite(tape, &hash, sizeof(uint32));
    3079     1216776 :     total_written += sizeof(uint32);
    3080             : 
    3081     1216776 :     LogicalTapeWrite(tape, tuple, tuple->t_len);
    3082     1216776 :     total_written += tuple->t_len;
    3083             : 
    3084     1216776 :     if (shouldFree)
    3085      767236 :         pfree(tuple);
    3086             : 
    3087     1216776 :     return total_written;
    3088             : }
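
The "spill only attributes that we actually need" branch above copies the referenced columns into a scratch slot and writes NULLs for the rest, keeping the spilled MinimalTuple narrow. A tiny sketch of the same projection, with a plain bitmask standing in for aggstate->colnos_needed:

    #include <stdio.h>
    #include <stdbool.h>

    #define NATTS 4

    int main(void)
    {
        int      input[NATTS] = {10, 20, 30, 40};
        int      spill[NATTS] = {0};
        bool     isnull[NATTS];
        unsigned needed = (1u << 0) | (1u << 2);    /* columns 1 and 3 are referenced */

        for (int i = 0; i < NATTS; i++)
        {
            if (needed & (1u << i))
            {
                spill[i] = input[i];
                isnull[i] = false;
            }
            else
                isnull[i] = true;   /* unreferenced column: spill a NULL instead */
        }

        for (int i = 0; i < NATTS; i++)
        {
            if (isnull[i])
                printf("col %d: NULL\n", i + 1);
            else
                printf("col %d: %d\n", i + 1, spill[i]);
        }
        return 0;
    }
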
    3089             : 
    3090             : /*
    3091             :  * hashagg_batch_new
    3092             :  *
    3093             :  * Construct a HashAggBatch item, which represents one iteration of HashAgg to
    3094             :  * be done.
    3095             :  */
    3096             : static HashAggBatch *
    3097       26944 : hashagg_batch_new(LogicalTape *input_tape, int setno,
    3098             :                   int64 input_tuples, double input_card, int used_bits)
    3099             : {
    3100       26944 :     HashAggBatch *batch = palloc0_object(HashAggBatch);
    3101             : 
    3102       26944 :     batch->setno = setno;
    3103       26944 :     batch->used_bits = used_bits;
    3104       26944 :     batch->input_tape = input_tape;
    3105       26944 :     batch->input_tuples = input_tuples;
    3106       26944 :     batch->input_card = input_card;
    3107             : 
    3108       26944 :     return batch;
    3109             : }
    3110             : 
    3111             : /*
    3112             :  * hashagg_batch_read
    3113             :  *      read the next tuple from a batch's tape.  Return NULL if no more.
    3114             :  */
    3115             : static MinimalTuple
    3116     1243720 : hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
    3117             : {
    3118     1243720 :     LogicalTape *tape = batch->input_tape;
    3119             :     MinimalTuple tuple;
    3120             :     uint32      t_len;
    3121             :     size_t      nread;
    3122             :     uint32      hash;
    3123             : 
    3124     1243720 :     nread = LogicalTapeRead(tape, &hash, sizeof(uint32));
    3125     1243720 :     if (nread == 0)
    3126       26944 :         return NULL;
    3127     1216776 :     if (nread != sizeof(uint32))
    3128           0 :         ereport(ERROR,
    3129             :                 (errcode_for_file_access(),
    3130             :                  errmsg_internal("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
    3131             :                                  tape, sizeof(uint32), nread)));
    3132     1216776 :     if (hashp != NULL)
    3133     1216776 :         *hashp = hash;
    3134             : 
    3135     1216776 :     nread = LogicalTapeRead(tape, &t_len, sizeof(t_len));
    3136     1216776 :     if (nread != sizeof(uint32))
    3137           0 :         ereport(ERROR,
    3138             :                 (errcode_for_file_access(),
    3139             :                  errmsg_internal("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
    3140             :                                  tape, sizeof(uint32), nread)));
    3141             : 
    3142     1216776 :     tuple = (MinimalTuple) palloc(t_len);
    3143     1216776 :     tuple->t_len = t_len;
    3144             : 
    3145     1216776 :     nread = LogicalTapeRead(tape,
    3146             :                             (char *) tuple + sizeof(uint32),
    3147             :                             t_len - sizeof(uint32));
    3148     1216776 :     if (nread != t_len - sizeof(uint32))
    3149           0 :         ereport(ERROR,
    3150             :                 (errcode_for_file_access(),
    3151             :                  errmsg_internal("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
    3152             :                                  tape, t_len - sizeof(uint32), nread)));
    3153             : 
    3154     1216776 :     return tuple;
    3155             : }
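
Each spilled record is framed as a uint32 hash followed by the MinimalTuple, whose own first uint32 is its total length; the reader above therefore reads the hash, then t_len, then the remaining t_len - sizeof(uint32) bytes. A self-contained sketch of that framing, using a temporary stdio file as a stand-in for the LogicalTape (a real MinimalTuple is more than a length word plus a string, but the framing is the same):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        FILE    *tape = tmpfile();          /* stand-in for a LogicalTape */
        uint32_t hash = 0xDEADBEEFu;
        const char body[] = "representative grouping columns";
        uint32_t t_len = (uint32_t) (sizeof(uint32_t) + sizeof(body));

        if (tape == NULL)
            return 1;

        /* write side: hash, then the "tuple" (its length word, then its body) */
        fwrite(&hash, sizeof(uint32_t), 1, tape);
        fwrite(&t_len, sizeof(uint32_t), 1, tape);
        fwrite(body, sizeof(body), 1, tape);

        /* read side: hash, then length, then allocate and read the rest */
        rewind(tape);
        uint32_t rhash, rlen;

        if (fread(&rhash, sizeof(uint32_t), 1, tape) != 1)
            return 0;                       /* empty tape: no more tuples */
        if (fread(&rlen, sizeof(uint32_t), 1, tape) != 1)
            return 1;

        char *tuple = malloc(rlen);

        if (tuple == NULL)
            return 1;
        memcpy(tuple, &rlen, sizeof(uint32_t));     /* the tuple's length word */
        if (fread(tuple + sizeof(uint32_t), rlen - sizeof(uint32_t), 1, tape) != 1)
            return 1;

        printf("hash=0x%08X len=%u body=\"%s\"\n",
               (unsigned) rhash, (unsigned) rlen, tuple + sizeof(uint32_t));
        free(tuple);
        fclose(tape);
        return 0;
    }
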
    3156             : 
    3157             : /*
    3158             :  * hashagg_finish_initial_spills
    3159             :  *
    3160             :  * After a HashAggBatch has been processed, it may have spilled tuples to
    3161             :  * disk. If so, turn the spilled partitions into new batches that must later
    3162             :  * be executed.
    3163             :  */
    3164             : static void
    3165       17578 : hashagg_finish_initial_spills(AggState *aggstate)
    3166             : {
    3167             :     int         setno;
    3168       17578 :     int         total_npartitions = 0;
    3169             : 
    3170       17578 :     if (aggstate->hash_spills != NULL)
    3171             :     {
    3172         184 :         for (setno = 0; setno < aggstate->num_hashes; setno++)
    3173             :         {
    3174         122 :             HashAggSpill *spill = &aggstate->hash_spills[setno];
    3175             : 
    3176         122 :             total_npartitions += spill->npartitions;
    3177         122 :             hashagg_spill_finish(aggstate, spill, setno);
    3178             :         }
    3179             : 
    3180             :         /*
    3181             :          * We're not processing tuples from the outer plan any more; only
    3182             :          * processing batches of spilled tuples. The initial spill structures
    3183             :          * are no longer needed.
    3184             :          */
    3185          62 :         pfree(aggstate->hash_spills);
    3186          62 :         aggstate->hash_spills = NULL;
    3187             :     }
    3188             : 
    3189       17578 :     hash_agg_update_metrics(aggstate, false, total_npartitions);
    3190       17578 :     aggstate->hash_spill_mode = false;
    3191       17578 : }

    3192             : 
    3193             : /*
    3194             :  * hashagg_spill_finish
    3195             :  *
    3196             :  * Transform spill partitions into new batches.
    3197             :  */
    3198             : static void
    3199       12622 : hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
    3200             : {
    3201             :     int         i;
    3202       12622 :     int         used_bits = 32 - spill->shift;
    3203             : 
    3204       12622 :     if (spill->npartitions == 0)
    3205           0 :         return;                 /* didn't spill */
    3206             : 
    3207       63080 :     for (i = 0; i < spill->npartitions; i++)
    3208             :     {
    3209       50458 :         LogicalTape *tape = spill->partitions[i];
    3210             :         HashAggBatch *new_batch;
    3211             :         double      cardinality;
    3212             : 
    3213             :         /* if the partition is empty, don't create a new batch of work */
    3214       50458 :         if (spill->ntuples[i] == 0)
    3215       23514 :             continue;
    3216             : 
    3217       26944 :         cardinality = estimateHyperLogLog(&spill->hll_card[i]);
    3218       26944 :         freeHyperLogLog(&spill->hll_card[i]);
    3219             : 
    3220             :         /* rewinding frees the buffer while not in use */
    3221       26944 :         LogicalTapeRewindForRead(tape, HASHAGG_READ_BUFFER_SIZE);
    3222             : 
    3223       26944 :         new_batch = hashagg_batch_new(tape, setno,
    3224       26944 :                                       spill->ntuples[i], cardinality,
    3225             :                                       used_bits);
    3226       26944 :         aggstate->hash_batches = lappend(aggstate->hash_batches, new_batch);
    3227       26944 :         aggstate->hash_batches_used++;
    3228             :     }
    3229             : 
    3230       12622 :     pfree(spill->ntuples);
    3231       12622 :     pfree(spill->hll_card);
    3232       12622 :     pfree(spill->partitions);
    3233             : }
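
Because each new batch records used_bits = 32 - shift, every level of recursive spilling partitions on strictly lower hash bits than the level before it, so the recursion bottoms out after at most about 32 / partition_bits levels. A small sketch of that progression (partition_bits fixed at 4 for illustration; the clamp only protects the sketch when the remaining bits run out):

    #include <stdio.h>

    int main(void)
    {
        int used_bits = 0;
        int partition_bits = 4;     /* 16 partitions per spill, for illustration */

        for (int level = 0; used_bits < 32; level++)
        {
            int shift = 32 - used_bits - partition_bits;

            if (shift < 0)
                shift = 0;          /* out of bits: take whatever remains */
            printf("level %d: used_bits=%d, partitions on bits [%d..%d]\n",
                   level, used_bits, shift, 31 - used_bits);
            used_bits = 32 - shift;
        }
        return 0;
    }
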
    3234             : 
    3235             : /*
    3236             :  * Free resources related to a spilled HashAgg.
    3237             :  */
    3238             : static void
    3239       66424 : hashagg_reset_spill_state(AggState *aggstate)
    3240             : {
    3241             :     /* free spills from initial pass */
    3242       66424 :     if (aggstate->hash_spills != NULL)
    3243             :     {
    3244             :         int         setno;
    3245             : 
    3246           0 :         for (setno = 0; setno < aggstate->num_hashes; setno++)
    3247             :         {
    3248           0 :             HashAggSpill *spill = &aggstate->hash_spills[setno];
    3249             : 
    3250           0 :             pfree(spill->ntuples);
    3251           0 :             pfree(spill->partitions);
    3252             :         }
    3253           0 :         pfree(aggstate->hash_spills);
    3254           0 :         aggstate->hash_spills = NULL;
    3255             :     }
    3256             : 
    3257             :     /* free batches */
    3258       66424 :     list_free_deep(aggstate->hash_batches);
    3259       66424 :     aggstate->hash_batches = NIL;
    3260             : 
    3261             :     /* close tape set */
    3262       66424 :     if (aggstate->hash_tapeset != NULL)
    3263             :     {
    3264          62 :         LogicalTapeSetClose(aggstate->hash_tapeset);
    3265          62 :         aggstate->hash_tapeset = NULL;
    3266             :     }
    3267       66424 : }
    3268             : 
    3269             : 
    3270             : /* -----------------
    3271             :  * ExecInitAgg
    3272             :  *
    3273             :  *  Creates the run-time information for the agg node produced by the
    3274             :  *  planner and initializes its outer subtree.
    3275             :  *
    3276             :  * -----------------
    3277             :  */
    3278             : AggState *
    3279       53976 : ExecInitAgg(Agg *node, EState *estate, int eflags)
    3280             : {
    3281             :     AggState   *aggstate;
    3282             :     AggStatePerAgg peraggs;
    3283             :     AggStatePerTrans pertransstates;
    3284             :     AggStatePerGroup *pergroups;
    3285             :     Plan       *outerPlan;
    3286             :     ExprContext *econtext;
    3287             :     TupleDesc   scanDesc;
    3288             :     int         max_aggno;
    3289             :     int         max_transno;
    3290             :     int         numaggrefs;
    3291             :     int         numaggs;
    3292             :     int         numtrans;
    3293             :     int         phase;
    3294             :     int         phaseidx;
    3295             :     ListCell   *l;
    3296       53976 :     Bitmapset  *all_grouped_cols = NULL;
    3297       53976 :     int         numGroupingSets = 1;
    3298             :     int         numPhases;
    3299             :     int         numHashes;
    3300       53976 :     int         i = 0;
    3301       53976 :     int         j = 0;
    3302      101152 :     bool        use_hashing = (node->aggstrategy == AGG_HASHED ||
    3303       47176 :                                node->aggstrategy == AGG_MIXED);
    3304             : 
    3305             :     /* check for unsupported flags */
    3306             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
    3307             : 
    3308             :     /*
    3309             :      * create state structure
    3310             :      */
    3311       53976 :     aggstate = makeNode(AggState);
    3312       53976 :     aggstate->ss.ps.plan = (Plan *) node;
    3313       53976 :     aggstate->ss.ps.state = estate;
    3314       53976 :     aggstate->ss.ps.ExecProcNode = ExecAgg;
    3315             : 
    3316       53976 :     aggstate->aggs = NIL;
    3317       53976 :     aggstate->numaggs = 0;
    3318       53976 :     aggstate->numtrans = 0;
    3319       53976 :     aggstate->aggstrategy = node->aggstrategy;
    3320       53976 :     aggstate->aggsplit = node->aggsplit;
    3321       53976 :     aggstate->maxsets = 0;
    3322       53976 :     aggstate->projected_set = -1;
    3323       53976 :     aggstate->current_set = 0;
    3324       53976 :     aggstate->peragg = NULL;
    3325       53976 :     aggstate->pertrans = NULL;
    3326       53976 :     aggstate->curperagg = NULL;
    3327       53976 :     aggstate->curpertrans = NULL;
    3328       53976 :     aggstate->input_done = false;
    3329       53976 :     aggstate->agg_done = false;
    3330       53976 :     aggstate->pergroups = NULL;
    3331       53976 :     aggstate->grp_firstTuple = NULL;
    3332       53976 :     aggstate->sort_in = NULL;
    3333       53976 :     aggstate->sort_out = NULL;
    3334             : 
    3335             :     /*
    3336             :      * phases[0] always exists, but is dummy in sorted/plain mode
    3337             :      */
    3338       53976 :     numPhases = (use_hashing ? 1 : 2);
    3339       53976 :     numHashes = (use_hashing ? 1 : 0);
    3340             : 
    3341             :     /*
    3342             :      * Calculate the maximum number of grouping sets in any phase; this
    3343             :      * determines the size of some allocations.  Also calculate the number of
    3344             :      * phases, since all hashed/mixed nodes contribute to only a single phase.
    3345             :      */
    3346       53976 :     if (node->groupingSets)
    3347             :     {
    3348         920 :         numGroupingSets = list_length(node->groupingSets);
    3349             : 
    3350        1924 :         foreach(l, node->chain)
    3351             :         {
    3352        1004 :             Agg        *agg = lfirst(l);
    3353             : 
    3354        1004 :             numGroupingSets = Max(numGroupingSets,
    3355             :                                   list_length(agg->groupingSets));
    3356             : 
    3357             :             /*
    3358             :              * additional AGG_HASHED aggs become part of phase 0, but all
    3359             :              * others add an extra phase.
    3360             :              */
    3361        1004 :             if (agg->aggstrategy != AGG_HASHED)
    3362         490 :                 ++numPhases;
    3363             :             else
    3364         514 :                 ++numHashes;
    3365             :         }
    3366             :     }
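
As a worked example of the counts just computed (hypothetical plan shape): for an AGG_MIXED node whose chain holds two AGG_HASHED members and one AGG_SORTED member, numPhases ends up as 1 + 1 = 2, since only the sorted chain member adds a phase, while numHashes ends up as 1 + 2 = 3 for the mixed node plus the two hashed members; numGroupingSets becomes the longest groupingSets list found in any of those nodes.
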
    3367             : 
    3368       53976 :     aggstate->maxsets = numGroupingSets;
    3369       53976 :     aggstate->numphases = numPhases;
    3370             : 
    3371       53976 :     aggstate->aggcontexts = palloc0_array(ExprContext *, numGroupingSets);
    3372             : 
    3373             :     /*
    3374             :      * Create expression contexts.  We need three or more, one for
    3375             :      * per-input-tuple processing, one for per-output-tuple processing, one
    3376             :      * for all the hashtables, and one for each grouping set.  The per-tuple
    3377             :      * memory context of the per-grouping-set ExprContexts (aggcontexts)
    3378             :      * replaces the standalone memory context formerly used to hold transition
    3379             :      * values.  We cheat a little by using ExecAssignExprContext() to build
    3380             :      * all of them.
    3381             :      *
    3382             :      * NOTE: the details of what is stored in aggcontexts and what is stored
    3383             :      * in the regular per-query memory context are driven by a simple
    3384             :      * decision: we want to reset the aggcontext at group boundaries (if not
    3385             :      * hashing) and in ExecReScanAgg to recover no-longer-wanted space.
    3386             :      */
    3387       53976 :     ExecAssignExprContext(estate, &aggstate->ss.ps);
    3388       53976 :     aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
    3389             : 
    3390      108816 :     for (i = 0; i < numGroupingSets; ++i)
    3391             :     {
    3392       54840 :         ExecAssignExprContext(estate, &aggstate->ss.ps);
    3393       54840 :         aggstate->aggcontexts[i] = aggstate->ss.ps.ps_ExprContext;
    3394             :     }
    3395             : 
    3396       53976 :     if (use_hashing)
    3397        7038 :         hash_create_memory(aggstate);
    3398             : 
    3399       53976 :     ExecAssignExprContext(estate, &aggstate->ss.ps);
    3400             : 
    3401             :     /*
    3402             :      * Initialize child nodes.
    3403             :      *
    3404             :      * If we are doing a hashed aggregation then the child plan does not need
    3405             :      * to handle REWIND efficiently; see ExecReScanAgg.
    3406             :      */
    3407       53976 :     if (node->aggstrategy == AGG_HASHED)
    3408        6800 :         eflags &= ~EXEC_FLAG_REWIND;
    3409       53976 :     outerPlan = outerPlan(node);
    3410       53976 :     outerPlanState(aggstate) = ExecInitNode(outerPlan, estate, eflags);
    3411             : 
    3412             :     /*
    3413             :      * initialize source tuple type.
    3414             :      */
    3415       53976 :     aggstate->ss.ps.outerops =
    3416       53976 :         ExecGetResultSlotOps(outerPlanState(&aggstate->ss),
    3417             :                              &aggstate->ss.ps.outeropsfixed);
    3418       53976 :     aggstate->ss.ps.outeropsset = true;
    3419             : 
    3420       53976 :     ExecCreateScanSlotFromOuterPlan(estate, &aggstate->ss,
    3421             :                                     aggstate->ss.ps.outerops);
    3422       53976 :     scanDesc = aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor;
    3423             : 
    3424             :     /*
    3425             :      * If there are more than two phases (including a potential dummy phase
    3426             :      * 0), input will be resorted using tuplesort. Need a slot for that.
    3427             :      */
    3428       53976 :     if (numPhases > 2)
    3429             :     {
    3430         210 :         aggstate->sort_slot = ExecInitExtraTupleSlot(estate, scanDesc,
    3431             :                                                      &TTSOpsMinimalTuple);
    3432             : 
    3433             :         /*
    3434             :          * The output of the tuplesort, and the output from the outer child
    3435             :          * might not use the same type of slot. In most cases the child will
    3436             :          * be a Sort, and thus return a TTSOpsMinimalTuple type slot - but the
    3437             :          * input can also be presorted due to an index, in which case it could be
    3438             :          * a different type of slot.
    3439             :          *
    3440             :          * XXX: For efficiency it would be good to instead/additionally
    3441             :          * generate expressions with corresponding settings of outerops* for
    3442             :          * the individual phases - deforming is often a bottleneck for
    3443             :          * aggregations with lots of rows per group. If there's multiple
    3444             :          * sorts, we know that all but the first use TTSOpsMinimalTuple (via
    3445             :          * the nodeAgg.c internal tuplesort).
    3446             :          */
    3447         210 :         if (aggstate->ss.ps.outeropsfixed &&
    3448         210 :             aggstate->ss.ps.outerops != &TTSOpsMinimalTuple)
    3449          12 :             aggstate->ss.ps.outeropsfixed = false;
    3450             :     }
    3451             : 
    3452             :     /*
    3453             :      * Initialize result type, slot and projection.
    3454             :      */
    3455       53976 :     ExecInitResultTupleSlotTL(&aggstate->ss.ps, &TTSOpsVirtual);
    3456       53976 :     ExecAssignProjectionInfo(&aggstate->ss.ps, NULL);
    3457             : 
    3458             :     /*
    3459             :      * initialize child expressions
    3460             :      *
    3461             :      * We expect the parser to have checked that no aggs contain other agg
    3462             :      * calls in their arguments (and just to be sure, we verify it again while
    3463             :      * initializing the plan node).  This would make no sense under SQL
    3464             :      * semantics, and it's forbidden by the spec.  Because it is true, we
    3465             :      * don't need to worry about evaluating the aggs in any particular order.
    3466             :      *
    3467             :      * Note: execExpr.c finds Aggrefs for us, and adds them to aggstate->aggs.
    3468             :      * Aggrefs in the qual are found here; Aggrefs in the targetlist are found
    3469             :      * during ExecAssignProjectionInfo, above.
    3470             :      */
    3471       53976 :     aggstate->ss.ps.qual =
    3472       53976 :         ExecInitQual(node->plan.qual, (PlanState *) aggstate);
    3473             : 
    3474             :     /*
    3475             :      * We should now have found all Aggrefs in the targetlist and quals.
    3476             :      */
    3477       53976 :     numaggrefs = list_length(aggstate->aggs);
    3478       53976 :     max_aggno = -1;
    3479       53976 :     max_transno = -1;
    3480      113978 :     foreach(l, aggstate->aggs)
    3481             :     {
    3482       60002 :         Aggref     *aggref = (Aggref *) lfirst(l);
    3483             : 
    3484       60002 :         max_aggno = Max(max_aggno, aggref->aggno);
    3485       60002 :         max_transno = Max(max_transno, aggref->aggtransno);
    3486             :     }
    3487       53976 :     aggstate->numaggs = numaggs = max_aggno + 1;
    3488       53976 :     aggstate->numtrans = numtrans = max_transno + 1;
    3489             : 
    3490             :     /*
    3491             :      * For each phase, prepare grouping set data and fmgr lookup data for
    3492             :      * compare functions.  Accumulate all_grouped_cols in passing.
    3493             :      */
    3494       53976 :     aggstate->phases = palloc0_array(AggStatePerPhaseData, numPhases);
    3495             : 
    3496       53976 :     aggstate->num_hashes = numHashes;
    3497       53976 :     if (numHashes)
    3498             :     {
    3499        7038 :         aggstate->perhash = palloc0_array(AggStatePerHashData, numHashes);
    3500        7038 :         aggstate->phases[0].numsets = 0;
    3501        7038 :         aggstate->phases[0].gset_lengths = palloc_array(int, numHashes);
    3502        7038 :         aggstate->phases[0].grouped_cols = palloc_array(Bitmapset *, numHashes);
    3503             :     }
    3504             : 
    3505       53976 :     phase = 0;
    3506      108956 :     for (phaseidx = 0; phaseidx <= list_length(node->chain); ++phaseidx)
    3507             :     {
    3508             :         Agg        *aggnode;
    3509             :         Sort       *sortnode;
    3510             : 
    3511       54980 :         if (phaseidx > 0)
    3512             :         {
    3513        1004 :             aggnode = list_nth_node(Agg, node->chain, phaseidx - 1);
    3514        1004 :             sortnode = castNode(Sort, outerPlan(aggnode));
    3515             :         }
    3516             :         else
    3517             :         {
    3518       53976 :             aggnode = node;
    3519       53976 :             sortnode = NULL;
    3520             :         }
    3521             : 
    3522             :         Assert(phase <= 1 || sortnode);
    3523             : 
    3524       54980 :         if (aggnode->aggstrategy == AGG_HASHED
    3525       47666 :             || aggnode->aggstrategy == AGG_MIXED)
    3526        7552 :         {
    3527        7552 :             AggStatePerPhase phasedata = &aggstate->phases[0];
    3528             :             AggStatePerHash perhash;
    3529        7552 :             Bitmapset  *cols = NULL;
    3530             : 
    3531             :             Assert(phase == 0);
    3532        7552 :             i = phasedata->numsets++;
    3533        7552 :             perhash = &aggstate->perhash[i];
    3534             : 
    3535             :             /* phase 0 always points to the "real" Agg in the hash case */
    3536        7552 :             phasedata->aggnode = node;
    3537        7552 :             phasedata->aggstrategy = node->aggstrategy;
    3538             : 
    3539             :             /* but the actual Agg node representing this hash is saved here */
    3540        7552 :             perhash->aggnode = aggnode;
    3541             : 
    3542        7552 :             phasedata->gset_lengths[i] = perhash->numCols = aggnode->numCols;
    3543             : 
    3544       19060 :             for (j = 0; j < aggnode->numCols; ++j)
    3545       11508 :                 cols = bms_add_member(cols, aggnode->grpColIdx[j]);
    3546             : 
    3547        7552 :             phasedata->grouped_cols[i] = cols;
    3548             : 
    3549        7552 :             all_grouped_cols = bms_add_members(all_grouped_cols, cols);
    3550        7552 :             continue;
    3551             :         }
    3552             :         else
    3553             :         {
    3554       47428 :             AggStatePerPhase phasedata = &aggstate->phases[++phase];
    3555             :             int         num_sets;
    3556             : 
    3557       47428 :             phasedata->numsets = num_sets = list_length(aggnode->groupingSets);
    3558             : 
    3559       47428 :             if (num_sets)
    3560             :             {
    3561        1006 :                 phasedata->gset_lengths = palloc(num_sets * sizeof(int));
    3562        1006 :                 phasedata->grouped_cols = palloc(num_sets * sizeof(Bitmapset *));
    3563             : 
    3564        1006 :                 i = 0;
    3565        2948 :                 foreach(l, aggnode->groupingSets)
    3566             :                 {
    3567        1942 :                     int         current_length = list_length(lfirst(l));
    3568        1942 :                     Bitmapset  *cols = NULL;
    3569             : 
    3570             :                     /* planner forces this to be correct */
    3571        3810 :                     for (j = 0; j < current_length; ++j)
    3572        1868 :                         cols = bms_add_member(cols, aggnode->grpColIdx[j]);
    3573             : 
    3574        1942 :                     phasedata->grouped_cols[i] = cols;
    3575        1942 :                     phasedata->gset_lengths[i] = current_length;
    3576             : 
    3577        1942 :                     ++i;
    3578             :                 }
    3579             : 
    3580        1006 :                 all_grouped_cols = bms_add_members(all_grouped_cols,
    3581        1006 :                                                    phasedata->grouped_cols[0]);
    3582             :             }
    3583             :             else
    3584             :             {
    3585             :                 Assert(phaseidx == 0);
    3586             : 
    3587       46422 :                 phasedata->gset_lengths = NULL;
    3588       46422 :                 phasedata->grouped_cols = NULL;
    3589             :             }
    3590             : 
    3591             :             /*
    3592             :              * If we are grouping, precompute fmgr lookup data for inner loop.
    3593             :              */
    3594       47428 :             if (aggnode->aggstrategy == AGG_SORTED)
    3595             :             {
    3596             :                 /*
    3597             :                  * Build a separate function for each subset of columns that
    3598             :                  * need to be compared.
    3599             :                  */
    3600        2654 :                 phasedata->eqfunctions = palloc0_array(ExprState *, aggnode->numCols);
    3601             : 
    3602             :                 /* for each grouping set */
    3603        4294 :                 for (int k = 0; k < phasedata->numsets; k++)
    3604             :                 {
    3605        1640 :                     int         length = phasedata->gset_lengths[k];
    3606             : 
    3607             :                     /* nothing to do for empty grouping set */
    3608        1640 :                     if (length == 0)
    3609         338 :                         continue;
    3610             : 
    3611             :                     /* if we already had one of this length, it'll do */
    3612        1302 :                     if (phasedata->eqfunctions[length - 1] != NULL)
    3613         138 :                         continue;
    3614             : 
    3615        1164 :                     phasedata->eqfunctions[length - 1] =
    3616        1164 :                         execTuplesMatchPrepare(scanDesc,
    3617             :                                                length,
    3618        1164 :                                                aggnode->grpColIdx,
    3619        1164 :                                                aggnode->grpOperators,
    3620        1164 :                                                aggnode->grpCollations,
    3621             :                                                (PlanState *) aggstate);
    3622             :                 }
    3623             : 
    3624             :                 /* and for all grouped columns, unless already computed */
    3625        2654 :                 if (aggnode->numCols > 0 &&
    3626        2560 :                     phasedata->eqfunctions[aggnode->numCols - 1] == NULL)
    3627             :                 {
    3628        1784 :                     phasedata->eqfunctions[aggnode->numCols - 1] =
    3629        1784 :                         execTuplesMatchPrepare(scanDesc,
    3630             :                                                aggnode->numCols,
    3631        1784 :                                                aggnode->grpColIdx,
    3632        1784 :                                                aggnode->grpOperators,
    3633        1784 :                                                aggnode->grpCollations,
    3634             :                                                (PlanState *) aggstate);
    3635             :                 }
    3636             :             }
    3637             : 
    3638       47428 :             phasedata->aggnode = aggnode;
    3639       47428 :             phasedata->aggstrategy = aggnode->aggstrategy;
    3640       47428 :             phasedata->sortnode = sortnode;
    3641             :         }
    3642             :     }
    3643             : 
    3644             :     /*
    3645             :      * Convert all_grouped_cols to a descending-order list.
    3646             :      */
    3647       53976 :     i = -1;
    3648       66104 :     while ((i = bms_next_member(all_grouped_cols, i)) >= 0)
    3649       12128 :         aggstate->all_grouped_cols = lcons_int(i, aggstate->all_grouped_cols);
    3650             : 
    3651             :     /*
    3652             :      * Set up aggregate-result storage in the output expr context, and also
    3653             :      * allocate my private per-agg working storage
    3654             :      */
    3655       53976 :     econtext = aggstate->ss.ps.ps_ExprContext;
    3656       53976 :     econtext->ecxt_aggvalues = palloc0_array(Datum, numaggs);
    3657       53976 :     econtext->ecxt_aggnulls = palloc0_array(bool, numaggs);
    3658             : 
    3659       53976 :     peraggs = palloc0_array(AggStatePerAggData, numaggs);
    3660       53976 :     pertransstates = palloc0_array(AggStatePerTransData, numtrans);
    3661             : 
    3662       53976 :     aggstate->peragg = peraggs;
    3663       53976 :     aggstate->pertrans = pertransstates;
    3664             : 
    3665             : 
    3666       53976 :     aggstate->all_pergroups = palloc0_array(AggStatePerGroup, numGroupingSets + numHashes);
    3667       53976 :     pergroups = aggstate->all_pergroups;
    3668             : 
    3669       53976 :     if (node->aggstrategy != AGG_HASHED)
    3670             :     {
    3671       95216 :         for (i = 0; i < numGroupingSets; i++)
    3672             :         {
    3673       48040 :             pergroups[i] = palloc0_array(AggStatePerGroupData, numaggs);
    3674             :         }
    3675             : 
    3676       47176 :         aggstate->pergroups = pergroups;
    3677       47176 :         pergroups += numGroupingSets;
    3678             :     }
    3679             : 
    3680             :     /*
    3681             :      * Hashing can only appear in the initial phase.
    3682             :      */
    3683       53976 :     if (use_hashing)
    3684             :     {
    3685        7038 :         Plan       *outerplan = outerPlan(node);
    3686        7038 :         double      totalGroups = 0;
    3687             : 
    3688        7038 :         aggstate->hash_spill_rslot = ExecInitExtraTupleSlot(estate, scanDesc,
    3689             :                                                             &TTSOpsMinimalTuple);
    3690        7038 :         aggstate->hash_spill_wslot = ExecInitExtraTupleSlot(estate, scanDesc,
    3691             :                                                             &TTSOpsVirtual);
    3692             : 
    3693             :         /* this is an array of pointers, not structures */
    3694        7038 :         aggstate->hash_pergroup = pergroups;
    3695             : 
    3696       14076 :         aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
    3697        7038 :                                                       outerplan->plan_width,
    3698             :                                                       node->transitionSpace);
    3699             : 
    3700             :         /*
    3701             :          * Consider all of the grouping sets together when setting the limits
    3702             :          * and estimating the number of partitions. This can be inaccurate
    3703             :          * when there is more than one grouping set, but should still be
    3704             :          * reasonable.
    3705             :          */
    3706       14590 :         for (int k = 0; k < aggstate->num_hashes; k++)
    3707        7552 :             totalGroups += aggstate->perhash[k].aggnode->numGroups;
    3708             : 
    3709        7038 :         hash_agg_set_limits(aggstate->hashentrysize, totalGroups, 0,
    3710             :                             &aggstate->hash_mem_limit,
    3711             :                             &aggstate->hash_ngroups_limit,
    3712             :                             &aggstate->hash_planned_partitions);
    3713        7038 :         find_hash_columns(aggstate);
    3714             : 
    3715             :         /* Skip massive memory allocation if we are just doing EXPLAIN */
    3716        7038 :         if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
    3717        5180 :             build_hash_tables(aggstate);
    3718             : 
    3719        7038 :         aggstate->table_filled = false;
    3720             : 
    3721             :         /* Initialize this to 1, meaning nothing spilled yet */
    3722        7038 :         aggstate->hash_batches_used = 1;
    3723             :     }
    3724             : 
    3725             :     /*
    3726             :      * Initialize current phase-dependent values to initial phase. The initial
    3727             :      * phase is 1 (first sort pass) for all strategies that use sorting (if
    3728             :      * hashing is being done too, then phase 0 is processed last); but if only
    3729             :      * hashing is being done, then phase 0 is all there is.
    3730             :      */
    3731       53976 :     if (node->aggstrategy == AGG_HASHED)
    3732             :     {
    3733        6800 :         aggstate->current_phase = 0;
    3734        6800 :         initialize_phase(aggstate, 0);
    3735        6800 :         select_current_set(aggstate, 0, true);
    3736             :     }
    3737             :     else
    3738             :     {
    3739       47176 :         aggstate->current_phase = 1;
    3740       47176 :         initialize_phase(aggstate, 1);
    3741       47176 :         select_current_set(aggstate, 0, false);
    3742             :     }
    3743             : 
    3744             :     /*
    3745             :      * Perform lookups of aggregate function info, and initialize the
    3746             :      * unchanging fields of the per-agg and per-trans data.
    3747             :      */
    3748      113972 :     foreach(l, aggstate->aggs)
    3749             :     {
    3750       60002 :         Aggref     *aggref = lfirst(l);
    3751             :         AggStatePerAgg peragg;
    3752             :         AggStatePerTrans pertrans;
    3753             :         Oid         aggTransFnInputTypes[FUNC_MAX_ARGS];
    3754             :         int         numAggTransFnArgs;
    3755             :         int         numDirectArgs;
    3756             :         HeapTuple   aggTuple;
    3757             :         Form_pg_aggregate aggform;
    3758             :         AclResult   aclresult;
    3759             :         Oid         finalfn_oid;
    3760             :         Oid         serialfn_oid,
    3761             :                     deserialfn_oid;
    3762             :         Oid         aggOwner;
    3763             :         Expr       *finalfnexpr;
    3764             :         Oid         aggtranstype;
    3765             : 
    3766             :         /* Planner should have assigned aggregate to correct level */
    3767             :         Assert(aggref->agglevelsup == 0);
    3768             :         /* ... and the split mode should match */
    3769             :         Assert(aggref->aggsplit == aggstate->aggsplit);
    3770             : 
    3771       60002 :         peragg = &peraggs[aggref->aggno];
    3772             : 
    3773             :         /* Check if we initialized the state for this aggregate already. */
    3774       60002 :         if (peragg->aggref != NULL)
    3775         488 :             continue;
    3776             : 
    3777       59514 :         peragg->aggref = aggref;
    3778       59514 :         peragg->transno = aggref->aggtransno;
    3779             : 
    3780             :         /* Fetch the pg_aggregate row */
    3781       59514 :         aggTuple = SearchSysCache1(AGGFNOID,
    3782             :                                    ObjectIdGetDatum(aggref->aggfnoid));
    3783       59514 :         if (!HeapTupleIsValid(aggTuple))
    3784           0 :             elog(ERROR, "cache lookup failed for aggregate %u",
    3785             :                  aggref->aggfnoid);
    3786       59514 :         aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
    3787             : 
    3788             :         /* Check permission to call aggregate function */
    3789       59514 :         aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(),
    3790             :                                     ACL_EXECUTE);
    3791       59514 :         if (aclresult != ACLCHECK_OK)
    3792           6 :             aclcheck_error(aclresult, OBJECT_AGGREGATE,
    3793           6 :                            get_func_name(aggref->aggfnoid));
    3794       59508 :         InvokeFunctionExecuteHook(aggref->aggfnoid);
    3795             : 
    3796             :         /* planner recorded transition state type in the Aggref itself */
    3797       59508 :         aggtranstype = aggref->aggtranstype;
    3798             :         Assert(OidIsValid(aggtranstype));
    3799             : 
    3800             :         /* Final function only required if we're finalizing the aggregates */
    3801       59508 :         if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
    3802        5544 :             peragg->finalfn_oid = finalfn_oid = InvalidOid;
    3803             :         else
    3804       53964 :             peragg->finalfn_oid = finalfn_oid = aggform->aggfinalfn;
    3805             : 
    3806       59508 :         serialfn_oid = InvalidOid;
    3807       59508 :         deserialfn_oid = InvalidOid;
    3808             : 
    3809             :         /*
    3810             :          * Check if serialization/deserialization is required.  We only do it
    3811             :          * for aggregates that have transtype INTERNAL.
    3812             :          */
    3813       59508 :         if (aggtranstype == INTERNALOID)
    3814             :         {
    3815             :             /*
    3816             :              * The planner should only have generated a serialize agg node if
    3817             :              * every aggregate with an INTERNAL state has a serialization
    3818             :              * function.  Verify that.
    3819             :              */
    3820       22644 :             if (DO_AGGSPLIT_SERIALIZE(aggstate->aggsplit))
    3821             :             {
    3822             :                 /* serialization only valid when not running finalfn */
    3823             :                 Assert(DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
    3824             : 
    3825         336 :                 if (!OidIsValid(aggform->aggserialfn))
    3826           0 :                     elog(ERROR, "serialfunc not provided for serialization aggregation");
    3827         336 :                 serialfn_oid = aggform->aggserialfn;
    3828             :             }
    3829             : 
    3830             :             /* Likewise for deserialization functions */
    3831       22644 :             if (DO_AGGSPLIT_DESERIALIZE(aggstate->aggsplit))
    3832             :             {
    3833             :                 /* deserialization only valid when combining states */
    3834             :                 Assert(DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
    3835             : 
    3836         120 :                 if (!OidIsValid(aggform->aggdeserialfn))
    3837           0 :                     elog(ERROR, "deserialfunc not provided for deserialization aggregation");
    3838         120 :                 deserialfn_oid = aggform->aggdeserialfn;
    3839             :             }
    3840             :         }
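        /*
         * A hedged illustration (not code used here): an INTERNAL transition
         * state is just a pointer to a C struct, which cannot be shipped
         * between processes as-is, so partial aggregation needs a
         * serialfn/deserialfn pair that flattens it to and from bytea.
         * Assuming a hypothetical state such as
         *
         *      typedef struct my_trans_state
         *      {
         *          int64       count;
         *          int64       sum;
         *      } my_trans_state;
         *
         * its serialization function would typically look like
         *
         *      Datum
         *      my_avg_serialize(PG_FUNCTION_ARGS)
         *      {
         *          my_trans_state *state = (my_trans_state *) PG_GETARG_POINTER(0);
         *          StringInfoData buf;
         *
         *          pq_begintypsend(&buf);
         *          pq_sendint64(&buf, state->count);
         *          pq_sendint64(&buf, state->sum);
         *          PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
         *      }
         *
         * with a matching deserialization function built on pq_getmsgint64.
         * All names above are invented for the example.
         */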
    3841             : 
    3842             :         /* Check that aggregate owner has permission to call component fns */
    3843             :         {
    3844             :             HeapTuple   procTuple;
    3845             : 
    3846       59508 :             procTuple = SearchSysCache1(PROCOID,
    3847             :                                         ObjectIdGetDatum(aggref->aggfnoid));
    3848       59508 :             if (!HeapTupleIsValid(procTuple))
    3849           0 :                 elog(ERROR, "cache lookup failed for function %u",
    3850             :                      aggref->aggfnoid);
    3851       59508 :             aggOwner = ((Form_pg_proc) GETSTRUCT(procTuple))->proowner;
    3852       59508 :             ReleaseSysCache(procTuple);
    3853             : 
    3854       59508 :             if (OidIsValid(finalfn_oid))
    3855             :             {
    3856       24266 :                 aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
    3857             :                                             ACL_EXECUTE);
    3858       24266 :                 if (aclresult != ACLCHECK_OK)
    3859           0 :                     aclcheck_error(aclresult, OBJECT_FUNCTION,
    3860           0 :                                    get_func_name(finalfn_oid));
    3861       24266 :                 InvokeFunctionExecuteHook(finalfn_oid);
    3862             :             }
    3863       59508 :             if (OidIsValid(serialfn_oid))
    3864             :             {
    3865         336 :                 aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner,
    3866             :                                             ACL_EXECUTE);
    3867         336 :                 if (aclresult != ACLCHECK_OK)
    3868           0 :                     aclcheck_error(aclresult, OBJECT_FUNCTION,
    3869           0 :                                    get_func_name(serialfn_oid));
    3870         336 :                 InvokeFunctionExecuteHook(serialfn_oid);
    3871             :             }
    3872       59508 :             if (OidIsValid(deserialfn_oid))
    3873             :             {
    3874         120 :                 aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner,
    3875             :                                             ACL_EXECUTE);
    3876         120 :                 if (aclresult != ACLCHECK_OK)
    3877           0 :                     aclcheck_error(aclresult, OBJECT_FUNCTION,
    3878           0 :                                    get_func_name(deserialfn_oid));
    3879         120 :                 InvokeFunctionExecuteHook(deserialfn_oid);
    3880             :             }
    3881             :         }
    3882             : 
    3883             :         /*
    3884             :          * Get actual datatypes of the (nominal) aggregate inputs.  These
    3885             :          * could be different from the agg's declared input types, when the
    3886             :          * agg accepts ANY or a polymorphic type.
    3887             :          */
    3888       59508 :         numAggTransFnArgs = get_aggregate_argtypes(aggref,
    3889             :                                                    aggTransFnInputTypes);
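        /*
         * For instance, an aggregate declared over a polymorphic type such
         * as anyelement resolves to the concrete type of this call site
         * here; its own support functions could recover the same information
         * at run time with (a hedged sketch, "argno" being whichever
         * argument is of interest):
         *
         *      Oid         argtype = get_fn_expr_argtype(fcinfo->flinfo, argno);
         */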
    3890             : 
    3891             :         /* Count the "direct" arguments, if any */
    3892       59508 :         numDirectArgs = list_length(aggref->aggdirectargs);
    3893             : 
    3894             :         /* Detect how many arguments to pass to the finalfn */
    3895       59508 :         if (aggform->aggfinalextra)
    3896       16254 :             peragg->numFinalArgs = numAggTransFnArgs + 1;
    3897             :         else
    3898       43254 :             peragg->numFinalArgs = numDirectArgs + 1;
    3899             : 
    3900             :         /* Initialize any direct-argument expressions */
    3901       59508 :         peragg->aggdirectargs = ExecInitExprList(aggref->aggdirectargs,
    3902             :                                                  (PlanState *) aggstate);
    3903             : 
    3904             :         /*
    3905             :          * build expression trees using actual argument & result types for the
    3906             :          * finalfn, if it exists and is required.
    3907             :          */
    3908       59508 :         if (OidIsValid(finalfn_oid))
    3909             :         {
    3910       24266 :             build_aggregate_finalfn_expr(aggTransFnInputTypes,
    3911             :                                          peragg->numFinalArgs,
    3912             :                                          aggtranstype,
    3913             :                                          aggref->aggtype,
    3914             :                                          aggref->inputcollid,
    3915             :                                          finalfn_oid,
    3916             :                                          &finalfnexpr);
    3917       24266 :             fmgr_info(finalfn_oid, &peragg->finalfn);
    3918       24266 :             fmgr_info_set_expr((Node *) finalfnexpr, &peragg->finalfn);
    3919             :         }
    3920             : 
    3921             :         /* get info about the output value's datatype */
    3922       59508 :         get_typlenbyval(aggref->aggtype,
    3923             :                         &peragg->resulttypeLen,
    3924             :                         &peragg->resulttypeByVal);
    3925             : 
    3926             :         /*
    3927             :          * Build working state for invoking the transition function, if we
    3928             :          * haven't done it already.
    3929             :          */
    3930       59508 :         pertrans = &pertransstates[aggref->aggtransno];
    3931       59508 :         if (pertrans->aggref == NULL)
    3932             :         {
    3933             :             Datum       textInitVal;
    3934             :             Datum       initValue;
    3935             :             bool        initValueIsNull;
    3936             :             Oid         transfn_oid;
    3937             : 
    3938             :             /*
    3939             :              * If this aggregation is performing state combines, then instead
    3940             :              * of using the transition function, we'll use the combine
    3941             :              * function.
    3942             :              */
    3943       59226 :             if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
    3944             :             {
    3945        2240 :                 transfn_oid = aggform->aggcombinefn;
    3946             : 
    3947             :                 /* If not set then the planner messed up */
    3948        2240 :                 if (!OidIsValid(transfn_oid))
    3949           0 :                     elog(ERROR, "combinefn not set for aggregate function");
    3950             :             }
    3951             :             else
    3952       56986 :                 transfn_oid = aggform->aggtransfn;
    3953             : 
    3954       59226 :             aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner, ACL_EXECUTE);
    3955       59226 :             if (aclresult != ACLCHECK_OK)
    3956           0 :                 aclcheck_error(aclresult, OBJECT_FUNCTION,
    3957           0 :                                get_func_name(transfn_oid));
    3958       59226 :             InvokeFunctionExecuteHook(transfn_oid);
    3959             : 
    3960             :             /*
    3961             :              * initval is potentially null, so don't try to access it as a
    3962             :              * struct field. Must do it the hard way with SysCacheGetAttr.
    3963             :              */
    3964       59226 :             textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
    3965             :                                           Anum_pg_aggregate_agginitval,
    3966             :                                           &initValueIsNull);
    3967       59226 :             if (initValueIsNull)
    3968       31928 :                 initValue = (Datum) 0;
    3969             :             else
    3970       27298 :                 initValue = GetAggInitVal(textInitVal, aggtranstype);
    3971             : 
    3972       59226 :             if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
    3973             :             {
    3974        2240 :                 Oid         combineFnInputTypes[] = {aggtranstype,
    3975             :                                                      aggtranstype};
    3976             : 
    3977             :                 /*
    3978             :                  * When combining there's only one input, the to-be-combined
    3979             :                  * transition value.  The transition value is not counted
    3980             :                  * here.
    3981             :                  */
    3982        2240 :                 pertrans->numTransInputs = 1;
    3983             : 
    3984             :                 /* aggcombinefn always has two arguments of aggtranstype */
    3985        2240 :                 build_pertrans_for_aggref(pertrans, aggstate, estate,
    3986             :                                           aggref, transfn_oid, aggtranstype,
    3987             :                                           serialfn_oid, deserialfn_oid,
    3988             :                                           initValue, initValueIsNull,
    3989             :                                           combineFnInputTypes, 2);
    3990             : 
    3991             :                 /*
    3992             :                  * Ensure that a combine function to combine INTERNAL states
    3993             :                  * is not strict. This should have been checked during CREATE
    3994             :                  * AGGREGATE, but the strict property could have been changed
    3995             :                  * since then.
    3996             :                  */
    3997        2240 :                 if (pertrans->transfn.fn_strict && aggtranstype == INTERNALOID)
    3998           0 :                     ereport(ERROR,
    3999             :                             (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
    4000             :                              errmsg("combine function with transition type %s must not be declared STRICT",
    4001             :                                     format_type_be(aggtranstype))));
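                /*
                 * A hedged sketch of why that matters: because the combine
                 * function must be non-strict, it will itself see NULL
                 * whenever one side has produced no state yet, roughly along
                 * these lines (names invented, reusing the hypothetical
                 * my_trans_state sketched earlier; aggcontext is assumed to
                 * have come from AggCheckCallContext):
                 *
                 *      my_trans_state *s1 = PG_ARGISNULL(0) ? NULL :
                 *          (my_trans_state *) PG_GETARG_POINTER(0);
                 *      my_trans_state *s2 = PG_ARGISNULL(1) ? NULL :
                 *          (my_trans_state *) PG_GETARG_POINTER(1);
                 *
                 *      if (s1 == NULL && s2 == NULL)
                 *          PG_RETURN_NULL();
                 *      if (s1 == NULL)
                 *      {
                 *          s1 = (my_trans_state *)
                 *              MemoryContextAlloc(aggcontext, sizeof(my_trans_state));
                 *          *s1 = *s2;
                 *          PG_RETURN_POINTER(s1);
                 *      }
                 *      if (s2 == NULL)
                 *          PG_RETURN_POINTER(s1);
                 *      s1->count += s2->count;
                 *      s1->sum += s2->sum;
                 *      PG_RETURN_POINTER(s1);
                 */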
    4002             :             }
    4003             :             else
    4004             :             {
    4005             :                 /* Detect how many arguments to pass to the transfn */
    4006       56986 :                 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
    4007         252 :                     pertrans->numTransInputs = list_length(aggref->args);
    4008             :                 else
    4009       56734 :                     pertrans->numTransInputs = numAggTransFnArgs;
    4010             : 
    4011       56986 :                 build_pertrans_for_aggref(pertrans, aggstate, estate,
    4012             :                                           aggref, transfn_oid, aggtranstype,
    4013             :                                           serialfn_oid, deserialfn_oid,
    4014             :                                           initValue, initValueIsNull,
    4015             :                                           aggTransFnInputTypes,
    4016             :                                           numAggTransFnArgs);
    4017             : 
    4018             :                 /*
    4019             :                  * If the transfn is strict and the initval is NULL, make sure
    4020             :                  * input type and transtype are the same (or at least
    4021             :                  * binary-compatible), so that it's OK to use the first
    4022             :                  * aggregated input value as the initial transValue.  This
    4023             :                  * should have been checked at agg definition time, but we
    4024             :                  * must check again in case the transfn's strictness property
    4025             :                  * has been changed.
    4026             :                  */
    4027       56986 :                 if (pertrans->transfn.fn_strict && pertrans->initValueIsNull)
    4028             :                 {
    4029        5132 :                     if (numAggTransFnArgs <= numDirectArgs ||
    4030        5132 :                         !IsBinaryCoercible(aggTransFnInputTypes[numDirectArgs],
    4031             :                                            aggtranstype))
    4032           0 :                         ereport(ERROR,
    4033             :                                 (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
    4034             :                                  errmsg("aggregate %u needs to have compatible input type and transition type",
    4035             :                                         aggref->aggfnoid)));
    4036             :                 }
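                /*
                 * Concretely, this is the rule that lets an aggregate like
                 * max(int4) use a strict transfn (int4larger) with no
                 * initcond: the first non-NULL input is adopted as the
                 * starting transvalue, which is only type-safe because the
                 * input type and the transition type are both int4.  A
                 * hedged sketch of the shape of such a transfn (int4larger
                 * itself is the real one):
                 *
                 *      Datum
                 *      my_int4_larger(PG_FUNCTION_ARGS)
                 *      {
                 *          int32       a = PG_GETARG_INT32(0);
                 *          int32       b = PG_GETARG_INT32(1);
                 *
                 *          PG_RETURN_INT32(Max(a, b));
                 *      }
                 *
                 * Being strict, it is never called with a NULL transvalue,
                 * so adopting the first input is what makes it work.
                 */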
    4037             :             }
    4038             :         }
    4039             :         else
    4040         282 :             pertrans->aggshared = true;
    4041       59508 :         ReleaseSysCache(aggTuple);
    4042             :     }
    4043             : 
    4044             :     /*
    4045             :      * Last, check whether any more aggregates got added onto the node while
    4046             :      * we processed the expressions for the aggregate arguments (including not
    4047             :      * only the regular arguments and FILTER expressions handled immediately
    4048             :      * above, but any direct arguments we might've handled earlier).  If so,
    4049             :      * we have nested aggregate functions, which is semantically nonsensical,
    4050             :      * so complain.  (This should have been caught by the parser, so we don't
    4051             :      * need to work hard on a helpful error message; but we defend against it
    4052             :      * here anyway, just to be sure.)
    4053             :      */
    4054       53970 :     if (numaggrefs != list_length(aggstate->aggs))
    4055           0 :         ereport(ERROR,
    4056             :                 (errcode(ERRCODE_GROUPING_ERROR),
    4057             :                  errmsg("aggregate function calls cannot be nested")));
    4058             : 
    4059             :     /*
    4060             :      * Build expressions doing all the transition work at once. We build a
    4061             :      * different one for each phase, as the number of transition function
    4062             :      * invocations can differ between phases. Note this'll work both for
    4063             :      * transition and combination functions (although there'll only be one
    4064             :      * phase in the latter case).
    4065             :      */
    4066      155362 :     for (phaseidx = 0; phaseidx < aggstate->numphases; phaseidx++)
    4067             :     {
    4068      101392 :         AggStatePerPhase phase = &aggstate->phases[phaseidx];
    4069      101392 :         bool        dohash = false;
    4070      101392 :         bool        dosort = false;
    4071             : 
    4072             :         /* phase 0 doesn't necessarily exist */
    4073      101392 :         if (!phase->aggnode)
    4074       46932 :             continue;
    4075             : 
    4076       54460 :         if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 1)
    4077             :         {
    4078             :             /*
    4079             :              * Phase one, and only phase one, in a mixed agg performs both
    4080             :              * sorting and aggregation.
    4081             :              */
    4082         238 :             dohash = true;
    4083         238 :             dosort = true;
    4084             :         }
    4085       54222 :         else if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 0)
    4086             :         {
    4087             :             /*
    4088             :              * No need to compute a transition function for an AGG_MIXED phase
    4089             :              * 0 - the contents of the hashtables will have been computed
    4090             :              * during phase 1.
    4091             :              */
    4092         238 :             continue;
    4093             :         }
    4094       53984 :         else if (phase->aggstrategy == AGG_PLAIN ||
    4095        9392 :                  phase->aggstrategy == AGG_SORTED)
    4096             :         {
    4097       47184 :             dohash = false;
    4098       47184 :             dosort = true;
    4099             :         }
    4100        6800 :         else if (phase->aggstrategy == AGG_HASHED)
    4101             :         {
    4102        6800 :             dohash = true;
    4103        6800 :             dosort = false;
    4104             :         }
    4105             :         else
    4106             :             Assert(false);
    4107             : 
    4108       54222 :         phase->evaltrans = ExecBuildAggTrans(aggstate, phase, dosort, dohash,
    4109             :                                              false);
    4110             : 
    4111             :         /* cache compiled expression for outer slot without NULL check */
    4112       54222 :         phase->evaltrans_cache[0][0] = phase->evaltrans;
    4113             :     }
    4114             : 
    4115       53970 :     return aggstate;
    4116             : }
    4117             : 
    4118             : /*
    4119             :  * Build the state needed to calculate a state value for an aggregate.
    4120             :  *
    4121             :  * This initializes all the fields in 'pertrans'. 'aggref' is the aggregate
    4122             :  * to initialize the state for. 'transfn_oid', 'aggtranstype', and the rest
    4123             :  * of the arguments could be calculated from 'aggref', but the caller has
    4124             :  * calculated them already, so might as well pass them.
    4125             :  *
    4126             :  * 'transfn_oid' may be either the Oid of the aggtransfn or the aggcombinefn.
    4127             :  */
    4128             : static void
    4129       59226 : build_pertrans_for_aggref(AggStatePerTrans pertrans,
    4130             :                           AggState *aggstate, EState *estate,
    4131             :                           Aggref *aggref,
    4132             :                           Oid transfn_oid, Oid aggtranstype,
    4133             :                           Oid aggserialfn, Oid aggdeserialfn,
    4134             :                           Datum initValue, bool initValueIsNull,
    4135             :                           Oid *inputTypes, int numArguments)
    4136             : {
    4137       59226 :     int         numGroupingSets = Max(aggstate->maxsets, 1);
    4138             :     Expr       *transfnexpr;
    4139             :     int         numTransArgs;
    4140       59226 :     Expr       *serialfnexpr = NULL;
    4141       59226 :     Expr       *deserialfnexpr = NULL;
    4142             :     ListCell   *lc;
    4143             :     int         numInputs;
    4144             :     int         numDirectArgs;
    4145             :     List       *sortlist;
    4146             :     int         numSortCols;
    4147             :     int         numDistinctCols;
    4148             :     int         i;
    4149             : 
    4150             :     /* Begin filling in the pertrans data */
    4151       59226 :     pertrans->aggref = aggref;
    4152       59226 :     pertrans->aggshared = false;
    4153       59226 :     pertrans->aggCollation = aggref->inputcollid;
    4154       59226 :     pertrans->transfn_oid = transfn_oid;
    4155       59226 :     pertrans->serialfn_oid = aggserialfn;
    4156       59226 :     pertrans->deserialfn_oid = aggdeserialfn;
    4157       59226 :     pertrans->initValue = initValue;
    4158       59226 :     pertrans->initValueIsNull = initValueIsNull;
    4159             : 
    4160             :     /* Count the "direct" arguments, if any */
    4161       59226 :     numDirectArgs = list_length(aggref->aggdirectargs);
    4162             : 
    4163             :     /* Count the number of aggregated input columns */
    4164       59226 :     pertrans->numInputs = numInputs = list_length(aggref->args);
    4165             : 
    4166       59226 :     pertrans->aggtranstype = aggtranstype;
    4167             : 
    4168             :     /* account for the current transition state */
    4169       59226 :     numTransArgs = pertrans->numTransInputs + 1;
    4170             : 
    4171             :     /*
    4172             :      * Set up infrastructure for calling the transfn.  Note that invtransfn is
    4173             :      * not needed here.
    4174             :      */
    4175       59226 :     build_aggregate_transfn_expr(inputTypes,
    4176             :                                  numArguments,
    4177             :                                  numDirectArgs,
    4178       59226 :                                  aggref->aggvariadic,
    4179             :                                  aggtranstype,
    4180             :                                  aggref->inputcollid,
    4181             :                                  transfn_oid,
    4182             :                                  InvalidOid,
    4183             :                                  &transfnexpr,
    4184             :                                  NULL);
    4185             : 
    4186       59226 :     fmgr_info(transfn_oid, &pertrans->transfn);
    4187       59226 :     fmgr_info_set_expr((Node *) transfnexpr, &pertrans->transfn);
    4188             : 
    4189       59226 :     pertrans->transfn_fcinfo =
    4190       59226 :         (FunctionCallInfo) palloc(SizeForFunctionCallInfo(numTransArgs));
    4191       59226 :     InitFunctionCallInfoData(*pertrans->transfn_fcinfo,
    4192             :                              &pertrans->transfn,
    4193             :                              numTransArgs,
    4194             :                              pertrans->aggCollation,
    4195             :                              (Node *) aggstate, NULL);
    4196             : 
    4197             :     /* get info about the state value's datatype */
    4198       59226 :     get_typlenbyval(aggtranstype,
    4199             :                     &pertrans->transtypeLen,
    4200             :                     &pertrans->transtypeByVal);
    4201             : 
    4202       59226 :     if (OidIsValid(aggserialfn))
    4203             :     {
    4204         336 :         build_aggregate_serialfn_expr(aggserialfn,
    4205             :                                       &serialfnexpr);
    4206         336 :         fmgr_info(aggserialfn, &pertrans->serialfn);
    4207         336 :         fmgr_info_set_expr((Node *) serialfnexpr, &pertrans->serialfn);
    4208             : 
    4209         336 :         pertrans->serialfn_fcinfo =
    4210         336 :             (FunctionCallInfo) palloc(SizeForFunctionCallInfo(1));
    4211         336 :         InitFunctionCallInfoData(*pertrans->serialfn_fcinfo,
    4212             :                                  &pertrans->serialfn,
    4213             :                                  1,
    4214             :                                  InvalidOid,
    4215             :                                  (Node *) aggstate, NULL);
    4216             :     }
    4217             : 
    4218       59226 :     if (OidIsValid(aggdeserialfn))
    4219             :     {
    4220         120 :         build_aggregate_deserialfn_expr(aggdeserialfn,
    4221             :                                         &deserialfnexpr);
    4222         120 :         fmgr_info(aggdeserialfn, &pertrans->deserialfn);
    4223         120 :         fmgr_info_set_expr((Node *) deserialfnexpr, &pertrans->deserialfn);
    4224             : 
    4225         120 :         pertrans->deserialfn_fcinfo =
    4226         120 :             (FunctionCallInfo) palloc(SizeForFunctionCallInfo(2));
    4227         120 :         InitFunctionCallInfoData(*pertrans->deserialfn_fcinfo,
    4228             :                                  &pertrans->deserialfn,
    4229             :                                  2,
    4230             :                                  InvalidOid,
    4231             :                                  (Node *) aggstate, NULL);
    4232             :     }
    4233             : 
    4234             :     /*
    4235             :      * If we're doing either DISTINCT or ORDER BY for a plain agg, then we
    4236             :      * have a list of SortGroupClause nodes; fish out the data in them and
    4237             :      * stick them into arrays.  We ignore ORDER BY for an ordered-set agg,
    4238             :      * however; the agg's transfn and finalfn are responsible for that.
    4239             :      *
    4240             :      * When the planner has set the aggpresorted flag, the input to the
    4241             :      * aggregate is already correctly sorted.  For ORDER BY aggregates we can
    4242             :      * simply treat these as normal aggregates.  For presorted DISTINCT
    4243             :      * aggregates an extra step must be added to remove duplicate consecutive
    4244             :      * inputs.
    4245             :      *
    4246             :      * Note that by construction, if there is a DISTINCT clause then the ORDER
    4247             :      * BY clause is a prefix of it (see transformDistinctClause).
    4248             :      */
    4249       59226 :     if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
    4250             :     {
    4251         252 :         sortlist = NIL;
    4252         252 :         numSortCols = numDistinctCols = 0;
    4253         252 :         pertrans->aggsortrequired = false;
    4254             :     }
    4255       58974 :     else if (aggref->aggpresorted && aggref->aggdistinct == NIL)
    4256             :     {
    4257        2106 :         sortlist = NIL;
    4258        2106 :         numSortCols = numDistinctCols = 0;
    4259        2106 :         pertrans->aggsortrequired = false;
    4260             :     }
    4261       56868 :     else if (aggref->aggdistinct)
    4262             :     {
    4263         588 :         sortlist = aggref->aggdistinct;
    4264         588 :         numSortCols = numDistinctCols = list_length(sortlist);
    4265             :         Assert(numSortCols >= list_length(aggref->aggorder));
    4266         588 :         pertrans->aggsortrequired = !aggref->aggpresorted;
    4267             :     }
    4268             :     else
    4269             :     {
    4270       56280 :         sortlist = aggref->aggorder;
    4271       56280 :         numSortCols = list_length(sortlist);
    4272       56280 :         numDistinctCols = 0;
    4273       56280 :         pertrans->aggsortrequired = (numSortCols > 0);
    4274             :     }
    4275             : 
    4276       59226 :     pertrans->numSortCols = numSortCols;
    4277       59226 :     pertrans->numDistinctCols = numDistinctCols;
    4278             : 
    4279             :     /*
    4280             :      * If we have either sorting or filtering to do, create a tupledesc and
    4281             :      * slot corresponding to the aggregated inputs (including sort
    4282             :      * expressions) of the agg.
    4283             :      */
    4284       59226 :     if (numSortCols > 0 || aggref->aggfilter)
    4285             :     {
    4286        1436 :         pertrans->sortdesc = ExecTypeFromTL(aggref->args);
    4287        1436 :         pertrans->sortslot =
    4288        1436 :             ExecInitExtraTupleSlot(estate, pertrans->sortdesc,
    4289             :                                    &TTSOpsMinimalTuple);
    4290             :     }
    4291             : 
    4292       59226 :     if (numSortCols > 0)
    4293             :     {
    4294             :         /*
    4295             :          * We don't implement DISTINCT or ORDER BY aggs in the HASHED case
    4296             :          * (yet)
    4297             :          */
    4298             :         Assert(aggstate->aggstrategy != AGG_HASHED && aggstate->aggstrategy != AGG_MIXED);
    4299             : 
    4300             :         /* ORDER BY aggregates are not supported with partial aggregation */
    4301             :         Assert(!DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
    4302             : 
    4303             :         /* If we have only one input, we need its len/byval info. */
    4304         726 :         if (numInputs == 1)
    4305             :         {
    4306         576 :             get_typlenbyval(inputTypes[numDirectArgs],
    4307             :                             &pertrans->inputtypeLen,
    4308             :                             &pertrans->inputtypeByVal);
    4309             :         }
    4310         150 :         else if (numDistinctCols > 0)
    4311             :         {
    4312             :             /* we will need an extra slot to store prior values */
    4313         108 :             pertrans->uniqslot =
    4314         108 :                 ExecInitExtraTupleSlot(estate, pertrans->sortdesc,
    4315             :                                        &TTSOpsMinimalTuple);
    4316             :         }
    4317             : 
    4318             :         /* Extract the sort information for use later */
    4319         726 :         pertrans->sortColIdx =
    4320         726 :             (AttrNumber *) palloc(numSortCols * sizeof(AttrNumber));
    4321         726 :         pertrans->sortOperators =
    4322         726 :             (Oid *) palloc(numSortCols * sizeof(Oid));
    4323         726 :         pertrans->sortCollations =
    4324         726 :             (Oid *) palloc(numSortCols * sizeof(Oid));
    4325         726 :         pertrans->sortNullsFirst =
    4326         726 :             (bool *) palloc(numSortCols * sizeof(bool));
    4327             : 
    4328         726 :         i = 0;
    4329        1650 :         foreach(lc, sortlist)
    4330             :         {
    4331         924 :             SortGroupClause *sortcl = (SortGroupClause *) lfirst(lc);
    4332         924 :             TargetEntry *tle = get_sortgroupclause_tle(sortcl, aggref->args);
    4333             : 
    4334             :             /* the parser should have made sure of this */
    4335             :             Assert(OidIsValid(sortcl->sortop));
    4336             : 
    4337         924 :             pertrans->sortColIdx[i] = tle->resno;
    4338         924 :             pertrans->sortOperators[i] = sortcl->sortop;
    4339         924 :             pertrans->sortCollations[i] = exprCollation((Node *) tle->expr);
    4340         924 :             pertrans->sortNullsFirst[i] = sortcl->nulls_first;
    4341         924 :             i++;
    4342             :         }
    4343             :         Assert(i == numSortCols);
    4344             :     }
    4345             : 
    4346       59226 :     if (aggref->aggdistinct)
    4347             :     {
    4348             :         Oid        *ops;
    4349             : 
    4350             :         Assert(numArguments > 0);
    4351             :         Assert(list_length(aggref->aggdistinct) == numDistinctCols);
    4352             : 
    4353         588 :         ops = palloc(numDistinctCols * sizeof(Oid));
    4354             : 
    4355         588 :         i = 0;
    4356        1356 :         foreach(lc, aggref->aggdistinct)
    4357         768 :             ops[i++] = ((SortGroupClause *) lfirst(lc))->eqop;
    4358             : 
    4359             :         /* lookup / build the necessary comparators */
    4360         588 :         if (numDistinctCols == 1)
    4361         480 :             fmgr_info(get_opcode(ops[0]), &pertrans->equalfnOne);
    4362             :         else
    4363         108 :             pertrans->equalfnMulti =
    4364         108 :                 execTuplesMatchPrepare(pertrans->sortdesc,
    4365             :                                        numDistinctCols,
    4366         108 :                                        pertrans->sortColIdx,
    4367             :                                        ops,
    4368         108 :                                        pertrans->sortCollations,
    4369             :                                        &aggstate->ss.ps);
    4370         588 :         pfree(ops);
    4371             :     }
    4372             : 
    4373       59226 :     pertrans->sortstates = palloc0_array(Tuplesortstate *, numGroupingSets);
    4374       59226 : }
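
/*
 * A hedged illustration, not used by this file: how the single-column
 * DISTINCT comparator set up in build_pertrans_for_aggref (equalfnOne) is
 * typically consumed.  The wrapper name is invented; the point is that the
 * comparison is just a FunctionCall2Coll on the equality operator's
 * underlying function (looked up via get_opcode), under the aggregate's
 * input collation.
 */
#ifdef NOT_USED
static bool
example_distinct_datums_equal(AggStatePerTrans pertrans,
                              Datum oldVal, Datum newVal)
{
    return DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne,
                                          pertrans->aggCollation,
                                          oldVal, newVal));
}
#endif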
    4375             : 
    4376             : 
    4377             : static Datum
    4378       27298 : GetAggInitVal(Datum textInitVal, Oid transtype)
    4379             : {
    4380             :     Oid         typinput,
    4381             :                 typioparam;
    4382             :     char       *strInitVal;
    4383             :     Datum       initVal;
    4384             : 
    4385       27298 :     getTypeInputInfo(transtype, &typinput, &typioparam);
    4386       27298 :     strInitVal = TextDatumGetCString(textInitVal);
    4387       27298 :     initVal = OidInputFunctionCall(typinput, strInitVal,
    4388             :                                    typioparam, -1);
    4389       27298 :     pfree(strInitVal);
    4390       27298 :     return initVal;
    4391             : }
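
/*
 * A hedged, concrete illustration of the conversion GetAggInitVal performs
 * (not used by this file): avg(int4) has transition type int8[] and initcond
 * '{0,0}', so building its starting transvalue amounts to pushing that text
 * through the array type's input function.  The function name is invented.
 */
#ifdef NOT_USED
static Datum
example_avg_int4_initval(void)
{
    Oid         typinput,
                typioparam;

    getTypeInputInfo(INT8ARRAYOID, &typinput, &typioparam);
    return OidInputFunctionCall(typinput, "{0,0}", typioparam, -1);
}
#endif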
    4392             : 
    4393             : void
    4394       53766 : ExecEndAgg(AggState *node)
    4395             : {
    4396             :     PlanState  *outerPlan;
    4397             :     int         transno;
    4398       53766 :     int         numGroupingSets = Max(node->maxsets, 1);
    4399             :     int         setno;
    4400             : 
    4401             :     /*
    4402             :      * When ending a parallel worker, copy the statistics gathered by the
    4403             :      * worker back into shared memory so that it can be picked up by the main
    4404             :      * process to report in EXPLAIN ANALYZE.
    4405             :      */
    4406       53766 :     if (node->shared_info && IsParallelWorker())
    4407             :     {
    4408             :         AggregateInstrumentation *si;
    4409             : 
    4410             :         Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
    4411         168 :         si = &node->shared_info->sinstrument[ParallelWorkerNumber];
    4412         168 :         si->hash_batches_used = node->hash_batches_used;
    4413         168 :         si->hash_disk_used = node->hash_disk_used;
    4414         168 :         si->hash_mem_peak = node->hash_mem_peak;
    4415             :     }
    4416             : 
    4417             :     /* Make sure we have closed any open tuplesorts */
    4418             : 
    4419       53766 :     if (node->sort_in)
    4420         162 :         tuplesort_end(node->sort_in);
    4421       53766 :     if (node->sort_out)
    4422          48 :         tuplesort_end(node->sort_out);
    4423             : 
    4424       53766 :     hashagg_reset_spill_state(node);
    4425             : 
    4426             :     /* Release hash tables too */
    4427       53766 :     if (node->hash_metacxt != NULL)
    4428             :     {
    4429        7030 :         MemoryContextDelete(node->hash_metacxt);
    4430        7030 :         node->hash_metacxt = NULL;
    4431             :     }
    4432       53766 :     if (node->hash_tuplescxt != NULL)
    4433             :     {
    4434        7030 :         MemoryContextDelete(node->hash_tuplescxt);
    4435        7030 :         node->hash_tuplescxt = NULL;
    4436             :     }
    4437             : 
    4438      112784 :     for (transno = 0; transno < node->numtrans; transno++)
    4439             :     {
    4440       59018 :         AggStatePerTrans pertrans = &node->pertrans[transno];
    4441             : 
    4442      119074 :         for (setno = 0; setno < numGroupingSets; setno++)
    4443             :         {
    4444       60056 :             if (pertrans->sortstates[setno])
    4445           0 :                 tuplesort_end(pertrans->sortstates[setno]);
    4446             :         }
    4447             :     }
    4448             : 
    4449             :     /* And ensure any agg shutdown callbacks have been called */
    4450      108396 :     for (setno = 0; setno < numGroupingSets; setno++)
    4451       54630 :         ReScanExprContext(node->aggcontexts[setno]);
    4452       53766 :     if (node->hashcontext)
    4453        7030 :         ReScanExprContext(node->hashcontext);
    4454             : 
    4455       53766 :     outerPlan = outerPlanState(node);
    4456       53766 :     ExecEndNode(outerPlan);
    4457       53766 : }
    4458             : 
    4459             : void
    4460       54772 : ExecReScanAgg(AggState *node)
    4461             : {
    4462       54772 :     ExprContext *econtext = node->ss.ps.ps_ExprContext;
    4463       54772 :     PlanState  *outerPlan = outerPlanState(node);
    4464       54772 :     Agg        *aggnode = (Agg *) node->ss.ps.plan;
    4465             :     int         transno;
    4466       54772 :     int         numGroupingSets = Max(node->maxsets, 1);
    4467             :     int         setno;
    4468             : 
    4469       54772 :     node->agg_done = false;
    4470             : 
    4471       54772 :     if (node->aggstrategy == AGG_HASHED)
    4472             :     {
    4473             :         /*
    4474             :          * In the hashed case, if we haven't yet built the hash table then we
    4475             :          * can just return; nothing done yet, so nothing to undo. If subnode's
    4476             :          * chgParam is not NULL then it will be re-scanned by ExecProcNode,
    4477             :          * else no reason to re-scan it at all.
    4478             :          */
    4479       13664 :         if (!node->table_filled)
    4480         152 :             return;
    4481             : 
    4482             :         /*
    4483             :          * If we do have the hash table, and it never spilled, and the subplan
    4484             :          * does not have any parameter changes, and none of our own parameter
    4485             :          * changes affect input expressions of the aggregated functions, then
    4486             :          * we can just rescan the existing hash table; no need to build it
    4487             :          * again.
    4488             :          */
    4489       13512 :         if (outerPlan->chgParam == NULL && !node->hash_ever_spilled &&
    4490         908 :             !bms_overlap(node->ss.ps.chgParam, aggnode->aggParams))
    4491             :         {
    4492         884 :             ResetTupleHashIterator(node->perhash[0].hashtable,
    4493             :                                    &node->perhash[0].hashiter);
    4494         884 :             select_current_set(node, 0, true);
    4495         884 :             return;
    4496             :         }
    4497             :     }
    4498             : 
    4499             :     /* Make sure we have closed any open tuplesorts */
    4500      121952 :     for (transno = 0; transno < node->numtrans; transno++)
    4501             :     {
    4502      136468 :         for (setno = 0; setno < numGroupingSets; setno++)
    4503             :         {
    4504       68252 :             AggStatePerTrans pertrans = &node->pertrans[transno];
    4505             : 
    4506       68252 :             if (pertrans->sortstates[setno])
    4507             :             {
    4508           0 :                 tuplesort_end(pertrans->sortstates[setno]);
    4509           0 :                 pertrans->sortstates[setno] = NULL;
    4510             :             }
    4511             :         }
    4512             :     }
    4513             : 
    4514             :     /*
    4515             :      * We don't need to ReScanExprContext the output tuple context here;
    4516             :      * ExecReScan already did it. But we do need to reset our per-grouping-set
    4517             :      * contexts, which may have transvalues stored in them. (We use rescan
    4518             :      * rather than just reset because transfns may have registered callbacks
    4519             :      * that need to be run now.) For the AGG_HASHED case, see below.
    4520             :      */
    4521             : 
    4522      107508 :     for (setno = 0; setno < numGroupingSets; setno++)
    4523             :     {
    4524       53772 :         ReScanExprContext(node->aggcontexts[setno]);
    4525             :     }
    4526             : 
    4527             :     /* Release first tuple of group, if we have made a copy */
    4528       53736 :     if (node->grp_firstTuple != NULL)
    4529             :     {
    4530           0 :         heap_freetuple(node->grp_firstTuple);
    4531           0 :         node->grp_firstTuple = NULL;
    4532             :     }
    4533       53736 :     ExecClearTuple(node->ss.ss_ScanTupleSlot);
    4534             : 
    4535             :     /* Forget current agg values */
    4536      121952 :     MemSet(econtext->ecxt_aggvalues, 0, sizeof(Datum) * node->numaggs);
    4537       53736 :     MemSet(econtext->ecxt_aggnulls, 0, sizeof(bool) * node->numaggs);
    4538             : 
    4539             :     /*
    4540             :      * With AGG_HASHED/MIXED, the hash table is allocated in a sub-context of
    4541             :      * the hashcontext. This used to be an issue, but now, resetting a context
    4542             :      * automatically deletes sub-contexts too.
    4543             :      */
    4544       53736 :     if (node->aggstrategy == AGG_HASHED || node->aggstrategy == AGG_MIXED)
    4545             :     {
    4546       12658 :         hashagg_reset_spill_state(node);
    4547             : 
    4548       12658 :         node->hash_ever_spilled = false;
    4549       12658 :         node->hash_spill_mode = false;
    4550       12658 :         node->hash_ngroups_current = 0;
    4551             : 
    4552       12658 :         ReScanExprContext(node->hashcontext);
    4553             :         /* Rebuild empty hash table(s) */
    4554       12658 :         build_hash_tables(node);
    4555       12658 :         node->table_filled = false;
    4556             :         /* iterator will be reset when the table is filled */
    4557             : 
    4558       12658 :         hashagg_recompile_expressions(node, false, false);
    4559             :     }
    4560             : 
    4561       53736 :     if (node->aggstrategy != AGG_HASHED)
    4562             :     {
    4563             :         /*
    4564             :          * Reset the per-group state (in particular, mark transvalues null)
    4565             :          */
    4566       82252 :         for (setno = 0; setno < numGroupingSets; setno++)
    4567             :         {
    4568      177552 :             MemSet(node->pergroups[setno], 0,
    4569             :                    sizeof(AggStatePerGroupData) * node->numaggs);
    4570             :         }
    4571             : 
    4572             :         /* reset to phase 1 */
    4573       41108 :         initialize_phase(node, 1);
    4574             : 
    4575       41108 :         node->input_done = false;
    4576       41108 :         node->projected_set = -1;
    4577             :     }
    4578             : 
    4579       53736 :     if (outerPlan->chgParam == NULL)
    4580         188 :         ExecReScan(outerPlan);
    4581             : }
    4582             : 
    4583             : 
    4584             : /***********************************************************************
    4585             :  * API exposed to aggregate functions
    4586             :  ***********************************************************************/
    4587             : 
    4588             : 
    4589             : /*
    4590             :  * AggCheckCallContext - test if a SQL function is being called as an aggregate
    4591             :  *
    4592             :  * The transition and/or final functions of an aggregate may want to verify
    4593             :  * that they are being called as aggregates, rather than as plain SQL
    4594             :  * functions.  They should use this function to do so.  The return value
    4595             :  * is nonzero if being called as an aggregate, or zero if not.  (Specific
    4596             :  * nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more
    4597             :  * values could conceivably appear in future.)
    4598             :  *
    4599             :  * If aggcontext isn't NULL, the function also stores at *aggcontext the
    4600             :  * identity of the memory context that aggregate transition values are being
    4601             :  * stored in.  Note that the same aggregate call site (flinfo) may be called
    4602             :  * interleaved on different transition values in different contexts, so it's
    4603             :  * not kosher to cache aggcontext under fn_extra.  It is, however, kosher to
    4604             :  * cache it in the transvalue itself (for internal-type transvalues).
    4605             :  */
    4606             : int
    4607     5441670 : AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
    4608             : {
    4609     5441670 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4610             :     {
    4611     5430284 :         if (aggcontext)
    4612             :         {
    4613     2752224 :             AggState   *aggstate = ((AggState *) fcinfo->context);
    4614     2752224 :             ExprContext *cxt = aggstate->curaggcontext;
    4615             : 
    4616     2752224 :             *aggcontext = cxt->ecxt_per_tuple_memory;
    4617             :         }
    4618     5430284 :         return AGG_CONTEXT_AGGREGATE;
    4619             :     }
    4620       11386 :     if (fcinfo->context && IsA(fcinfo->context, WindowAggState))
    4621             :     {
    4622        9512 :         if (aggcontext)
    4623         710 :             *aggcontext = ((WindowAggState *) fcinfo->context)->curaggcontext;
    4624        9512 :         return AGG_CONTEXT_WINDOW;
    4625             :     }
    4626             : 
    4627             :     /* this is just to prevent "uninitialized variable" warnings */
    4628        1874 :     if (aggcontext)
    4629        1826 :         *aggcontext = NULL;
    4630        1874 :     return 0;
    4631             : }
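
/*
 * A hedged usage sketch, not part of this file: a C transition function
 * typically calls AggCheckCallContext both to reject being called outside an
 * aggregate and to learn which memory context its transition state must live
 * in.  All names below are invented; a real extension would additionally need
 * PG_FUNCTION_INFO_V1 and a CREATE AGGREGATE definition.
 */
#ifdef NOT_USED
typedef struct example_count_state
{
    int64       count;
} example_count_state;

Datum
example_count_transfn(PG_FUNCTION_ARGS)
{
    MemoryContext aggcontext;
    example_count_state *state;

    if (!AggCheckCallContext(fcinfo, &aggcontext))
        elog(ERROR, "example_count_transfn called in non-aggregate context");

    if (PG_ARGISNULL(0))
    {
        /* first row of the group: allocate the state in the aggcontext */
        state = (example_count_state *)
            MemoryContextAllocZero(aggcontext, sizeof(example_count_state));
    }
    else
        state = (example_count_state *) PG_GETARG_POINTER(0);

    if (!PG_ARGISNULL(1))
        state->count++;

    PG_RETURN_POINTER(state);
}
#endif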
    4632             : 
    4633             : /*
    4634             :  * AggGetAggref - allow an aggregate support function to get its Aggref
    4635             :  *
    4636             :  * If the function is being called as an aggregate support function,
    4637             :  * return the Aggref node for the aggregate call.  Otherwise, return NULL.
    4638             :  *
    4639             :  * Aggregates sharing the same inputs and transition functions can get
    4640             :  * merged into a single transition calculation.  If the transition function
    4641             :  * calls AggGetAggref, it will get one of the Aggrefs for which it is
    4642             :  * executing.  It must therefore not pay attention to the Aggref fields that
    4643             :  * relate to the final function, as those are indeterminate.  But if a final
    4644             :  * function calls AggGetAggref, it will get a precise result.
    4645             :  *
    4646             :  * Note that if an aggregate is being used as a window function, this will
    4647             :  * return NULL.  We could provide a similar function to return the relevant
    4648             :  * WindowFunc node in such cases, but it's not needed yet.
    4649             :  */
    4650             : Aggref *
    4651         246 : AggGetAggref(FunctionCallInfo fcinfo)
    4652             : {
    4653         246 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4654             :     {
    4655         246 :         AggState   *aggstate = (AggState *) fcinfo->context;
    4656             :         AggStatePerAgg curperagg;
    4657             :         AggStatePerTrans curpertrans;
    4658             : 
    4659             :         /* check curperagg (valid when in a final function) */
    4660         246 :         curperagg = aggstate->curperagg;
    4661             : 
    4662         246 :         if (curperagg)
    4663           0 :             return curperagg->aggref;
    4664             : 
    4665             :         /* check curpertrans (valid when in a transition function) */
    4666         246 :         curpertrans = aggstate->curpertrans;
    4667             : 
    4668         246 :         if (curpertrans)
    4669         246 :             return curpertrans->aggref;
    4670             :     }
    4671           0 :     return NULL;
    4672             : }
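
/*
 * A hedged usage sketch, not part of this file: an aggregate support
 * function (typically for an ordered-set aggregate) can call AggGetAggref to
 * inspect how it was invoked, for example how many aggregated and direct
 * arguments the call site supplied.  Names invented.
 */
#ifdef NOT_USED
static void
example_inspect_aggref(FunctionCallInfo fcinfo)
{
    Aggref     *aggref = AggGetAggref(fcinfo);

    if (aggref == NULL)
        elog(ERROR, "not called as an aggregate");

    elog(DEBUG1, "aggregate %u: %d aggregated arg(s), %d direct arg(s)",
         aggref->aggfnoid,
         list_length(aggref->args),
         list_length(aggref->aggdirectargs));
}
#endif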
    4673             : 
    4674             : /*
    4675             :  * AggGetTempMemoryContext - fetch short-term memory context for aggregates
    4676             :  *
    4677             :  * This is useful in agg final functions; the context returned is one that
    4678             :  * the final function can safely reset as desired.  This isn't useful for
    4679             :  * transition functions, since the context returned MAY (we don't promise)
    4680             :  * be the same as the context those are called in.
    4681             :  *
    4682             :  * As above, this is currently not useful for aggs called as window functions.
    4683             :  */
    4684             : MemoryContext
    4685           0 : AggGetTempMemoryContext(FunctionCallInfo fcinfo)
    4686             : {
    4687           0 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4688             :     {
    4689           0 :         AggState   *aggstate = (AggState *) fcinfo->context;
    4690             : 
    4691           0 :         return aggstate->tmpcontext->ecxt_per_tuple_memory;
    4692             :     }
    4693           0 :     return NULL;
    4694             : }
    4695             : 
    4696             : /*
    4697             :  * AggStateIsShared - find out whether transition state is shared
    4698             :  *
    4699             :  * If the function is being called as an aggregate support function,
    4700             :  * return true if the aggregate's transition state is shared across
    4701             :  * multiple aggregates, false if it is not.
    4702             :  *
    4703             :  * Returns true if not called as an aggregate support function.
    4704             :  * This is intended as a conservative answer, ie "no you'd better not
    4705             :  * scribble on your input".  In particular, will return true if the
    4706             :  * aggregate is being used as a window function, which is a scenario
    4707             :  * in which changing the transition state is a bad idea.  We might
    4708             :  * want to refine the behavior for the window case in future.
    4709             :  */
    4710             : bool
    4711         246 : AggStateIsShared(FunctionCallInfo fcinfo)
    4712             : {
    4713         246 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4714             :     {
    4715         246 :         AggState   *aggstate = (AggState *) fcinfo->context;
    4716             :         AggStatePerAgg curperagg;
    4717             :         AggStatePerTrans curpertrans;
    4718             : 
    4719             :         /* check curperagg (valid when in a final function) */
    4720         246 :         curperagg = aggstate->curperagg;
    4721             : 
    4722         246 :         if (curperagg)
    4723           0 :             return aggstate->pertrans[curperagg->transno].aggshared;
    4724             : 
    4725             :         /* check curpertrans (valid when in a transition function) */
    4726         246 :         curpertrans = aggstate->curpertrans;
    4727             : 
    4728         246 :         if (curpertrans)
    4729         246 :             return curpertrans->aggshared;
    4730             :     }
    4731           0 :     return true;
    4732             : }
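
For illustration only (not part of nodeAgg.c): a hypothetical final function that consults AggStateIsShared before finishing destructively. MyTransState, finalize_copy, and finalize_in_place are invented for the sketch.

    #include "postgres.h"
    #include "fmgr.h"

    /* hypothetical transition-state type and finishing helpers */
    typedef struct MyTransState MyTransState;
    extern Datum finalize_copy(MyTransState *state);
    extern Datum finalize_in_place(MyTransState *state);

    Datum
    myagg_final(PG_FUNCTION_ARGS)
    {
        MyTransState *state;

        if (PG_ARGISNULL(0))
            PG_RETURN_NULL();
        state = (MyTransState *) PG_GETARG_POINTER(0);

        if (AggStateIsShared(fcinfo))
        {
            /* state may be shared, or we're a window function: don't scribble */
            return finalize_copy(state);
        }

        /* sole owner of the transition state, so a destructive finish is OK */
        return finalize_in_place(state);
    }
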
    4733             : 
    4734             : /*
    4735             :  * AggRegisterCallback - register a cleanup callback for an aggregate
    4736             :  *
    4737             :  * This is useful for aggs to register shutdown callbacks, which will ensure
    4738             :  * that non-memory resources are freed.  The callback will occur just before
    4739             :  * the associated aggcontext (as returned by AggCheckCallContext) is reset,
    4740             :  * either between groups or as a result of rescanning the query.  The callback
    4741             :  * will NOT be called on error paths.  The typical use-case is for freeing of
    4742             :  * tuplestores or tuplesorts maintained in aggcontext, or pins held by slots
    4743             :  * created by the agg functions.  (The callback will not be called until after
    4744             :  * the result of the finalfn is no longer needed, so it's safe for the finalfn
    4745             :  * to return data that will be freed by the callback.)
    4746             :  *
    4747             :  * As above, this is currently not useful for aggs called as window functions.
    4748             :  */
    4749             : void
    4750         660 : AggRegisterCallback(FunctionCallInfo fcinfo,
    4751             :                     ExprContextCallbackFunction func,
    4752             :                     Datum arg)
    4753             : {
    4754         660 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4755             :     {
    4756         660 :         AggState   *aggstate = (AggState *) fcinfo->context;
    4757         660 :         ExprContext *cxt = aggstate->curaggcontext;
    4758             : 
    4759         660 :         RegisterExprContextCallback(cxt, func, arg);
    4760             : 
    4761         660 :         return;
    4762             :     }
    4763           0 :     elog(ERROR, "aggregate function cannot register a callback in this context");
    4764             : }
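
For illustration only (not part of nodeAgg.c): a sketch of the kind of use described above, assuming a hypothetical transition function that keeps a tuplesort in aggcontext. myagg_trans, myagg_shutdown, and create_my_tuplesort are invented names; the sort setup and input handling are elided.

    #include "postgres.h"
    #include "fmgr.h"
    #include "utils/tuplesort.h"

    /* hypothetical helper that builds a tuplesort for this aggregate */
    extern Tuplesortstate *create_my_tuplesort(void);

    /* shutdown callback: release the tuplesort before aggcontext is reset */
    static void
    myagg_shutdown(Datum arg)
    {
        Tuplesortstate *sortstate = (Tuplesortstate *) DatumGetPointer(arg);

        tuplesort_end(sortstate);
    }

    Datum
    myagg_trans(PG_FUNCTION_ARGS)
    {
        Tuplesortstate *sortstate;
        MemoryContext aggcontext;

        if (!AggCheckCallContext(fcinfo, &aggcontext))
            elog(ERROR, "myagg_trans called in non-aggregate context");

        if (PG_ARGISNULL(0))
        {
            /* first input for this group: set up the sort in aggcontext */
            MemoryContext oldcontext = MemoryContextSwitchTo(aggcontext);

            sortstate = create_my_tuplesort();

            /* ensure the sort's memory and files go away with the group */
            AggRegisterCallback(fcinfo, myagg_shutdown,
                                PointerGetDatum(sortstate));

            MemoryContextSwitchTo(oldcontext);
        }
        else
            sortstate = (Tuplesortstate *) PG_GETARG_POINTER(0);

        /* ... feed the current input value into the sort (elided) ... */

        PG_RETURN_POINTER(sortstate);
    }

Because the callback fires just before aggcontext is reset, the tuplesort's memory and temporary files are released at the same time the rest of the group's state goes away, which is exactly the use-case the comment above describes.
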
    4765             : 
    4766             : 
    4767             : /* ----------------------------------------------------------------
    4768             :  *                      Parallel Query Support
    4769             :  * ----------------------------------------------------------------
    4770             :  */
    4771             : 
    4772             : /* ----------------------------------------------------------------
    4773             :  *      ExecAggEstimate
    4774             :  *
    4775             :  *      Estimate space required to propagate aggregate statistics.
    4776             :  * ----------------------------------------------------------------
    4777             :  */
    4778             : void
    4779         584 : ExecAggEstimate(AggState *node, ParallelContext *pcxt)
    4780             : {
    4781             :     Size        size;
    4782             : 
    4783             :     /* don't need this if not instrumenting or no workers */
    4784         584 :     if (!node->ss.ps.instrument || pcxt->nworkers == 0)
    4785         482 :         return;
    4786             : 
    4787         102 :     size = mul_size(pcxt->nworkers, sizeof(AggregateInstrumentation));
    4788         102 :     size = add_size(size, offsetof(SharedAggInfo, sinstrument));
    4789         102 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    4790         102 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    4791             : }
    4792             : 
    4793             : /* ----------------------------------------------------------------
    4794             :  *      ExecAggInitializeDSM
    4795             :  *
    4796             :  *      Initialize DSM space for aggregate statistics.
    4797             :  * ----------------------------------------------------------------
    4798             :  */
    4799             : void
    4800         584 : ExecAggInitializeDSM(AggState *node, ParallelContext *pcxt)
    4801             : {
    4802             :     Size        size;
    4803             : 
    4804             :     /* don't need this if not instrumenting or no workers */
    4805         584 :     if (!node->ss.ps.instrument || pcxt->nworkers == 0)
    4806         482 :         return;
    4807             : 
    4808         102 :     size = offsetof(SharedAggInfo, sinstrument)
    4809         102 :         + pcxt->nworkers * sizeof(AggregateInstrumentation);
    4810         102 :     node->shared_info = shm_toc_allocate(pcxt->toc, size);
    4811             :     /* ensure any unfilled slots will contain zeroes */
    4812         102 :     memset(node->shared_info, 0, size);
    4813         102 :     node->shared_info->num_workers = pcxt->nworkers;
    4814         102 :     shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
    4815         102 :                    node->shared_info);
    4816             : }
    4817             : 
    4818             : /* ----------------------------------------------------------------
    4819             :  *      ExecAggInitializeWorker
    4820             :  *
    4821             :  *      Attach worker to DSM space for aggregate statistics.
    4822             :  * ----------------------------------------------------------------
    4823             :  */
    4824             : void
    4825        1652 : ExecAggInitializeWorker(AggState *node, ParallelWorkerContext *pwcxt)
    4826             : {
    4827        1652 :     node->shared_info =
    4828        1652 :         shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
    4829        1652 : }
    4830             : 
    4831             : /* ----------------------------------------------------------------
    4832             :  *      ExecAggRetrieveInstrumentation
    4833             :  *
    4834             :  *      Transfer aggregate statistics from DSM to private memory.
    4835             :  * ----------------------------------------------------------------
    4836             :  */
    4837             : void
    4838         102 : ExecAggRetrieveInstrumentation(AggState *node)
    4839             : {
    4840             :     Size        size;
    4841             :     SharedAggInfo *si;
    4842             : 
    4843         102 :     if (node->shared_info == NULL)
    4844           0 :         return;
    4845             : 
    4846         102 :     size = offsetof(SharedAggInfo, sinstrument)
    4847         102 :         + node->shared_info->num_workers * sizeof(AggregateInstrumentation);
    4848         102 :     si = palloc(size);
    4849         102 :     memcpy(si, node->shared_info, size);
    4850         102 :     node->shared_info = si;
    4851             : }
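
For illustration only (not part of nodeAgg.c): once the statistics have been copied into private memory as above, the leader can walk the per-worker entries. The hash_* field names below are assumed to match AggregateInstrumentation as declared in executor/nodeAgg.h; report_agg_worker_stats itself is invented.

    #include "postgres.h"
    #include "executor/nodeAgg.h"
    #include "nodes/execnodes.h"

    /*
     * Hypothetical reporting helper: walk the per-worker statistics that
     * ExecAggRetrieveInstrumentation copied into the leader's memory.
     */
    static void
    report_agg_worker_stats(AggState *aggstate)
    {
        SharedAggInfo *si = aggstate->shared_info;

        if (si == NULL)
            return;             /* not instrumented, or no workers */

        for (int n = 0; n < si->num_workers; n++)
        {
            AggregateInstrumentation *sinstr = &si->sinstrument[n];

            /* field names assumed from AggregateInstrumentation in nodeAgg.h */
            elog(DEBUG1, "worker %d: hash mem peak %zu, disk " UINT64_FORMAT " kB, %d batches",
                 n,
                 sinstr->hash_mem_peak,
                 sinstr->hash_disk_used,
                 sinstr->hash_batches_used);
        }
    }
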

Generated by: LCOV version 1.16