LCOV - code coverage report
Current view: top level - src/backend/executor - nodeAgg.c (source / functions) Hit Total Coverage
Test: PostgreSQL 15devel Lines: 1407 1462 96.2 %
Date: 2021-12-03 04:09:03 Functions: 56 57 98.2 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeAgg.c
       4             :  *    Routines to handle aggregate nodes.
       5             :  *
       6             :  *    ExecAgg normally evaluates each aggregate in the following steps:
       7             :  *
       8             :  *       transvalue = initcond
       9             :  *       foreach input_tuple do
      10             :  *          transvalue = transfunc(transvalue, input_value(s))
      11             :  *       result = finalfunc(transvalue, direct_argument(s))
      12             :  *
      13             :  *    If a finalfunc is not supplied then the result is just the ending
      14             :  *    value of transvalue.
      15             :  *
      16             :  *    Other behaviors can be selected by the "aggsplit" mode, which exists
      17             :  *    to support partial aggregation.  It is possible to:
      18             :  *    * Skip running the finalfunc, so that the output is always the
      19             :  *    final transvalue state.
      20             :  *    * Substitute the combinefunc for the transfunc, so that transvalue
      21             :  *    states (propagated up from a child partial-aggregation step) are merged
      22             :  *    rather than processing raw input rows.  (The statements below about
      23             :  *    the transfunc apply equally to the combinefunc, when it's selected.)
      24             :  *    * Apply the serializefunc to the output values (this only makes sense
      25             :  *    when skipping the finalfunc, since the serializefunc works on the
      26             :  *    transvalue data type).
      27             :  *    * Apply the deserializefunc to the input values (this only makes sense
      28             :  *    when using the combinefunc, for similar reasons).
      29             :  *    It is the planner's responsibility to connect up Agg nodes using these
      30             :  *    alternate behaviors in a way that makes sense, with partial aggregation
      31             :  *    results being fed to nodes that expect them.
      32             :  *
      33             :  *    If a normal aggregate call specifies DISTINCT or ORDER BY, we sort the
      34             :  *    input tuples and eliminate duplicates (if required) before performing
      35             :  *    the above-depicted process.  (However, we don't do that for ordered-set
      36             :  *    aggregates; their "ORDER BY" inputs are ordinary aggregate arguments
      37             :  *    so far as this module is concerned.)  Note that partial aggregation
      38             :  *    is not supported in these cases, since we couldn't ensure global
      39             :  *    ordering or distinctness of the inputs.
      40             :  *
      41             :  *    If transfunc is marked "strict" in pg_proc and initcond is NULL,
      42             :  *    then the first non-NULL input_value is assigned directly to transvalue,
      43             :  *    and transfunc isn't applied until the second non-NULL input_value.
      44             :  *    The agg's first input type and transtype must be the same in this case!
      45             :  *
      46             :  *    If transfunc is marked "strict" then NULL input_values are skipped,
      47             :  *    keeping the previous transvalue.  If transfunc is not strict then it
      48             :  *    is called for every input tuple and must deal with NULL initcond
      49             :  *    or NULL input_values for itself.
      50             :  *
      51             :  *    If finalfunc is marked "strict" then it is not called when the
      52             :  *    ending transvalue is NULL, instead a NULL result is created
      53             :  *    automatically (this is just the usual handling of strict functions,
      54             :  *    of course).  A non-strict finalfunc can make its own choice of
      55             :  *    what to return for a NULL ending transvalue.
      56             :  *
      57             :  *    Ordered-set aggregates are treated specially in one other way: we
      58             :  *    evaluate any "direct" arguments and pass them to the finalfunc along
      59             :  *    with the transition value.
      60             :  *
      61             :  *    A finalfunc can have additional arguments beyond the transvalue and
      62             :  *    any "direct" arguments, corresponding to the input arguments of the
      63             :  *    aggregate.  These are always just passed as NULL.  Such arguments may be
      64             :  *    needed to allow resolution of a polymorphic aggregate's result type.
      65             :  *
      66             :  *    We compute aggregate input expressions and run the transition functions
      67             :  *    in a temporary econtext (aggstate->tmpcontext).  This is reset at least
      68             :  *    once per input tuple, so when the transvalue datatype is
      69             :  *    pass-by-reference, we have to be careful to copy it into a longer-lived
      70             :  *    memory context, and free the prior value to avoid memory leakage.  We
      71             :  *    store transvalues in another set of econtexts, aggstate->aggcontexts
      72             :  *    (one per grouping set, see below), which are also used for the hashtable
      73             :  *    structures in AGG_HASHED mode.  These econtexts are rescanned, not just
      74             :  *    reset, at group boundaries so that aggregate transition functions can
      75             :  *    register shutdown callbacks via AggRegisterCallback.
      76             :  *
      77             :  *    The node's regular econtext (aggstate->ss.ps.ps_ExprContext) is used to
      78             :  *    run finalize functions and compute the output tuple; this context can be
      79             :  *    reset once per output tuple.
      80             :  *
      81             :  *    The executor's AggState node is passed as the fmgr "context" value in
      82             :  *    all transfunc and finalfunc calls.  It is not recommended that the
      83             :  *    transition functions look at the AggState node directly, but they can
      84             :  *    use AggCheckCallContext() to verify that they are being called by
      85             :  *    nodeAgg.c (and not as ordinary SQL functions).  The main reason a
      86             :  *    transition function might want to know this is so that it can avoid
      87             :  *    palloc'ing a fixed-size pass-by-ref transition value on every call:
      88             :  *    it can instead just scribble on and return its left input.  Ordinarily
      89             :  *    it is completely forbidden for functions to modify pass-by-ref inputs,
      90             :  *    but in the aggregate case we know the left input is either the initial
      91             :  *    transition value or a previous function result, and in either case its
      92             :  *    value need not be preserved.  See int8inc() for an example.  Notice that
      93             :  *    the EEOP_AGG_PLAIN_TRANS step is coded to avoid a data copy step when
      94             :  *    the previous transition value pointer is returned.  It is also possible
      95             :  *    to avoid repeated data copying when the transition value is an expanded
      96             :  *    object: to do that, the transition function must take care to return
      97             :  *    an expanded object that is in a child context of the memory context
      98             :  *    returned by AggCheckCallContext().  Also, some transition functions want
      99             :  *    to store working state in addition to the nominal transition value; they
     100             :  *    can use the memory context returned by AggCheckCallContext() to do that.
     101             :  *
     102             :  *    Note: AggCheckCallContext() is available as of PostgreSQL 9.0.  The
     103             :  *    AggState is available as context in earlier releases (back to 8.1),
     104             :  *    but direct examination of the node is needed to use it before 9.0.
     105             :  *
     106             :  *    As of 9.4, aggregate transition functions can also use AggGetAggref()
     107             :  *    to get hold of the Aggref expression node for their aggregate call.
     108             :  *    This is mainly intended for ordered-set aggregates, which are not
     109             :  *    supported as window functions.  (A regular aggregate function would
     110             :  *    need some fallback logic to use this, since there's no Aggref node
     111             :  *    for a window function.)
     112             :  *
     113             :  *    Grouping sets:
     114             :  *
     115             :  *    A list of grouping sets which is structurally equivalent to a ROLLUP
     116             :  *    clause (e.g. (a,b,c), (a,b), (a)) can be processed in a single pass over
     117             :  *    ordered data.  We do this by keeping a separate set of transition values
     118             :  *    for each grouping set being concurrently processed; for each input tuple
     119             :  *    we update them all, and on group boundaries we reset those states
     120             :  *    (starting at the front of the list) whose grouping values have changed
     121             :  *    (the list of grouping sets is ordered from most specific to least
     122             :  *    specific).
     123             :  *
     124             :  *    Where more complex grouping sets are used, we break them down into
     125             :  *    "phases", where each phase has a different sort order (except phase 0
     126             :  *    which is reserved for hashing).  During each phase but the last, the
     127             :  *    input tuples are additionally stored in a tuplesort which is keyed to the
     128             :  *    next phase's sort order; during each phase but the first, the input
     129             :  *    tuples are drawn from the previously sorted data.  (The sorting of the
     130             :  *    data for the first phase is handled by the planner, as it might be
     131             :  *    satisfied by underlying nodes.)
     132             :  *
     133             :  *    Hashing can be mixed with sorted grouping.  To do this, we have an
     134             :  *    AGG_MIXED strategy that populates the hashtables during the first sorted
     135             :  *    phase, and switches to reading them out after completing all sort phases.
     136             :  *    We can also support AGG_HASHED with multiple hash tables and no sorting
     137             :  *    at all.
     138             :  *
     139             :  *    From the perspective of aggregate transition and final functions, the
     140             :  *    only issue regarding grouping sets is this: a single call site (flinfo)
     141             :  *    of an aggregate function may be used for updating several different
     142             :  *    transition values in turn. So the function must not cache in the flinfo
     143             :  *    anything which logically belongs as part of the transition value (most
     144             :  *    importantly, the memory context in which the transition value exists).
     145             :  *    The support API functions (AggCheckCallContext, AggRegisterCallback) are
     146             :  *    sensitive to the grouping set for which the aggregate function is
     147             :  *    currently being called.
     148             :  *
     149             :  *    Plan structure:
     150             :  *
     151             :  *    What we get from the planner is actually one "real" Agg node which is
     152             :  *    part of the plan tree proper, but which optionally has an additional list
     153             :  *    of Agg nodes hung off the side via the "chain" field.  This is because an
     154             :  *    Agg node happens to be a convenient representation of all the data we
     155             :  *    need for grouping sets.
     156             :  *
     157             :  *    For many purposes, we treat the "real" node as if it were just the first
     158             :  *    node in the chain.  The chain must be ordered such that hashed entries
     159             :  *    come before sorted/plain entries; the real node is marked AGG_MIXED if
     160             :  *    there are both types present (in which case the real node describes one
     161             :  *    of the hashed groupings, other AGG_HASHED nodes may optionally follow in
     162             :  *    the chain, followed in turn by AGG_SORTED or (one) AGG_PLAIN node).  If
     163             :  *    the real node is marked AGG_HASHED or AGG_SORTED, then all the chained
     164             :  *    nodes must be of the same type; if it is AGG_PLAIN, there can be no
     165             :  *    chained nodes.
     166             :  *
     167             :  *    We collect all hashed nodes into a single "phase", numbered 0, and create
     168             :  *    a sorted phase (numbered 1..n) for each AGG_SORTED or AGG_PLAIN node.
     169             :  *    Phase 0 is allocated even if there are no hashes, but remains unused in
     170             :  *    that case.
     171             :  *
     172             :  *    AGG_HASHED nodes actually refer to only a single grouping set each,
     173             :  *    because for each hashed grouping we need a separate grpColIdx and
     174             :  *    numGroups estimate.  AGG_SORTED nodes represent a "rollup", a list of
     175             :  *    grouping sets that share a sort order.  Each AGG_SORTED node other than
     176             :  *    the first one has an associated Sort node which describes the sort order
     177             :  *    to be used; the first sorted node takes its input from the outer subtree,
     178             :  *    which the planner has already arranged to provide ordered data.
     179             :  *
     180             :  *    Memory and ExprContext usage:
     181             :  *
     182             :  *    Because we're accumulating aggregate values across input rows, we need to
     183             :  *    use more memory contexts than just simple input/output tuple contexts.
     184             :  *    In fact, for a rollup, we need a separate context for each grouping set
     185             :  *    so that we can reset the inner (finer-grained) aggregates on their group
     186             :  *    boundaries while continuing to accumulate values for outer
     187             :  *    (coarser-grained) groupings.  On top of this, we might be simultaneously
     188             :  *    populating hashtables; however, we only need one context for all the
     189             :  *    hashtables.
     190             :  *
     191             :  *    So we create an array, aggcontexts, with an ExprContext for each grouping
     192             :  *    set in the largest rollup that we're going to process, and use the
     193             :  *    per-tuple memory context of those ExprContexts to store the aggregate
     194             :  *    transition values.  hashcontext is the single context created to support
     195             :  *    all hash tables.
     196             :  *
     197             :  *    Spilling To Disk
     198             :  *
     199             :  *    When performing hash aggregation, if the hash table memory exceeds the
     200             :  *    limit (see hash_agg_check_limits()), we enter "spill mode". In spill
     201             :  *    mode, we advance the transition states only for groups already in the
     202             :  *    hash table. For tuples that would need to create a new hash table
     203             :  *    entries (and initialize new transition states), we instead spill them to
     204             :  *    disk to be processed later. The tuples are spilled in a partitioned
     205             :  *    manner, so that subsequent batches are smaller and less likely to exceed
     206             :  *    hash_mem (if a batch does exceed hash_mem, it must be spilled
     207             :  *    recursively).
     208             :  *
     209             :  *    Spilled data is written to logical tapes. These provide better control
     210             :  *    over memory usage, disk space, and the number of files than if we were
     211             :  *    to use a BufFile for each spill.  We don't know the number of tapes needed
     212             :  *    at the start of the algorithm (because it can recurse), so a tape set is
     213             :  *    allocated at the beginning, and individual tapes are created as needed.
     214             :  *    As a particular tape is read, logtape.c recycles its disk space. When a
     215             :  *    tape is read to completion, it is destroyed entirely.
     216             :  *
     217             :  *    Tapes' buffers can take up substantial memory when many tapes are open at
     218             :  *    once. We only need one tape open at a time in read mode (using a buffer
     219             :  *    that's a multiple of BLCKSZ); but we need one tape open in write mode (each
     220             :  *    requiring a buffer of size BLCKSZ) for each partition.
     221             :  *
     222             :  *    Note that it's possible for transition states to start small but then
     223             :  *    grow very large; for instance in the case of ARRAY_AGG. In such cases,
     224             :  *    it's still possible to significantly exceed hash_mem. We try to avoid
     225             :  *    this situation by estimating what will fit in the available memory, and
     226             :  *    imposing a limit on the number of groups separately from the amount of
     227             :  *    memory consumed.
     228             :  *
     229             :  *    Transition / Combine function invocation:
     230             :  *
     231             :  *    For performance reasons transition functions, including combine
     232             :  *    functions, aren't invoked one-by-one from nodeAgg.c after computing
     233             :  *    arguments using the expression evaluation engine. Instead
     234             :  *    ExecBuildAggTrans() builds one large expression that does both argument
     235             :  *    evaluation and transition function invocation. That avoids performance
     236             :  *    issues due to repeated uses of expression evaluation, complications due
     237             :  *    to filter expressions having to be evaluated early, and allows to JIT
     238             :  *    the entire expression into one native function.
     239             :  *
     240             :  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
     241             :  * Portions Copyright (c) 1994, Regents of the University of California
     242             :  *
     243             :  * IDENTIFICATION
     244             :  *    src/backend/executor/nodeAgg.c
     245             :  *
     246             :  *-------------------------------------------------------------------------
     247             :  */
     248             : 
     249             : #include "postgres.h"
     250             : 
     251             : #include "access/htup_details.h"
     252             : #include "access/parallel.h"
     253             : #include "catalog/objectaccess.h"
     254             : #include "catalog/pg_aggregate.h"
     255             : #include "catalog/pg_proc.h"
     256             : #include "catalog/pg_type.h"
     257             : #include "common/hashfn.h"
     258             : #include "executor/execExpr.h"
     259             : #include "executor/executor.h"
     260             : #include "executor/nodeAgg.h"
     261             : #include "lib/hyperloglog.h"
     262             : #include "miscadmin.h"
     263             : #include "nodes/makefuncs.h"
     264             : #include "nodes/nodeFuncs.h"
     265             : #include "optimizer/optimizer.h"
     266             : #include "parser/parse_agg.h"
     267             : #include "parser/parse_coerce.h"
     268             : #include "utils/acl.h"
     269             : #include "utils/builtins.h"
     270             : #include "utils/datum.h"
     271             : #include "utils/dynahash.h"
     272             : #include "utils/expandeddatum.h"
     273             : #include "utils/logtape.h"
     274             : #include "utils/lsyscache.h"
     275             : #include "utils/memutils.h"
     276             : #include "utils/syscache.h"
     277             : #include "utils/tuplesort.h"
     278             : 
     279             : /*
     280             :  * Control how many partitions are created when spilling HashAgg to
     281             :  * disk.
     282             :  *
     283             :  * HASHAGG_PARTITION_FACTOR is multiplied by the estimated number of
     284             :  * partitions needed such that each partition will fit in memory. The factor
     285             :  * is set higher than one because there's not a high cost to having a few too
     286             :  * many partitions, and it makes it less likely that a partition will need to
     287             :  * be spilled recursively. Another benefit of having more, smaller partitions
     288             :  * is that small hash tables may perform better than large ones due to memory
     289             :  * caching effects.
     290             :  *
     291             :  * We also specify a min and max number of partitions per spill. Too few might
     292             :  * mean a lot of wasted I/O from repeated spilling of the same tuples. Too
     293             :  * many will result in lots of memory wasted buffering the spill files (which
     294             :  * could instead be spent on a larger hash table).
     295             :  */
     296             : #define HASHAGG_PARTITION_FACTOR 1.50
     297             : #define HASHAGG_MIN_PARTITIONS 4
     298             : #define HASHAGG_MAX_PARTITIONS 1024
     299             : 
     300             : /*
     301             :  * For reading from tapes, the buffer size must be a multiple of
     302             :  * BLCKSZ. Larger values help when reading from multiple tapes concurrently,
     303             :  * but that doesn't happen in HashAgg, so we simply use BLCKSZ. Writing to a
     304             :  * tape always uses a buffer of size BLCKSZ.
     305             :  */
     306             : #define HASHAGG_READ_BUFFER_SIZE BLCKSZ
     307             : #define HASHAGG_WRITE_BUFFER_SIZE BLCKSZ
     308             : 
     309             : /*
     310             :  * HyperLogLog is used for estimating the cardinality of the spilled tuples in
     311             :  * a given partition. 5 bits corresponds to a size of about 32 bytes and a
     312             :  * worst-case error of around 18%. That's effective enough to choose a
     313             :  * reasonable number of partitions when recursing.
     314             :  */
     315             : #define HASHAGG_HLL_BIT_WIDTH 5
     316             : 
     317             : /*
     318             :  * Estimate chunk overhead as a constant 16 bytes. XXX: should this be
     319             :  * improved?
     320             :  */
     321             : #define CHUNKHDRSZ 16
     322             : 
     323             : /*
     324             :  * Represents partitioned spill data for a single hashtable. Contains the
     325             :  * necessary information to route tuples to the correct partition, and to
     326             :  * transform the spilled data into new batches.
     327             :  *
     328             :  * The high bits are used for partition selection (when recursing, we ignore
     329             :  * the bits that have already been used for partition selection at an earlier
     330             :  * level).
     331             :  */
     332             : typedef struct HashAggSpill
     333             : {
     334             :     int         npartitions;    /* number of partitions */
     335             :     LogicalTape **partitions;   /* spill partition tapes */
     336             :     int64      *ntuples;        /* number of tuples in each partition */
     337             :     uint32      mask;           /* mask to find partition from hash value */
     338             :     int         shift;          /* after masking, shift by this amount */
     339             :     hyperLogLogState *hll_card; /* cardinality estimate for contents */
     340             : } HashAggSpill;
     341             : 
     342             : /*
     343             :  * Represents work to be done for one pass of hash aggregation (with only one
     344             :  * grouping set).
     345             :  *
     346             :  * Also tracks the bits of the hash already used for partition selection by
     347             :  * earlier iterations, so that this batch can use new bits. If all bits have
     348             :  * already been used, no partitioning will be done (any spilled data will go
     349             :  * to a single output tape).
     350             :  */
     351             : typedef struct HashAggBatch
     352             : {
     353             :     int         setno;          /* grouping set */
     354             :     int         used_bits;      /* number of bits of hash already used */
     355             :     LogicalTape *input_tape;    /* input partition tape */
     356             :     int64       input_tuples;   /* number of tuples in this batch */
     357             :     double      input_card;     /* estimated group cardinality */
     358             : } HashAggBatch;
     359             : 
     360             : /* used to find referenced colnos */
     361             : typedef struct FindColsContext
     362             : {
     363             :     bool        is_aggref;      /* is under an aggref */
     364             :     Bitmapset  *aggregated;     /* column references under an aggref */
     365             :     Bitmapset  *unaggregated;   /* other column references */
     366             : } FindColsContext;
     367             : 
     368             : static void select_current_set(AggState *aggstate, int setno, bool is_hash);
     369             : static void initialize_phase(AggState *aggstate, int newphase);
     370             : static TupleTableSlot *fetch_input_tuple(AggState *aggstate);
     371             : static void initialize_aggregates(AggState *aggstate,
     372             :                                   AggStatePerGroup *pergroups,
     373             :                                   int numReset);
     374             : static void advance_transition_function(AggState *aggstate,
     375             :                                         AggStatePerTrans pertrans,
     376             :                                         AggStatePerGroup pergroupstate);
     377             : static void advance_aggregates(AggState *aggstate);
     378             : static void process_ordered_aggregate_single(AggState *aggstate,
     379             :                                              AggStatePerTrans pertrans,
     380             :                                              AggStatePerGroup pergroupstate);
     381             : static void process_ordered_aggregate_multi(AggState *aggstate,
     382             :                                             AggStatePerTrans pertrans,
     383             :                                             AggStatePerGroup pergroupstate);
     384             : static void finalize_aggregate(AggState *aggstate,
     385             :                                AggStatePerAgg peragg,
     386             :                                AggStatePerGroup pergroupstate,
     387             :                                Datum *resultVal, bool *resultIsNull);
     388             : static void finalize_partialaggregate(AggState *aggstate,
     389             :                                       AggStatePerAgg peragg,
     390             :                                       AggStatePerGroup pergroupstate,
     391             :                                       Datum *resultVal, bool *resultIsNull);
     392             : static inline void prepare_hash_slot(AggStatePerHash perhash,
     393             :                                      TupleTableSlot *inputslot,
     394             :                                      TupleTableSlot *hashslot);
     395             : static void prepare_projection_slot(AggState *aggstate,
     396             :                                     TupleTableSlot *slot,
     397             :                                     int currentSet);
     398             : static void finalize_aggregates(AggState *aggstate,
     399             :                                 AggStatePerAgg peragg,
     400             :                                 AggStatePerGroup pergroup);
     401             : static TupleTableSlot *project_aggregates(AggState *aggstate);
     402             : static void find_cols(AggState *aggstate, Bitmapset **aggregated,
     403             :                       Bitmapset **unaggregated);
     404             : static bool find_cols_walker(Node *node, FindColsContext *context);
     405             : static void build_hash_tables(AggState *aggstate);
     406             : static void build_hash_table(AggState *aggstate, int setno, long nbuckets);
     407             : static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
     408             :                                           bool nullcheck);
     409             : static long hash_choose_num_buckets(double hashentrysize,
     410             :                                     long estimated_nbuckets,
     411             :                                     Size memory);
     412             : static int  hash_choose_num_partitions(double input_groups,
     413             :                                        double hashentrysize,
     414             :                                        int used_bits,
     415             :                                        int *log2_npartittions);
     416             : static void initialize_hash_entry(AggState *aggstate,
     417             :                                   TupleHashTable hashtable,
     418             :                                   TupleHashEntry entry);
     419             : static void lookup_hash_entries(AggState *aggstate);
     420             : static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
     421             : static void agg_fill_hash_table(AggState *aggstate);
     422             : static bool agg_refill_hash_table(AggState *aggstate);
     423             : static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
     424             : static TupleTableSlot *agg_retrieve_hash_table_in_memory(AggState *aggstate);
     425             : static void hash_agg_check_limits(AggState *aggstate);
     426             : static void hash_agg_enter_spill_mode(AggState *aggstate);
     427             : static void hash_agg_update_metrics(AggState *aggstate, bool from_tape,
     428             :                                     int npartitions);
     429             : static void hashagg_finish_initial_spills(AggState *aggstate);
     430             : static void hashagg_reset_spill_state(AggState *aggstate);
     431             : static HashAggBatch *hashagg_batch_new(LogicalTape *input_tape, int setno,
     432             :                                        int64 input_tuples, double input_card,
     433             :                                        int used_bits);
     434             : static MinimalTuple hashagg_batch_read(HashAggBatch *batch, uint32 *hashp);
     435             : static void hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *lts,
     436             :                                int used_bits, double input_groups,
     437             :                                double hashentrysize);
     438             : static Size hashagg_spill_tuple(AggState *aggstate, HashAggSpill *spill,
     439             :                                 TupleTableSlot *slot, uint32 hash);
     440             : static void hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill,
     441             :                                  int setno);
     442             : static Datum GetAggInitVal(Datum textInitVal, Oid transtype);
     443             : static void build_pertrans_for_aggref(AggStatePerTrans pertrans,
     444             :                                       AggState *aggstate, EState *estate,
     445             :                                       Aggref *aggref, Oid transfn_oid,
     446             :                                       Oid aggtranstype, Oid aggserialfn,
     447             :                                       Oid aggdeserialfn, Datum initValue,
     448             :                                       bool initValueIsNull, Oid *inputTypes,
     449             :                                       int numArguments);
     450             : 
     451             : 
     452             : /*
     453             :  * Select the current grouping set; affects current_set and
     454             :  * curaggcontext.
     455             :  */
     456             : static void
     457     7157390 : select_current_set(AggState *aggstate, int setno, bool is_hash)
     458             : {
     459             :     /*
     460             :      * When changing this, also adapt ExecAggPlainTransByVal() and
     461             :      * ExecAggPlainTransByRef().
     462             :      */
     463     7157390 :     if (is_hash)
     464     3760068 :         aggstate->curaggcontext = aggstate->hashcontext;
     465             :     else
     466     3397322 :         aggstate->curaggcontext = aggstate->aggcontexts[setno];
     467             : 
     468     7157390 :     aggstate->current_set = setno;
     469     7157390 : }
     470             : 
     471             : /*
     472             :  * Switch to phase "newphase", which must either be 0 or 1 (to reset) or
     473             :  * current_phase + 1. Juggle the tuplesorts accordingly.
     474             :  *
     475             :  * Phase 0 is for hashing, which we currently handle last in the AGG_MIXED
     476             :  * case, so when entering phase 0, all we need to do is drop open sorts.
     477             :  */
     478             : static void
     479     1530930 : initialize_phase(AggState *aggstate, int newphase)
     480             : {
     481             :     Assert(newphase <= 1 || newphase == aggstate->current_phase + 1);
     482             : 
     483             :     /*
     484             :      * Whatever the previous state, we're now done with whatever input
     485             :      * tuplesort was in use.
     486             :      */
     487     1530930 :     if (aggstate->sort_in)
     488             :     {
     489          28 :         tuplesort_end(aggstate->sort_in);
     490          28 :         aggstate->sort_in = NULL;
     491             :     }
     492             : 
     493     1530930 :     if (newphase <= 1)
     494             :     {
     495             :         /*
     496             :          * Discard any existing output tuplesort.
     497             :          */
     498     1530806 :         if (aggstate->sort_out)
     499             :         {
     500           4 :             tuplesort_end(aggstate->sort_out);
     501           4 :             aggstate->sort_out = NULL;
     502             :         }
     503             :     }
     504             :     else
     505             :     {
     506             :         /*
     507             :          * The old output tuplesort becomes the new input one, and this is the
     508             :          * right time to actually sort it.
     509             :          */
     510         124 :         aggstate->sort_in = aggstate->sort_out;
     511         124 :         aggstate->sort_out = NULL;
     512             :         Assert(aggstate->sort_in);
     513         124 :         tuplesort_performsort(aggstate->sort_in);
     514             :     }
     515             : 
     516             :     /*
     517             :      * If this isn't the last phase, we need to sort appropriately for the
     518             :      * next phase in sequence.
     519             :      */
     520     1530930 :     if (newphase > 0 && newphase < aggstate->numphases - 1)
     521             :     {
     522         156 :         Sort       *sortnode = aggstate->phases[newphase + 1].sortnode;
     523         156 :         PlanState  *outerNode = outerPlanState(aggstate);
     524         156 :         TupleDesc   tupDesc = ExecGetResultType(outerNode);
     525             : 
     526         156 :         aggstate->sort_out = tuplesort_begin_heap(tupDesc,
     527             :                                                   sortnode->numCols,
     528             :                                                   sortnode->sortColIdx,
     529             :                                                   sortnode->sortOperators,
     530             :                                                   sortnode->collations,
     531             :                                                   sortnode->nullsFirst,
     532             :                                                   work_mem,
     533             :                                                   NULL, false);
     534             :     }
     535             : 
     536     1530930 :     aggstate->current_phase = newphase;
     537     1530930 :     aggstate->phase = &aggstate->phases[newphase];
     538     1530930 : }
     539             : 
     540             : /*
     541             :  * Fetch a tuple from either the outer plan (for phase 1) or from the sorter
     542             :  * populated by the previous phase.  Copy it to the sorter for the next phase
     543             :  * if any.
     544             :  *
     545             :  * Callers cannot rely on memory for tuple in returned slot remaining valid
     546             :  * past any subsequently fetched tuple.
     547             :  */
     548             : static TupleTableSlot *
     549    18091484 : fetch_input_tuple(AggState *aggstate)
     550             : {
     551             :     TupleTableSlot *slot;
     552             : 
     553    18091484 :     if (aggstate->sort_in)
     554             :     {
     555             :         /* make sure we check for interrupts in either path through here */
     556      116588 :         CHECK_FOR_INTERRUPTS();
     557      116588 :         if (!tuplesort_gettupleslot(aggstate->sort_in, true, false,
     558             :                                     aggstate->sort_slot, NULL))
     559         124 :             return NULL;
     560      116464 :         slot = aggstate->sort_slot;
     561             :     }
     562             :     else
     563    17974896 :         slot = ExecProcNode(outerPlanState(aggstate));
     564             : 
     565    18091342 :     if (!TupIsNull(slot) && aggstate->sort_out)
     566      116464 :         tuplesort_puttupleslot(aggstate->sort_out, slot);
     567             : 
     568    18091342 :     return slot;
     569             : }
     570             : 
     571             : /*
     572             :  * (Re)Initialize an individual aggregate.
     573             :  *
     574             :  * This function handles only one grouping set, already set in
     575             :  * aggstate->current_set.
     576             :  *
     577             :  * When called, CurrentMemoryContext should be the per-query context.
     578             :  */
     579             : static void
     580     2132294 : initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans,
     581             :                      AggStatePerGroup pergroupstate)
     582             : {
     583             :     /*
     584             :      * Start a fresh sort operation for each DISTINCT/ORDER BY aggregate.
     585             :      */
     586     2132294 :     if (pertrans->numSortCols > 0)
     587             :     {
     588             :         /*
     589             :          * In case of rescan, maybe there could be an uncompleted sort
     590             :          * operation?  Clean it up if so.
     591             :          */
     592     1459200 :         if (pertrans->sortstates[aggstate->current_set])
     593           0 :             tuplesort_end(pertrans->sortstates[aggstate->current_set]);
     594             : 
     595             : 
     596             :         /*
     597             :          * We use a plain Datum sorter when there's a single input column;
     598             :          * otherwise sort the full tuple.  (See comments for
     599             :          * process_ordered_aggregate_single.)
     600             :          */
     601     1459200 :         if (pertrans->numInputs == 1)
     602             :         {
     603       69446 :             Form_pg_attribute attr = TupleDescAttr(pertrans->sortdesc, 0);
     604             : 
     605       69446 :             pertrans->sortstates[aggstate->current_set] =
     606       69446 :                 tuplesort_begin_datum(attr->atttypid,
     607       69446 :                                       pertrans->sortOperators[0],
     608       69446 :                                       pertrans->sortCollations[0],
     609       69446 :                                       pertrans->sortNullsFirst[0],
     610             :                                       work_mem, NULL, false);
     611             :         }
     612             :         else
     613     1389754 :             pertrans->sortstates[aggstate->current_set] =
     614     1389754 :                 tuplesort_begin_heap(pertrans->sortdesc,
     615             :                                      pertrans->numSortCols,
     616             :                                      pertrans->sortColIdx,
     617             :                                      pertrans->sortOperators,
     618             :                                      pertrans->sortCollations,
     619             :                                      pertrans->sortNullsFirst,
     620             :                                      work_mem, NULL, false);
     621             :     }
     622             : 
     623             :     /*
     624             :      * (Re)set transValue to the initial value.
     625             :      *
     626             :      * Note that when the initial value is pass-by-ref, we must copy it (into
     627             :      * the aggcontext) since we will pfree the transValue later.
     628             :      */
     629     2132294 :     if (pertrans->initValueIsNull)
     630     1799402 :         pergroupstate->transValue = pertrans->initValue;
     631             :     else
     632             :     {
     633             :         MemoryContext oldContext;
     634             : 
     635      332892 :         oldContext = MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
     636      665784 :         pergroupstate->transValue = datumCopy(pertrans->initValue,
     637      332892 :                                               pertrans->transtypeByVal,
     638      332892 :                                               pertrans->transtypeLen);
     639      332892 :         MemoryContextSwitchTo(oldContext);
     640             :     }
     641     2132294 :     pergroupstate->transValueIsNull = pertrans->initValueIsNull;
     642             : 
     643             :     /*
     644             :      * If the initial value for the transition state doesn't exist in the
     645             :      * pg_aggregate table then we will let the first non-NULL value returned
     646             :      * from the outer procNode become the initial value. (This is useful for
     647             :      * aggregates like max() and min().) The noTransValue flag signals that we
     648             :      * still need to do this.
     649             :      */
     650     2132294 :     pergroupstate->noTransValue = pertrans->initValueIsNull;
     651     2132294 : }
     652             : 
     653             : /*
     654             :  * Initialize all aggregate transition states for a new group of input values.
     655             :  *
     656             :  * If there are multiple grouping sets, we initialize only the first numReset
     657             :  * of them (the grouping sets are ordered so that the most specific one, which
     658             :  * is reset most often, is first). As a convenience, if numReset is 0, we
     659             :  * reinitialize all sets.
     660             :  *
     661             :  * NB: This cannot be used for hash aggregates, as for those the grouping set
     662             :  * number has to be specified from further up.
     663             :  *
     664             :  * When called, CurrentMemoryContext should be the per-query context.
     665             :  */
     666             : static void
     667     1662680 : initialize_aggregates(AggState *aggstate,
     668             :                       AggStatePerGroup *pergroups,
     669             :                       int numReset)
     670             : {
     671             :     int         transno;
     672     1662680 :     int         numGroupingSets = Max(aggstate->phase->numsets, 1);
     673     1662680 :     int         setno = 0;
     674     1662680 :     int         numTrans = aggstate->numtrans;
     675     1662680 :     AggStatePerTrans transstates = aggstate->pertrans;
     676             : 
     677     1662680 :     if (numReset == 0)
     678           0 :         numReset = numGroupingSets;
     679             : 
     680     3334808 :     for (setno = 0; setno < numReset; setno++)
     681             :     {
     682     1672128 :         AggStatePerGroup pergroup = pergroups[setno];
     683             : 
     684     1672128 :         select_current_set(aggstate, setno, false);
     685             : 
     686     3559594 :         for (transno = 0; transno < numTrans; transno++)
     687             :         {
     688     1887466 :             AggStatePerTrans pertrans = &transstates[transno];
     689     1887466 :             AggStatePerGroup pergroupstate = &pergroup[transno];
     690             : 
     691     1887466 :             initialize_aggregate(aggstate, pertrans, pergroupstate);
     692             :         }
     693             :     }
     694     1662680 : }
     695             : 
     696             : /*
     697             :  * Given new input value(s), advance the transition function of one aggregate
     698             :  * state within one grouping set only (already set in aggstate->current_set)
     699             :  *
     700             :  * The new values (and null flags) have been preloaded into argument positions
     701             :  * 1 and up in pertrans->transfn_fcinfo, so that we needn't copy them again to
     702             :  * pass to the transition function.  We also expect that the static fields of
     703             :  * the fcinfo are already initialized; that was done by ExecInitAgg().
     704             :  *
     705             :  * It doesn't matter which memory context this is called in.
     706             :  */
     707             : static void
     708      585562 : advance_transition_function(AggState *aggstate,
     709             :                             AggStatePerTrans pertrans,
     710             :                             AggStatePerGroup pergroupstate)
     711             : {
     712      585562 :     FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
     713             :     MemoryContext oldContext;
     714             :     Datum       newVal;
     715             : 
     716      585562 :     if (pertrans->transfn.fn_strict)
     717             :     {
     718             :         /*
     719             :          * For a strict transfn, nothing happens when there's a NULL input; we
     720             :          * just keep the prior transValue.
     721             :          */
     722       86118 :         int         numTransInputs = pertrans->numTransInputs;
     723             :         int         i;
     724             : 
     725      172284 :         for (i = 1; i <= numTransInputs; i++)
     726             :         {
     727       86166 :             if (fcinfo->args[i].isnull)
     728           0 :                 return;
     729             :         }
     730       86118 :         if (pergroupstate->noTransValue)
     731             :         {
     732             :             /*
     733             :              * transValue has not been initialized. This is the first non-NULL
     734             :              * input value. We use it as the initial value for transValue. (We
     735             :              * already checked that the agg's input type is binary-compatible
     736             :              * with its transtype, so straight copy here is OK.)
     737             :              *
     738             :              * We must copy the datum into aggcontext if it is pass-by-ref. We
     739             :              * do not need to pfree the old transValue, since it's NULL.
     740             :              */
     741           8 :             oldContext = MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
     742          16 :             pergroupstate->transValue = datumCopy(fcinfo->args[1].value,
     743           8 :                                                   pertrans->transtypeByVal,
     744           8 :                                                   pertrans->transtypeLen);
     745           8 :             pergroupstate->transValueIsNull = false;
     746           8 :             pergroupstate->noTransValue = false;
     747           8 :             MemoryContextSwitchTo(oldContext);
     748           8 :             return;
     749             :         }
     750       86110 :         if (pergroupstate->transValueIsNull)
     751             :         {
     752             :             /*
     753             :              * Don't call a strict function with NULL inputs.  Note it is
     754             :              * possible to get here despite the above tests, if the transfn is
     755             :              * strict *and* returned a NULL on a prior cycle. If that happens
     756             :              * we will propagate the NULL all the way to the end.
     757             :              */
     758           0 :             return;
     759             :         }
     760             :     }
     761             : 
     762             :     /* We run the transition functions in per-input-tuple memory context */
     763      585554 :     oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory);
     764             : 
     765             :     /* set up aggstate->curpertrans for AggGetAggref() */
     766      585554 :     aggstate->curpertrans = pertrans;
     767             : 
     768             :     /*
     769             :      * OK to call the transition function
     770             :      */
     771      585554 :     fcinfo->args[0].value = pergroupstate->transValue;
     772      585554 :     fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
     773      585554 :     fcinfo->isnull = false;      /* just in case transfn doesn't set it */
     774             : 
     775      585554 :     newVal = FunctionCallInvoke(fcinfo);
     776             : 
     777      585554 :     aggstate->curpertrans = NULL;
     778             : 
     779             :     /*
     780             :      * If pass-by-ref datatype, must copy the new value into aggcontext and
     781             :      * free the prior transValue.  But if transfn returned a pointer to its
     782             :      * first input, we don't need to do anything.  Also, if transfn returned a
     783             :      * pointer to a R/W expanded object that is already a child of the
     784             :      * aggcontext, assume we can adopt that value without copying it.
     785             :      *
     786             :      * It's safe to compare newVal with pergroup->transValue without regard
     787             :      * for either being NULL, because ExecAggTransReparent() takes care to set
     788             :      * transValue to 0 when NULL. Otherwise we could end up accidentally not
     789             :      * reparenting, when the transValue has the same numerical value as
     790             :      * newValue, despite being NULL.  This is a somewhat hot path, making it
     791             :      * undesirable to instead solve this with another branch for the common
     792             :      * case of the transition function returning its (modified) input
     793             :      * argument.
     794             :      */
     795      585554 :     if (!pertrans->transtypeByVal &&
     796         240 :         DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
     797         240 :         newVal = ExecAggTransReparent(aggstate, pertrans,
     798         240 :                                       newVal, fcinfo->isnull,
     799             :                                       pergroupstate->transValue,
     800         240 :                                       pergroupstate->transValueIsNull);
     801             : 
     802      585554 :     pergroupstate->transValue = newVal;
     803      585554 :     pergroupstate->transValueIsNull = fcinfo->isnull;
     804             : 
     805      585554 :     MemoryContextSwitchTo(oldContext);
     806             : }
     807             : 
     808             : /*
     809             :  * Advance each aggregate transition state for one input tuple.  The input
     810             :  * tuple has been stored in tmpcontext->ecxt_outertuple, so that it is
     811             :  * accessible to ExecEvalExpr.
     812             :  *
     813             :  * We have two sets of transition states to handle: one for sorted aggregation
     814             :  * and one for hashed; we do them both here, to avoid multiple evaluation of
     815             :  * the inputs.
     816             :  *
     817             :  * When called, CurrentMemoryContext should be the per-query context.
     818             :  */
     819             : static void
     820    16745898 : advance_aggregates(AggState *aggstate)
     821             : {
     822             :     bool        dummynull;
     823             : 
     824    16745898 :     ExecEvalExprSwitchContext(aggstate->phase->evaltrans,
     825             :                               aggstate->tmpcontext,
     826             :                               &dummynull);
     827    16745870 : }
     828             : 
     829             : /*
     830             :  * Run the transition function for a DISTINCT or ORDER BY aggregate
     831             :  * with only one input.  This is called after we have completed
     832             :  * entering all the input values into the sort object.  We complete the
     833             :  * sort, read out the values in sorted order, and run the transition
     834             :  * function on each value (applying DISTINCT if appropriate).
     835             :  *
     836             :  * Note that the strictness of the transition function was checked when
     837             :  * entering the values into the sort, so we don't check it again here;
     838             :  * we just apply standard SQL DISTINCT logic.
     839             :  *
     840             :  * The one-input case is handled separately from the multi-input case
     841             :  * for performance reasons: for single by-value inputs, such as the
     842             :  * common case of count(distinct id), the tuplesort_getdatum code path
     843             :  * is around 300% faster.  (The speedup for by-reference types is less
     844             :  * but still noticeable.)
     845             :  *
     846             :  * This function handles only one grouping set (already set in
     847             :  * aggstate->current_set).
     848             :  *
     849             :  * When called, CurrentMemoryContext should be the per-query context.
     850             :  */
     851             : static void
     852       69446 : process_ordered_aggregate_single(AggState *aggstate,
     853             :                                  AggStatePerTrans pertrans,
     854             :                                  AggStatePerGroup pergroupstate)
     855             : {
     856       69446 :     Datum       oldVal = (Datum) 0;
     857       69446 :     bool        oldIsNull = true;
     858       69446 :     bool        haveOldVal = false;
     859       69446 :     MemoryContext workcontext = aggstate->tmpcontext->ecxt_per_tuple_memory;
     860             :     MemoryContext oldContext;
     861       69446 :     bool        isDistinct = (pertrans->numDistinctCols > 0);
     862       69446 :     Datum       newAbbrevVal = (Datum) 0;
     863       69446 :     Datum       oldAbbrevVal = (Datum) 0;
     864       69446 :     FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
     865             :     Datum      *newVal;
     866             :     bool       *isNull;
     867             : 
     868             :     Assert(pertrans->numDistinctCols < 2);
     869             : 
     870       69446 :     tuplesort_performsort(pertrans->sortstates[aggstate->current_set]);
     871             : 
     872             :     /* Load the column into argument 1 (arg 0 will be transition value) */
     873       69446 :     newVal = &fcinfo->args[1].value;
     874       69446 :     isNull = &fcinfo->args[1].isnull;
     875             : 
     876             :     /*
     877             :      * Note: if input type is pass-by-ref, the datums returned by the sort are
     878             :      * freshly palloc'd in the per-query context, so we must be careful to
     879             :      * pfree them when they are no longer needed.
     880             :      */
     881             : 
     882      881092 :     while (tuplesort_getdatum(pertrans->sortstates[aggstate->current_set],
     883             :                               true, newVal, isNull, &newAbbrevVal))
     884             :     {
     885             :         /*
     886             :          * Clear and select the working context for evaluation of the equality
     887             :          * function and transition function.
     888             :          */
     889      811646 :         MemoryContextReset(workcontext);
     890      811646 :         oldContext = MemoryContextSwitchTo(workcontext);
     891             : 
     892             :         /*
     893             :          * If DISTINCT mode, and not distinct from prior, skip it.
     894             :          */
     895      811646 :         if (isDistinct &&
     896      283396 :             haveOldVal &&
     897           4 :             ((oldIsNull && *isNull) ||
     898      283396 :              (!oldIsNull && !*isNull &&
     899      566744 :               oldAbbrevVal == newAbbrevVal &&
     900      283370 :               DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne,
     901             :                                              pertrans->aggCollation,
     902             :                                              oldVal, *newVal)))))
     903             :         {
     904             :             /* equal to prior, so forget this one */
     905      243932 :             if (!pertrans->inputtypeByVal && !*isNull)
     906      124600 :                 pfree(DatumGetPointer(*newVal));
     907             :         }
     908             :         else
     909             :         {
     910      567714 :             advance_transition_function(aggstate, pertrans, pergroupstate);
     911             :             /* forget the old value, if any */
     912      567714 :             if (!oldIsNull && !pertrans->inputtypeByVal)
     913      326288 :                 pfree(DatumGetPointer(oldVal));
     914             :             /* and remember the new one for subsequent equality checks */
     915      567714 :             oldVal = *newVal;
     916      567714 :             oldAbbrevVal = newAbbrevVal;
     917      567714 :             oldIsNull = *isNull;
     918      567714 :             haveOldVal = true;
     919             :         }
     920             : 
     921      811646 :         MemoryContextSwitchTo(oldContext);
     922             :     }
     923             : 
     924       69446 :     if (!oldIsNull && !pertrans->inputtypeByVal)
     925         166 :         pfree(DatumGetPointer(oldVal));
     926             : 
     927       69446 :     tuplesort_end(pertrans->sortstates[aggstate->current_set]);
     928       69446 :     pertrans->sortstates[aggstate->current_set] = NULL;
     929       69446 : }
     930             : 
     931             : /*
     932             :  * Run the transition function for a DISTINCT or ORDER BY aggregate
     933             :  * with more than one input.  This is called after we have completed
     934             :  * entering all the input values into the sort object.  We complete the
     935             :  * sort, read out the values in sorted order, and run the transition
     936             :  * function on each value (applying DISTINCT if appropriate).
     937             :  *
     938             :  * This function handles only one grouping set (already set in
     939             :  * aggstate->current_set).
     940             :  *
     941             :  * When called, CurrentMemoryContext should be the per-query context.
     942             :  */
     943             : static void
     944     1389754 : process_ordered_aggregate_multi(AggState *aggstate,
     945             :                                 AggStatePerTrans pertrans,
     946             :                                 AggStatePerGroup pergroupstate)
     947             : {
     948     1389754 :     ExprContext *tmpcontext = aggstate->tmpcontext;
     949     1389754 :     FunctionCallInfo fcinfo = pertrans->transfn_fcinfo;
     950     1389754 :     TupleTableSlot *slot1 = pertrans->sortslot;
     951     1389754 :     TupleTableSlot *slot2 = pertrans->uniqslot;
     952     1389754 :     int         numTransInputs = pertrans->numTransInputs;
     953     1389754 :     int         numDistinctCols = pertrans->numDistinctCols;
     954     1389754 :     Datum       newAbbrevVal = (Datum) 0;
     955     1389754 :     Datum       oldAbbrevVal = (Datum) 0;
     956     1389754 :     bool        haveOldValue = false;
     957     1389754 :     TupleTableSlot *save = aggstate->tmpcontext->ecxt_outertuple;
     958             :     int         i;
     959             : 
     960     1389754 :     tuplesort_performsort(pertrans->sortstates[aggstate->current_set]);
     961             : 
     962     1389754 :     ExecClearTuple(slot1);
     963     1389754 :     if (slot2)
     964          56 :         ExecClearTuple(slot2);
     965             : 
     966     1407874 :     while (tuplesort_gettupleslot(pertrans->sortstates[aggstate->current_set],
     967             :                                   true, true, slot1, &newAbbrevVal))
     968             :     {
     969       18120 :         CHECK_FOR_INTERRUPTS();
     970             : 
     971       18120 :         tmpcontext->ecxt_outertuple = slot1;
     972       18120 :         tmpcontext->ecxt_innertuple = slot2;
     973             : 
     974       18120 :         if (numDistinctCols == 0 ||
     975         472 :             !haveOldValue ||
     976         416 :             newAbbrevVal != oldAbbrevVal ||
     977         392 :             !ExecQual(pertrans->equalfnMulti, tmpcontext))
     978             :         {
     979             :             /*
     980             :              * Extract the first numTransInputs columns as datums to pass to
     981             :              * the transfn.
     982             :              */
     983       17848 :             slot_getsomeattrs(slot1, numTransInputs);
     984             : 
     985             :             /* Load values into fcinfo */
     986             :             /* Start from 1, since the 0th arg will be the transition value */
     987       36916 :             for (i = 0; i < numTransInputs; i++)
     988             :             {
     989       19068 :                 fcinfo->args[i + 1].value = slot1->tts_values[i];
     990       19068 :                 fcinfo->args[i + 1].isnull = slot1->tts_isnull[i];
     991             :             }
     992             : 
     993       17848 :             advance_transition_function(aggstate, pertrans, pergroupstate);
     994             : 
     995       17848 :             if (numDistinctCols > 0)
     996             :             {
     997             :                 /* swap the slot pointers to retain the current tuple */
     998         200 :                 TupleTableSlot *tmpslot = slot2;
     999             : 
    1000         200 :                 slot2 = slot1;
    1001         200 :                 slot1 = tmpslot;
    1002             :                 /* avoid ExecQual() calls by reusing abbreviated keys */
    1003         200 :                 oldAbbrevVal = newAbbrevVal;
    1004         200 :                 haveOldValue = true;
    1005             :             }
    1006             :         }
    1007             : 
    1008             :         /* Reset context each time */
    1009       18120 :         ResetExprContext(tmpcontext);
    1010             : 
    1011       18120 :         ExecClearTuple(slot1);
    1012             :     }
    1013             : 
    1014     1389754 :     if (slot2)
    1015          56 :         ExecClearTuple(slot2);
    1016             : 
    1017     1389754 :     tuplesort_end(pertrans->sortstates[aggstate->current_set]);
    1018     1389754 :     pertrans->sortstates[aggstate->current_set] = NULL;
    1019             : 
    1020             :     /* restore previous slot, potentially in use for grouping sets */
    1021     1389754 :     tmpcontext->ecxt_outertuple = save;
    1022     1389754 : }
    1023             : 
    1024             : /*
    1025             :  * Compute the final value of one aggregate.
    1026             :  *
    1027             :  * This function handles only one grouping set (already set in
    1028             :  * aggstate->current_set).
    1029             :  *
    1030             :  * The finalfn will be run, and the result delivered, in the
    1031             :  * output-tuple context; caller's CurrentMemoryContext does not matter.
    1032             :  *
    1033             :  * The finalfn uses the state as set in the transno. This also might be
    1034             :  * being used by another aggregate function, so it's important that we do
    1035             :  * nothing destructive here.
    1036             :  */
    1037             : static void
    1038     2127084 : finalize_aggregate(AggState *aggstate,
    1039             :                    AggStatePerAgg peragg,
    1040             :                    AggStatePerGroup pergroupstate,
    1041             :                    Datum *resultVal, bool *resultIsNull)
    1042             : {
    1043     2127084 :     LOCAL_FCINFO(fcinfo, FUNC_MAX_ARGS);
    1044     2127084 :     bool        anynull = false;
    1045             :     MemoryContext oldContext;
    1046             :     int         i;
    1047             :     ListCell   *lc;
    1048     2127084 :     AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
    1049             : 
    1050     2127084 :     oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
    1051             : 
    1052             :     /*
    1053             :      * Evaluate any direct arguments.  We do this even if there's no finalfn
    1054             :      * (which is unlikely anyway), so that side-effects happen as expected.
    1055             :      * The direct arguments go into arg positions 1 and up, leaving position 0
    1056             :      * for the transition state value.
    1057             :      */
    1058     2127084 :     i = 1;
    1059     2127748 :     foreach(lc, peragg->aggdirectargs)
    1060             :     {
    1061         664 :         ExprState  *expr = (ExprState *) lfirst(lc);
    1062             : 
    1063         664 :         fcinfo->args[i].value = ExecEvalExpr(expr,
    1064             :                                              aggstate->ss.ps.ps_ExprContext,
    1065             :                                              &fcinfo->args[i].isnull);
    1066         664 :         anynull |= fcinfo->args[i].isnull;
    1067         664 :         i++;
    1068             :     }
    1069             : 
    1070             :     /*
    1071             :      * Apply the agg's finalfn if one is provided, else return transValue.
    1072             :      */
    1073     2127084 :     if (OidIsValid(peragg->finalfn_oid))
    1074             :     {
    1075     1610380 :         int         numFinalArgs = peragg->numFinalArgs;
    1076             : 
    1077             :         /* set up aggstate->curperagg for AggGetAggref() */
    1078     1610380 :         aggstate->curperagg = peragg;
    1079             : 
    1080     1610380 :         InitFunctionCallInfoData(*fcinfo, &peragg->finalfn,
    1081             :                                  numFinalArgs,
    1082             :                                  pertrans->aggCollation,
    1083             :                                  (void *) aggstate, NULL);
    1084             : 
    1085             :         /* Fill in the transition state value */
    1086     1610380 :         fcinfo->args[0].value =
    1087     1610380 :             MakeExpandedObjectReadOnly(pergroupstate->transValue,
    1088             :                                        pergroupstate->transValueIsNull,
    1089             :                                        pertrans->transtypeLen);
    1090     1610380 :         fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
    1091     1610380 :         anynull |= pergroupstate->transValueIsNull;
    1092             : 
    1093             :         /* Fill any remaining argument positions with nulls */
    1094     3097446 :         for (; i < numFinalArgs; i++)
    1095             :         {
    1096     1487066 :             fcinfo->args[i].value = (Datum) 0;
    1097     1487066 :             fcinfo->args[i].isnull = true;
    1098     1487066 :             anynull = true;
    1099             :         }
    1100             : 
    1101     1610380 :         if (fcinfo->flinfo->fn_strict && anynull)
    1102             :         {
    1103             :             /* don't call a strict function with NULL inputs */
    1104           0 :             *resultVal = (Datum) 0;
    1105           0 :             *resultIsNull = true;
    1106             :         }
    1107             :         else
    1108             :         {
    1109     1610380 :             *resultVal = FunctionCallInvoke(fcinfo);
    1110     1610380 :             *resultIsNull = fcinfo->isnull;
    1111             :         }
    1112     1610380 :         aggstate->curperagg = NULL;
    1113             :     }
    1114             :     else
    1115             :     {
    1116             :         /* Don't need MakeExpandedObjectReadOnly; datumCopy will copy it */
    1117      516704 :         *resultVal = pergroupstate->transValue;
    1118      516704 :         *resultIsNull = pergroupstate->transValueIsNull;
    1119             :     }
    1120             : 
    1121             :     /*
    1122             :      * If result is pass-by-ref, make sure it is in the right context.
    1123             :      */
    1124     2127084 :     if (!peragg->resulttypeByVal && !*resultIsNull &&
    1125      249180 :         !MemoryContextContains(CurrentMemoryContext,
    1126      249180 :                                DatumGetPointer(*resultVal)))
    1127       34214 :         *resultVal = datumCopy(*resultVal,
    1128       34214 :                                peragg->resulttypeByVal,
    1129       34214 :                                peragg->resulttypeLen);
    1130             : 
    1131     2127084 :     MemoryContextSwitchTo(oldContext);
    1132     2127084 : }
    1133             : 
    1134             : /*
    1135             :  * Compute the output value of one partial aggregate.
    1136             :  *
    1137             :  * The serialization function will be run, and the result delivered, in the
    1138             :  * output-tuple context; caller's CurrentMemoryContext does not matter.
    1139             :  */
    1140             : static void
    1141        7518 : finalize_partialaggregate(AggState *aggstate,
    1142             :                           AggStatePerAgg peragg,
    1143             :                           AggStatePerGroup pergroupstate,
    1144             :                           Datum *resultVal, bool *resultIsNull)
    1145             : {
    1146        7518 :     AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
    1147             :     MemoryContext oldContext;
    1148             : 
    1149        7518 :     oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
    1150             : 
    1151             :     /*
    1152             :      * serialfn_oid will be set if we must serialize the transvalue before
    1153             :      * returning it
    1154             :      */
    1155        7518 :     if (OidIsValid(pertrans->serialfn_oid))
    1156             :     {
    1157             :         /* Don't call a strict serialization function with NULL input. */
    1158         124 :         if (pertrans->serialfn.fn_strict && pergroupstate->transValueIsNull)
    1159             :         {
    1160          80 :             *resultVal = (Datum) 0;
    1161          80 :             *resultIsNull = true;
    1162             :         }
    1163             :         else
    1164             :         {
    1165          44 :             FunctionCallInfo fcinfo = pertrans->serialfn_fcinfo;
    1166             : 
    1167          44 :             fcinfo->args[0].value =
    1168          44 :                 MakeExpandedObjectReadOnly(pergroupstate->transValue,
    1169             :                                            pergroupstate->transValueIsNull,
    1170             :                                            pertrans->transtypeLen);
    1171          44 :             fcinfo->args[0].isnull = pergroupstate->transValueIsNull;
    1172          44 :             fcinfo->isnull = false;
    1173             : 
    1174          44 :             *resultVal = FunctionCallInvoke(fcinfo);
    1175          44 :             *resultIsNull = fcinfo->isnull;
    1176             :         }
    1177             :     }
    1178             :     else
    1179             :     {
    1180             :         /* Don't need MakeExpandedObjectReadOnly; datumCopy will copy it */
    1181        7394 :         *resultVal = pergroupstate->transValue;
    1182        7394 :         *resultIsNull = pergroupstate->transValueIsNull;
    1183             :     }
    1184             : 
    1185             :     /* If result is pass-by-ref, make sure it is in the right context. */
    1186        7518 :     if (!peragg->resulttypeByVal && !*resultIsNull &&
    1187        2360 :         !MemoryContextContains(CurrentMemoryContext,
    1188        2360 :                                DatumGetPointer(*resultVal)))
    1189        2316 :         *resultVal = datumCopy(*resultVal,
    1190        2316 :                                peragg->resulttypeByVal,
    1191        2316 :                                peragg->resulttypeLen);
    1192             : 
    1193        7518 :     MemoryContextSwitchTo(oldContext);
    1194        7518 : }
    1195             : 
    1196             : /*
    1197             :  * Extract the attributes that make up the grouping key into the
    1198             :  * hashslot. This is necessary to compute the hash or perform a lookup.
    1199             :  */
    1200             : static inline void
    1201     4167762 : prepare_hash_slot(AggStatePerHash perhash,
    1202             :                   TupleTableSlot *inputslot,
    1203             :                   TupleTableSlot *hashslot)
    1204             : {
    1205             :     int         i;
    1206             : 
    1207             :     /* transfer just the needed columns into hashslot */
    1208     4167762 :     slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
    1209     4167762 :     ExecClearTuple(hashslot);
    1210             : 
    1211    10262626 :     for (i = 0; i < perhash->numhashGrpCols; i++)
    1212             :     {
    1213     6094864 :         int         varNumber = perhash->hashGrpColIdxInput[i] - 1;
    1214             : 
    1215     6094864 :         hashslot->tts_values[i] = inputslot->tts_values[varNumber];
    1216     6094864 :         hashslot->tts_isnull[i] = inputslot->tts_isnull[varNumber];
    1217             :     }
    1218     4167762 :     ExecStoreVirtualTuple(hashslot);
    1219     4167762 : }
    1220             : 
    1221             : /*
    1222             :  * Prepare to finalize and project based on the specified representative tuple
    1223             :  * slot and grouping set.
    1224             :  *
    1225             :  * In the specified tuple slot, force to null all attributes that should be
    1226             :  * read as null in the context of the current grouping set.  Also stash the
    1227             :  * current group bitmap where GroupingExpr can get at it.
    1228             :  *
    1229             :  * This relies on three conditions:
    1230             :  *
    1231             :  * 1) Nothing is ever going to try and extract the whole tuple from this slot,
    1232             :  * only reference it in evaluations, which will only access individual
    1233             :  * attributes.
    1234             :  *
    1235             :  * 2) No system columns are going to need to be nulled. (If a system column is
    1236             :  * referenced in a group clause, it is actually projected in the outer plan
    1237             :  * tlist.)
    1238             :  *
    1239             :  * 3) Within a given phase, we never need to recover the value of an attribute
    1240             :  * once it has been set to null.
    1241             :  *
    1242             :  * Poking into the slot this way is a bit ugly, but the consensus is that the
    1243             :  * alternative was worse.
    1244             :  */
    1245             : static void
    1246     2024406 : prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet)
    1247             : {
    1248     2024406 :     if (aggstate->phase->grouped_cols)
    1249             :     {
    1250      371858 :         Bitmapset  *grouped_cols = aggstate->phase->grouped_cols[currentSet];
    1251             : 
    1252      371858 :         aggstate->grouped_cols = grouped_cols;
    1253             : 
    1254      371858 :         if (TTS_EMPTY(slot))
    1255             :         {
    1256             :             /*
    1257             :              * Force all values to be NULL if working on an empty input tuple
    1258             :              * (i.e. an empty grouping set for which no input rows were
    1259             :              * supplied).
    1260             :              */
    1261          32 :             ExecStoreAllNullTuple(slot);
    1262             :         }
    1263      371826 :         else if (aggstate->all_grouped_cols)
    1264             :         {
    1265             :             ListCell   *lc;
    1266             : 
    1267             :             /* all_grouped_cols is arranged in desc order */
    1268      371790 :             slot_getsomeattrs(slot, linitial_int(aggstate->all_grouped_cols));
    1269             : 
    1270     1032658 :             foreach(lc, aggstate->all_grouped_cols)
    1271             :             {
    1272      660868 :                 int         attnum = lfirst_int(lc);
    1273             : 
    1274      660868 :                 if (!bms_is_member(attnum, grouped_cols))
    1275       38268 :                     slot->tts_isnull[attnum - 1] = true;
    1276             :             }
    1277             :         }
    1278             :     }
    1279     2024406 : }
    1280             : 
    1281             : /*
    1282             :  * Compute the final value of all aggregates for one group.
    1283             :  *
    1284             :  * This function handles only one grouping set at a time, which the caller must
    1285             :  * have selected.  It's also the caller's responsibility to adjust the supplied
    1286             :  * pergroup parameter to point to the current set's transvalues.
    1287             :  *
    1288             :  * Results are stored in the output econtext aggvalues/aggnulls.
    1289             :  */
    1290             : static void
    1291     2024406 : finalize_aggregates(AggState *aggstate,
    1292             :                     AggStatePerAgg peraggs,
    1293             :                     AggStatePerGroup pergroup)
    1294             : {
    1295     2024406 :     ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
    1296     2024406 :     Datum      *aggvalues = econtext->ecxt_aggvalues;
    1297     2024406 :     bool       *aggnulls = econtext->ecxt_aggnulls;
    1298             :     int         aggno;
    1299             :     int         transno;
    1300             : 
    1301             :     /*
    1302             :      * If there were any DISTINCT and/or ORDER BY aggregates, sort their
    1303             :      * inputs and run the transition functions.
    1304             :      */
    1305     4158836 :     for (transno = 0; transno < aggstate->numtrans; transno++)
    1306             :     {
    1307     2134430 :         AggStatePerTrans pertrans = &aggstate->pertrans[transno];
    1308             :         AggStatePerGroup pergroupstate;
    1309             : 
    1310     2134430 :         pergroupstate = &pergroup[transno];
    1311             : 
    1312     2134430 :         if (pertrans->numSortCols > 0)
    1313             :         {
    1314             :             Assert(aggstate->aggstrategy != AGG_HASHED &&
    1315             :                    aggstate->aggstrategy != AGG_MIXED);
    1316             : 
    1317     1459200 :             if (pertrans->numInputs == 1)
    1318       69446 :                 process_ordered_aggregate_single(aggstate,
    1319             :                                                  pertrans,
    1320             :                                                  pergroupstate);
    1321             :             else
    1322     1389754 :                 process_ordered_aggregate_multi(aggstate,
    1323             :                                                 pertrans,
    1324             :                                                 pergroupstate);
    1325             :         }
    1326             :     }
    1327             : 
    1328             :     /*
    1329             :      * Run the final functions.
    1330             :      */
    1331     4159008 :     for (aggno = 0; aggno < aggstate->numaggs; aggno++)
    1332             :     {
    1333     2134602 :         AggStatePerAgg peragg = &peraggs[aggno];
    1334     2134602 :         int         transno = peragg->transno;
    1335             :         AggStatePerGroup pergroupstate;
    1336             : 
    1337     2134602 :         pergroupstate = &pergroup[transno];
    1338             : 
    1339     2134602 :         if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
    1340        7518 :             finalize_partialaggregate(aggstate, peragg, pergroupstate,
    1341        7518 :                                       &aggvalues[aggno], &aggnulls[aggno]);
    1342             :         else
    1343     2127084 :             finalize_aggregate(aggstate, peragg, pergroupstate,
    1344     2127084 :                                &aggvalues[aggno], &aggnulls[aggno]);
    1345             :     }
    1346     2024406 : }
    1347             : 
    1348             : /*
    1349             :  * Project the result of a group (whose aggs have already been calculated by
    1350             :  * finalize_aggregates). Returns the result slot, or NULL if no row is
    1351             :  * projected (suppressed by qual).
    1352             :  */
    1353             : static TupleTableSlot *
    1354     2024406 : project_aggregates(AggState *aggstate)
    1355             : {
    1356     2024406 :     ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
    1357             : 
    1358             :     /*
    1359             :      * Check the qual (HAVING clause); if the group does not match, ignore it.
    1360             :      */
    1361     2024406 :     if (ExecQual(aggstate->ss.ps.qual, econtext))
    1362             :     {
    1363             :         /*
    1364             :          * Form and return projection tuple using the aggregate results and
    1365             :          * the representative input tuple.
    1366             :          */
    1367     1973410 :         return ExecProject(aggstate->ss.ps.ps_ProjInfo);
    1368             :     }
    1369             :     else
    1370       50996 :         InstrCountFiltered1(aggstate, 1);
    1371             : 
    1372       50996 :     return NULL;
    1373             : }
    1374             : 
    1375             : /*
    1376             :  * Find input-tuple columns that are needed, dividing them into
    1377             :  * aggregated and unaggregated sets.
    1378             :  */
    1379             : static void
    1380        5038 : find_cols(AggState *aggstate, Bitmapset **aggregated, Bitmapset **unaggregated)
    1381             : {
    1382        5038 :     Agg        *agg = (Agg *) aggstate->ss.ps.plan;
    1383             :     FindColsContext context;
    1384             : 
    1385        5038 :     context.is_aggref = false;
    1386        5038 :     context.aggregated = NULL;
    1387        5038 :     context.unaggregated = NULL;
    1388             : 
    1389             :     /* Examine tlist and quals */
    1390        5038 :     (void) find_cols_walker((Node *) agg->plan.targetlist, &context);
    1391        5038 :     (void) find_cols_walker((Node *) agg->plan.qual, &context);
    1392             : 
    1393             :     /* In some cases, grouping columns will not appear in the tlist */
    1394       12062 :     for (int i = 0; i < agg->numCols; i++)
    1395        7024 :         context.unaggregated = bms_add_member(context.unaggregated,
    1396        7024 :                                               agg->grpColIdx[i]);
    1397             : 
    1398        5038 :     *aggregated = context.aggregated;
    1399        5038 :     *unaggregated = context.unaggregated;
    1400        5038 : }
    1401             : 
    1402             : static bool
    1403       46926 : find_cols_walker(Node *node, FindColsContext *context)
    1404             : {
    1405       46926 :     if (node == NULL)
    1406        8968 :         return false;
    1407       37958 :     if (IsA(node, Var))
    1408             :     {
    1409       10582 :         Var        *var = (Var *) node;
    1410             : 
    1411             :         /* setrefs.c should have set the varno to OUTER_VAR */
    1412             :         Assert(var->varno == OUTER_VAR);
    1413             :         Assert(var->varlevelsup == 0);
    1414       10582 :         if (context->is_aggref)
    1415        2894 :             context->aggregated = bms_add_member(context->aggregated,
    1416        2894 :                                                  var->varattno);
    1417             :         else
    1418        7688 :             context->unaggregated = bms_add_member(context->unaggregated,
    1419        7688 :                                                    var->varattno);
    1420       10582 :         return false;
    1421             :     }
    1422       27376 :     if (IsA(node, Aggref))
    1423             :     {
    1424             :         Assert(!context->is_aggref);
    1425        4290 :         context->is_aggref = true;
    1426        4290 :         expression_tree_walker(node, find_cols_walker, (void *) context);
    1427        4290 :         context->is_aggref = false;
    1428        4290 :         return false;
    1429             :     }
    1430       23086 :     return expression_tree_walker(node, find_cols_walker,
    1431             :                                   (void *) context);
    1432             : }
    1433             : 
    1434             : /*
    1435             :  * (Re-)initialize the hash table(s) to empty.
    1436             :  *
    1437             :  * To implement hashed aggregation, we need a hashtable that stores a
    1438             :  * representative tuple and an array of AggStatePerGroup structs for each
    1439             :  * distinct set of GROUP BY column values.  We compute the hash key from the
    1440             :  * GROUP BY columns.  The per-group data is allocated in lookup_hash_entry(),
    1441             :  * for each entry.
    1442             :  *
    1443             :  * We have a separate hashtable and associated perhash data structure for each
    1444             :  * grouping set for which we're doing hashing.
    1445             :  *
    1446             :  * The contents of the hash tables always live in the hashcontext's per-tuple
    1447             :  * memory context (there is only one of these for all tables together, since
    1448             :  * they are all reset at the same time).
    1449             :  */
    1450             : static void
    1451       63172 : build_hash_tables(AggState *aggstate)
    1452             : {
    1453             :     int         setno;
    1454             : 
    1455      126538 :     for (setno = 0; setno < aggstate->num_hashes; ++setno)
    1456             :     {
    1457       63366 :         AggStatePerHash perhash = &aggstate->perhash[setno];
    1458             :         long        nbuckets;
    1459             :         Size        memory;
    1460             : 
    1461       63366 :         if (perhash->hashtable != NULL)
    1462             :         {
    1463       58896 :             ResetTupleHashTable(perhash->hashtable);
    1464       58896 :             continue;
    1465             :         }
    1466             : 
    1467             :         Assert(perhash->aggnode->numGroups > 0);
    1468             : 
    1469        4470 :         memory = aggstate->hash_mem_limit / aggstate->num_hashes;
    1470             : 
    1471             :         /* choose reasonable number of buckets per hashtable */
    1472        4470 :         nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
    1473        4470 :                                            perhash->aggnode->numGroups,
    1474             :                                            memory);
    1475             : 
    1476        4470 :         build_hash_table(aggstate, setno, nbuckets);
    1477             :     }
    1478             : 
    1479       63172 :     aggstate->hash_ngroups_current = 0;
    1480       63172 : }
    1481             : 
    1482             : /*
    1483             :  * Build a single hashtable for this grouping set.
    1484             :  */
    1485             : static void
    1486        4470 : build_hash_table(AggState *aggstate, int setno, long nbuckets)
    1487             : {
    1488        4470 :     AggStatePerHash perhash = &aggstate->perhash[setno];
    1489        4470 :     MemoryContext metacxt = aggstate->hash_metacxt;
    1490        4470 :     MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
    1491        4470 :     MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
    1492             :     Size        additionalsize;
    1493             : 
    1494             :     Assert(aggstate->aggstrategy == AGG_HASHED ||
    1495             :            aggstate->aggstrategy == AGG_MIXED);
    1496             : 
    1497             :     /*
    1498             :      * Used to make sure initial hash table allocation does not exceed
    1499             :      * hash_mem. Note that the estimate does not include space for
    1500             :      * pass-by-reference transition data values, nor for the representative
    1501             :      * tuple of each group.
    1502             :      */
    1503        4470 :     additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
    1504             : 
    1505        8940 :     perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps,
    1506        4470 :                                                 perhash->hashslot->tts_tupleDescriptor,
    1507             :                                                 perhash->numCols,
    1508             :                                                 perhash->hashGrpColIdxHash,
    1509        4470 :                                                 perhash->eqfuncoids,
    1510             :                                                 perhash->hashfunctions,
    1511        4470 :                                                 perhash->aggnode->grpCollations,
    1512             :                                                 nbuckets,
    1513             :                                                 additionalsize,
    1514             :                                                 metacxt,
    1515             :                                                 hashcxt,
    1516             :                                                 tmpcxt,
    1517        4470 :                                                 DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
    1518        4470 : }
    1519             : 
    1520             : /*
    1521             :  * Compute columns that actually need to be stored in hashtable entries.  The
    1522             :  * incoming tuples from the child plan node will contain grouping columns,
    1523             :  * other columns referenced in our targetlist and qual, columns used to
    1524             :  * compute the aggregate functions, and perhaps just junk columns we don't use
    1525             :  * at all.  Only columns of the first two types need to be stored in the
    1526             :  * hashtable, and getting rid of the others can make the table entries
    1527             :  * significantly smaller.  The hashtable only contains the relevant columns,
    1528             :  * and is packed/unpacked in lookup_hash_entry() / agg_retrieve_hash_table()
    1529             :  * into the format of the normal input descriptor.
    1530             :  *
    1531             :  * Additional columns, in addition to the columns grouped by, come from two
    1532             :  * sources: Firstly functionally dependent columns that we don't need to group
    1533             :  * by themselves, and secondly ctids for row-marks.
    1534             :  *
    1535             :  * To eliminate duplicates, we build a bitmapset of the needed columns, and
    1536             :  * then build an array of the columns included in the hashtable. We might
    1537             :  * still have duplicates if the passed-in grpColIdx has them, which can happen
    1538             :  * in edge cases from semijoins/distinct; these can't always be removed,
    1539             :  * because it's not certain that the duplicate cols will be using the same
    1540             :  * hash function.
    1541             :  *
    1542             :  * Note that the array is preserved over ExecReScanAgg, so we allocate it in
    1543             :  * the per-query context (unlike the hash table itself).
    1544             :  */
    1545             : static void
    1546        5038 : find_hash_columns(AggState *aggstate)
    1547             : {
    1548             :     Bitmapset  *base_colnos;
    1549             :     Bitmapset  *aggregated_colnos;
    1550        5038 :     TupleDesc   scanDesc = aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor;
    1551        5038 :     List       *outerTlist = outerPlanState(aggstate)->plan->targetlist;
    1552        5038 :     int         numHashes = aggstate->num_hashes;
    1553        5038 :     EState     *estate = aggstate->ss.ps.state;
    1554             :     int         j;
    1555             : 
    1556             :     /* Find Vars that will be needed in tlist and qual */
    1557        5038 :     find_cols(aggstate, &aggregated_colnos, &base_colnos);
    1558        5038 :     aggstate->colnos_needed = bms_union(base_colnos, aggregated_colnos);
    1559        5038 :     aggstate->max_colno_needed = 0;
    1560        5038 :     aggstate->all_cols_needed = true;
    1561             : 
    1562       17564 :     for (int i = 0; i < scanDesc->natts; i++)
    1563             :     {
    1564       12526 :         int         colno = i + 1;
    1565             : 
    1566       12526 :         if (bms_is_member(colno, aggstate->colnos_needed))
    1567        9110 :             aggstate->max_colno_needed = colno;
    1568             :         else
    1569        3416 :             aggstate->all_cols_needed = false;
    1570             :     }
    1571             : 
    1572       10344 :     for (j = 0; j < numHashes; ++j)
    1573             :     {
    1574        5306 :         AggStatePerHash perhash = &aggstate->perhash[j];
    1575        5306 :         Bitmapset  *colnos = bms_copy(base_colnos);
    1576        5306 :         AttrNumber *grpColIdx = perhash->aggnode->grpColIdx;
    1577        5306 :         List       *hashTlist = NIL;
    1578             :         TupleDesc   hashDesc;
    1579             :         int         maxCols;
    1580             :         int         i;
    1581             : 
    1582        5306 :         perhash->largestGrpColIdx = 0;
    1583             : 
    1584             :         /*
    1585             :          * If we're doing grouping sets, then some Vars might be referenced in
    1586             :          * tlist/qual for the benefit of other grouping sets, but not needed
    1587             :          * when hashing; i.e. prepare_projection_slot will null them out, so
    1588             :          * there'd be no point storing them.  Use prepare_projection_slot's
    1589             :          * logic to determine which.
    1590             :          */
    1591        5306 :         if (aggstate->phases[0].grouped_cols)
    1592             :         {
    1593        5306 :             Bitmapset  *grouped_cols = aggstate->phases[0].grouped_cols[j];
    1594             :             ListCell   *lc;
    1595             : 
    1596       13348 :             foreach(lc, aggstate->all_grouped_cols)
    1597             :             {
    1598        8042 :                 int         attnum = lfirst_int(lc);
    1599             : 
    1600        8042 :                 if (!bms_is_member(attnum, grouped_cols))
    1601         680 :                     colnos = bms_del_member(colnos, attnum);
    1602             :             }
    1603             :         }
    1604             : 
    1605             :         /*
    1606             :          * Compute maximum number of input columns accounting for possible
    1607             :          * duplications in the grpColIdx array, which can happen in some edge
    1608             :          * cases where HashAggregate was generated as part of a semijoin or a
    1609             :          * DISTINCT.
    1610             :          */
    1611        5306 :         maxCols = bms_num_members(colnos) + perhash->numCols;
    1612             : 
    1613        5306 :         perhash->hashGrpColIdxInput =
    1614        5306 :             palloc(maxCols * sizeof(AttrNumber));
    1615        5306 :         perhash->hashGrpColIdxHash =
    1616        5306 :             palloc(perhash->numCols * sizeof(AttrNumber));
    1617             : 
    1618             :         /* Add all the grouping columns to colnos */
    1619       12672 :         for (i = 0; i < perhash->numCols; i++)
    1620        7366 :             colnos = bms_add_member(colnos, grpColIdx[i]);
    1621             : 
    1622             :         /*
    1623             :          * First build mapping for columns directly hashed. These are the
    1624             :          * first, because they'll be accessed when computing hash values and
    1625             :          * comparing tuples for exact matches. We also build simple mapping
    1626             :          * for execGrouping, so it knows where to find the to-be-hashed /
    1627             :          * compared columns in the input.
    1628             :          */
    1629       12672 :         for (i = 0; i < perhash->numCols; i++)
    1630             :         {
    1631        7366 :             perhash->hashGrpColIdxInput[i] = grpColIdx[i];
    1632        7366 :             perhash->hashGrpColIdxHash[i] = i + 1;
    1633        7366 :             perhash->numhashGrpCols++;
    1634             :             /* delete already mapped columns */
    1635        7366 :             bms_del_member(colnos, grpColIdx[i]);
    1636             :         }
    1637             : 
    1638             :         /* and add the remaining columns */
    1639        5684 :         while ((i = bms_first_member(colnos)) >= 0)
    1640             :         {
    1641         378 :             perhash->hashGrpColIdxInput[perhash->numhashGrpCols] = i;
    1642         378 :             perhash->numhashGrpCols++;
    1643             :         }
    1644             : 
    1645             :         /* and build a tuple descriptor for the hashtable */
    1646       13050 :         for (i = 0; i < perhash->numhashGrpCols; i++)
    1647             :         {
    1648        7744 :             int         varNumber = perhash->hashGrpColIdxInput[i] - 1;
    1649             : 
    1650        7744 :             hashTlist = lappend(hashTlist, list_nth(outerTlist, varNumber));
    1651        7744 :             perhash->largestGrpColIdx =
    1652        7744 :                 Max(varNumber + 1, perhash->largestGrpColIdx);
    1653             :         }
    1654             : 
    1655        5306 :         hashDesc = ExecTypeFromTL(hashTlist);
    1656             : 
    1657        5306 :         execTuplesHashPrepare(perhash->numCols,
    1658        5306 :                               perhash->aggnode->grpOperators,
    1659             :                               &perhash->eqfuncoids,
    1660             :                               &perhash->hashfunctions);
    1661        5306 :         perhash->hashslot =
    1662        5306 :             ExecAllocTableSlot(&estate->es_tupleTable, hashDesc,
    1663             :                                &TTSOpsMinimalTuple);
    1664             : 
    1665        5306 :         list_free(hashTlist);
    1666        5306 :         bms_free(colnos);
    1667             :     }
    1668             : 
    1669        5038 :     bms_free(base_colnos);
    1670        5038 : }
    1671             : 
    1672             : /*
    1673             :  * Estimate per-hash-table-entry overhead.
    1674             :  */
    1675             : Size
    1676       18436 : hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
    1677             : {
    1678             :     Size        tupleChunkSize;
    1679             :     Size        pergroupChunkSize;
    1680             :     Size        transitionChunkSize;
    1681       18436 :     Size        tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
    1682             :                              tupleWidth);
    1683       18436 :     Size        pergroupSize = numTrans * sizeof(AggStatePerGroupData);
    1684             : 
    1685       18436 :     tupleChunkSize = CHUNKHDRSZ + tupleSize;
    1686             : 
    1687       18436 :     if (pergroupSize > 0)
    1688        6834 :         pergroupChunkSize = CHUNKHDRSZ + pergroupSize;
    1689             :     else
    1690       11602 :         pergroupChunkSize = 0;
    1691             : 
    1692       18436 :     if (transitionSpace > 0)
    1693        3168 :         transitionChunkSize = CHUNKHDRSZ + transitionSpace;
    1694             :     else
    1695       15268 :         transitionChunkSize = 0;
    1696             : 
    1697             :     return
    1698             :         sizeof(TupleHashEntryData) +
    1699       18436 :         tupleChunkSize +
    1700       18436 :         pergroupChunkSize +
    1701             :         transitionChunkSize;
    1702             : }
    1703             : 
    1704             : /*
    1705             :  * hashagg_recompile_expressions()
    1706             :  *
    1707             :  * Identifies the right phase, compiles the right expression given the
    1708             :  * arguments, and then sets phase->evalfunc to that expression.
    1709             :  *
    1710             :  * Different versions of the compiled expression are needed depending on
    1711             :  * whether hash aggregation has spilled or not, and whether it's reading from
    1712             :  * the outer plan or a tape. Before spilling to disk, the expression reads
    1713             :  * from the outer plan and does not need to perform a NULL check. After
    1714             :  * HashAgg begins to spill, new groups will not be created in the hash table,
    1715             :  * and the AggStatePerGroup array may be NULL; therefore we need to add a null
    1716             :  * pointer check to the expression. Then, when reading spilled data from a
    1717             :  * tape, we change the outer slot type to be a fixed minimal tuple slot.
    1718             :  *
    1719             :  * It would be wasteful to recompile every time, so cache the compiled
    1720             :  * expressions in the AggStatePerPhase, and reuse when appropriate.
    1721             :  */
    1722             : static void
    1723       95804 : hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
    1724             : {
    1725             :     AggStatePerPhase phase;
    1726       95804 :     int         i = minslot ? 1 : 0;
    1727       95804 :     int         j = nullcheck ? 1 : 0;
    1728             : 
    1729             :     Assert(aggstate->aggstrategy == AGG_HASHED ||
    1730             :            aggstate->aggstrategy == AGG_MIXED);
    1731             : 
    1732       95804 :     if (aggstate->aggstrategy == AGG_HASHED)
    1733       60756 :         phase = &aggstate->phases[0];
    1734             :     else                        /* AGG_MIXED */
    1735       35048 :         phase = &aggstate->phases[1];
    1736             : 
    1737       95804 :     if (phase->evaltrans_cache[i][j] == NULL)
    1738             :     {
    1739          64 :         const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
    1740          64 :         bool        outerfixed = aggstate->ss.ps.outeropsfixed;
    1741          64 :         bool        dohash = true;
    1742          64 :         bool        dosort = false;
    1743             : 
    1744             :         /*
    1745             :          * If minslot is true, that means we are processing a spilled batch
    1746             :          * (inside agg_refill_hash_table()), and we must not advance the
    1747             :          * sorted grouping sets.
    1748             :          */
    1749          64 :         if (aggstate->aggstrategy == AGG_MIXED && !minslot)
    1750           8 :             dosort = true;
    1751             : 
    1752             :         /* temporarily change the outerops while compiling the expression */
    1753          64 :         if (minslot)
    1754             :         {
    1755          32 :             aggstate->ss.ps.outerops = &TTSOpsMinimalTuple;
    1756          32 :             aggstate->ss.ps.outeropsfixed = true;
    1757             :         }
    1758             : 
    1759          64 :         phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
    1760             :                                                          dosort, dohash,
    1761             :                                                          nullcheck);
    1762             : 
    1763             :         /* change back */
    1764          64 :         aggstate->ss.ps.outerops = outerops;
    1765          64 :         aggstate->ss.ps.outeropsfixed = outerfixed;
    1766             :     }
    1767             : 
    1768       95804 :     phase->evaltrans = phase->evaltrans_cache[i][j];
    1769       95804 : }
    1770             : 
    1771             : /*
    1772             :  * Set limits that trigger spilling to avoid exceeding hash_mem. Consider the
    1773             :  * number of partitions we expect to create (if we do spill).
    1774             :  *
    1775             :  * There are two limits: a memory limit, and also an ngroups limit. The
    1776             :  * ngroups limit becomes important when we expect transition values to grow
    1777             :  * substantially larger than the initial value.
    1778             :  */
    1779             : void
    1780       36156 : hash_agg_set_limits(double hashentrysize, double input_groups, int used_bits,
    1781             :                     Size *mem_limit, uint64 *ngroups_limit,
    1782             :                     int *num_partitions)
    1783             : {
    1784             :     int         npartitions;
    1785             :     Size        partition_mem;
    1786       36156 :     Size        hash_mem_limit = get_hash_memory_limit();
    1787             : 
    1788             :     /* if not expected to spill, use all of hash_mem */
    1789       36156 :     if (input_groups * hashentrysize <= hash_mem_limit)
    1790             :     {
    1791       34268 :         if (num_partitions != NULL)
    1792       17048 :             *num_partitions = 0;
    1793       34268 :         *mem_limit = hash_mem_limit;
    1794       34268 :         *ngroups_limit = hash_mem_limit / hashentrysize;
    1795       34268 :         return;
    1796             :     }
    1797             : 
    1798             :     /*
    1799             :      * Calculate expected memory requirements for spilling, which is the size
    1800             :      * of the buffers needed for all the tapes that need to be open at once.
    1801             :      * Then, subtract that from the memory available for holding hash tables.
    1802             :      */
    1803        1888 :     npartitions = hash_choose_num_partitions(input_groups,
    1804             :                                              hashentrysize,
    1805             :                                              used_bits,
    1806             :                                              NULL);
    1807        1888 :     if (num_partitions != NULL)
    1808          64 :         *num_partitions = npartitions;
    1809             : 
    1810        1888 :     partition_mem =
    1811        1888 :         HASHAGG_READ_BUFFER_SIZE +
    1812             :         HASHAGG_WRITE_BUFFER_SIZE * npartitions;
    1813             : 
    1814             :     /*
    1815             :      * Don't set the limit below 3/4 of hash_mem. In that case, we are at the
    1816             :      * minimum number of partitions, so we aren't going to dramatically exceed
    1817             :      * work mem anyway.
    1818             :      */
    1819        1888 :     if (hash_mem_limit > 4 * partition_mem)
    1820           0 :         *mem_limit = hash_mem_limit - partition_mem;
    1821             :     else
    1822        1888 :         *mem_limit = hash_mem_limit * 0.75;
    1823             : 
    1824        1888 :     if (*mem_limit > hashentrysize)
    1825        1888 :         *ngroups_limit = *mem_limit / hashentrysize;
    1826             :     else
    1827           0 :         *ngroups_limit = 1;
    1828             : }
    1829             : 
    1830             : /*
    1831             :  * hash_agg_check_limits
    1832             :  *
    1833             :  * After adding a new group to the hash table, check whether we need to enter
    1834             :  * spill mode. Allocations may happen without adding new groups (for instance,
    1835             :  * if the transition state size grows), so this check is imperfect.
    1836             :  */
    1837             : static void
    1838      351218 : hash_agg_check_limits(AggState *aggstate)
    1839             : {
    1840      351218 :     uint64      ngroups = aggstate->hash_ngroups_current;
    1841      351218 :     Size        meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
    1842             :                                                      true);
    1843      351218 :     Size        hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
    1844             :                                                         true);
    1845             : 
    1846             :     /*
    1847             :      * Don't spill unless there's at least one group in the hash table so we
    1848             :      * can be sure to make progress even in edge cases.
    1849             :      */
    1850      351218 :     if (aggstate->hash_ngroups_current > 0 &&
    1851      351218 :         (meta_mem + hashkey_mem > aggstate->hash_mem_limit ||
    1852      333330 :          ngroups > aggstate->hash_ngroups_limit))
    1853             :     {
    1854       17904 :         hash_agg_enter_spill_mode(aggstate);
    1855             :     }
    1856      351218 : }
    1857             : 
    1858             : /*
    1859             :  * Enter "spill mode", meaning that no new groups are added to any of the hash
    1860             :  * tables. Tuples that would create a new group are instead spilled, and
    1861             :  * processed later.
    1862             :  */
    1863             : static void
    1864       17904 : hash_agg_enter_spill_mode(AggState *aggstate)
    1865             : {
    1866       17904 :     aggstate->hash_spill_mode = true;
    1867       17904 :     hashagg_recompile_expressions(aggstate, aggstate->table_filled, true);
    1868             : 
    1869       17904 :     if (!aggstate->hash_ever_spilled)
    1870             :     {
    1871             :         Assert(aggstate->hash_tapeset == NULL);
    1872             :         Assert(aggstate->hash_spills == NULL);
    1873             : 
    1874          44 :         aggstate->hash_ever_spilled = true;
    1875             : 
    1876          44 :         aggstate->hash_tapeset = LogicalTapeSetCreate(true, NULL, -1);
    1877             : 
    1878          44 :         aggstate->hash_spills = palloc(sizeof(HashAggSpill) * aggstate->num_hashes);
    1879             : 
    1880         128 :         for (int setno = 0; setno < aggstate->num_hashes; setno++)
    1881             :         {
    1882          84 :             AggStatePerHash perhash = &aggstate->perhash[setno];
    1883          84 :             HashAggSpill *spill = &aggstate->hash_spills[setno];
    1884             : 
    1885          84 :             hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
    1886          84 :                                perhash->aggnode->numGroups,
    1887             :                                aggstate->hashentrysize);
    1888             :         }
    1889             :     }
    1890       17904 : }
    1891             : 
    1892             : /*
    1893             :  * Update metrics after filling the hash table.
    1894             :  *
    1895             :  * If reading from the outer plan, from_tape should be false; if reading from
    1896             :  * another tape, from_tape should be true.
    1897             :  */
    1898             : static void
    1899       82034 : hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
    1900             : {
    1901             :     Size        meta_mem;
    1902             :     Size        hashkey_mem;
    1903             :     Size        buffer_mem;
    1904             :     Size        total_mem;
    1905             : 
    1906       82034 :     if (aggstate->aggstrategy != AGG_MIXED &&
    1907       64442 :         aggstate->aggstrategy != AGG_HASHED)
    1908           0 :         return;
    1909             : 
    1910             :     /* memory for the hash table itself */
    1911       82034 :     meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
    1912             : 
    1913             :     /* memory for the group keys and transition states */
    1914       82034 :     hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
    1915             : 
    1916             :     /* memory for read/write tape buffers, if spilled */
    1917       82034 :     buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
    1918       82034 :     if (from_tape)
    1919       19044 :         buffer_mem += HASHAGG_READ_BUFFER_SIZE;
    1920             : 
    1921             :     /* update peak mem */
    1922       82034 :     total_mem = meta_mem + hashkey_mem + buffer_mem;
    1923       82034 :     if (total_mem > aggstate->hash_mem_peak)
    1924        4162 :         aggstate->hash_mem_peak = total_mem;
    1925             : 
    1926             :     /* update disk usage */
    1927       82034 :     if (aggstate->hash_tapeset != NULL)
    1928             :     {
    1929       19088 :         uint64      disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeset) * (BLCKSZ / 1024);
    1930             : 
    1931       19088 :         if (aggstate->hash_disk_used < disk_used)
    1932          40 :             aggstate->hash_disk_used = disk_used;
    1933             :     }
    1934             : 
    1935             :     /* update hashentrysize estimate based on contents */
    1936       82034 :     if (aggstate->hash_ngroups_current > 0)
    1937             :     {
    1938       80530 :         aggstate->hashentrysize =
    1939       80530 :             sizeof(TupleHashEntryData) +
    1940       80530 :             (hashkey_mem / (double) aggstate->hash_ngroups_current);
    1941             :     }
    1942             : }
    1943             : 
    1944             : /*
    1945             :  * Choose a reasonable number of buckets for the initial hash table size.
    1946             :  */
    1947             : static long
    1948        4470 : hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
    1949             : {
    1950             :     long        max_nbuckets;
    1951        4470 :     long        nbuckets = ngroups;
    1952             : 
    1953        4470 :     max_nbuckets = memory / hashentrysize;
    1954             : 
    1955             :     /*
    1956             :      * Underestimating is better than overestimating. Too many buckets crowd
    1957             :      * out space for group keys and transition state values.
    1958             :      */
    1959        4470 :     max_nbuckets >>= 1;
    1960             : 
    1961        4470 :     if (nbuckets > max_nbuckets)
    1962          28 :         nbuckets = max_nbuckets;
    1963             : 
    1964        4470 :     return Max(nbuckets, 1);
    1965             : }
    1966             : 
    1967             : /*
    1968             :  * Determine the number of partitions to create when spilling, which will
    1969             :  * always be a power of two. If log2_npartitions is non-NULL, set
    1970             :  * *log2_npartitions to the log2() of the number of partitions.
    1971             :  */
    1972             : static int
    1973       10568 : hash_choose_num_partitions(double input_groups, double hashentrysize,
    1974             :                            int used_bits, int *log2_npartitions)
    1975             : {
    1976       10568 :     Size        hash_mem_limit = get_hash_memory_limit();
    1977             :     double      partition_limit;
    1978             :     double      mem_wanted;
    1979             :     double      dpartitions;
    1980             :     int         npartitions;
    1981             :     int         partition_bits;
    1982             : 
    1983             :     /*
    1984             :      * Avoid creating so many partitions that the memory requirements of the
    1985             :      * open partition files are greater than 1/4 of hash_mem.
    1986             :      */
    1987       10568 :     partition_limit =
    1988       10568 :         (hash_mem_limit * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
    1989             :         HASHAGG_WRITE_BUFFER_SIZE;
    1990             : 
    1991       10568 :     mem_wanted = HASHAGG_PARTITION_FACTOR * input_groups * hashentrysize;
    1992             : 
    1993             :     /* make enough partitions so that each one is likely to fit in memory */
    1994       10568 :     dpartitions = 1 + (mem_wanted / hash_mem_limit);
    1995             : 
    1996       10568 :     if (dpartitions > partition_limit)
    1997       10568 :         dpartitions = partition_limit;
    1998             : 
    1999       10568 :     if (dpartitions < HASHAGG_MIN_PARTITIONS)
    2000       10568 :         dpartitions = HASHAGG_MIN_PARTITIONS;
    2001       10568 :     if (dpartitions > HASHAGG_MAX_PARTITIONS)
    2002           0 :         dpartitions = HASHAGG_MAX_PARTITIONS;
    2003             : 
    2004             :     /* HASHAGG_MAX_PARTITIONS limit makes this safe */
    2005       10568 :     npartitions = (int) dpartitions;
    2006             : 
    2007             :     /* ceil(log2(npartitions)) */
    2008       10568 :     partition_bits = my_log2(npartitions);
    2009             : 
    2010             :     /* make sure that we don't exhaust the hash bits */
    2011       10568 :     if (partition_bits + used_bits >= 32)
    2012           0 :         partition_bits = 32 - used_bits;
    2013             : 
    2014       10568 :     if (log2_npartitions != NULL)
    2015        8680 :         *log2_npartitions = partition_bits;
    2016             : 
    2017             :     /* number of partitions will be a power of two */
    2018       10568 :     npartitions = 1 << partition_bits;
    2019             : 
    2020       10568 :     return npartitions;
    2021             : }
    2022             : 
    2023             : /*
    2024             :  * Initialize a freshly-created TupleHashEntry.
    2025             :  */
    2026             : static void
    2027      351218 : initialize_hash_entry(AggState *aggstate, TupleHashTable hashtable,
    2028             :                       TupleHashEntry entry)
    2029             : {
    2030             :     AggStatePerGroup pergroup;
    2031             :     int         transno;
    2032             : 
    2033      351218 :     aggstate->hash_ngroups_current++;
    2034      351218 :     hash_agg_check_limits(aggstate);
    2035             : 
    2036             :     /* no need to allocate or initialize per-group state */
    2037      351218 :     if (aggstate->numtrans == 0)
    2038      203204 :         return;
    2039             : 
    2040             :     pergroup = (AggStatePerGroup)
    2041      148014 :         MemoryContextAlloc(hashtable->tablecxt,
    2042      148014 :                            sizeof(AggStatePerGroupData) * aggstate->numtrans);
    2043             : 
    2044      148014 :     entry->additional = pergroup;
    2045             : 
    2046             :     /*
    2047             :      * Initialize aggregates for new tuple group, lookup_hash_entries()
    2048             :      * already has selected the relevant grouping set.
    2049             :      */
    2050      392842 :     for (transno = 0; transno < aggstate->numtrans; transno++)
    2051             :     {
    2052      244828 :         AggStatePerTrans pertrans = &aggstate->pertrans[transno];
    2053      244828 :         AggStatePerGroup pergroupstate = &pergroup[transno];
    2054             : 
    2055      244828 :         initialize_aggregate(aggstate, pertrans, pergroupstate);
    2056             :     }
    2057             : }
    2058             : 
    2059             : /*
    2060             :  * Look up hash entries for the current tuple in all hashed grouping sets.
    2061             :  *
    2062             :  * Be aware that lookup_hash_entry can reset the tmpcontext.
    2063             :  *
    2064             :  * Some entries may be left NULL if we are in "spill mode". The same tuple
    2065             :  * will belong to different groups for each grouping set, so may match a group
    2066             :  * already in memory for one set and match a group not in memory for another
    2067             :  * set. When in "spill mode", the tuple will be spilled for each grouping set
    2068             :  * where it doesn't match a group in memory.
    2069             :  *
    2070             :  * NB: It's possible to spill the same tuple for several different grouping
    2071             :  * sets. This may seem wasteful, but it's actually a trade-off: if we spill
    2072             :  * the tuple multiple times for multiple grouping sets, it can be partitioned
    2073             :  * for each grouping set, making the refilling of the hash table very
    2074             :  * efficient.
    2075             :  */
    2076             : static void
    2077     3496950 : lookup_hash_entries(AggState *aggstate)
    2078             : {
    2079     3496950 :     AggStatePerGroup *pergroup = aggstate->hash_pergroup;
    2080     3496950 :     TupleTableSlot *outerslot = aggstate->tmpcontext->ecxt_outertuple;
    2081             :     int         setno;
    2082             : 
    2083     7083640 :     for (setno = 0; setno < aggstate->num_hashes; setno++)
    2084             :     {
    2085     3586690 :         AggStatePerHash perhash = &aggstate->perhash[setno];
    2086     3586690 :         TupleHashTable hashtable = perhash->hashtable;
    2087     3586690 :         TupleTableSlot *hashslot = perhash->hashslot;
    2088             :         TupleHashEntry entry;
    2089             :         uint32      hash;
    2090     3586690 :         bool        isnew = false;
    2091             :         bool       *p_isnew;
    2092             : 
    2093             :         /* if hash table already spilled, don't create new entries */
    2094     3586690 :         p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
    2095             : 
    2096     3586690 :         select_current_set(aggstate, setno, true);
    2097     3586690 :         prepare_hash_slot(perhash,
    2098             :                           outerslot,
    2099             :                           hashslot);
    2100             : 
    2101     3586690 :         entry = LookupTupleHashEntry(hashtable, hashslot,
    2102             :                                      p_isnew, &hash);
    2103             : 
    2104     3586690 :         if (entry != NULL)
    2105             :         {
    2106     3380258 :             if (isnew)
    2107      280246 :                 initialize_hash_entry(aggstate, hashtable, entry);
    2108     3380258 :             pergroup[setno] = entry->additional;
    2109             :         }
    2110             :         else
    2111             :         {
    2112      206432 :             HashAggSpill *spill = &aggstate->hash_spills[setno];
    2113      206432 :             TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
    2114             : 
    2115      206432 :             if (spill->partitions == NULL)
    2116           0 :                 hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
    2117           0 :                                    perhash->aggnode->numGroups,
    2118             :                                    aggstate->hashentrysize);
    2119             : 
    2120      206432 :             hashagg_spill_tuple(aggstate, spill, slot, hash);
    2121      206432 :             pergroup[setno] = NULL;
    2122             :         }
    2123             :     }
    2124     3496950 : }
    2125             : 
    2126             : /*
    2127             :  * ExecAgg -
    2128             :  *
    2129             :  *    ExecAgg receives tuples from its outer subplan and aggregates over
    2130             :  *    the appropriate attribute for each aggregate function use (Aggref
    2131             :  *    node) appearing in the targetlist or qual of the node.  The number
    2132             :  *    of tuples to aggregate over depends on whether grouped or plain
    2133             :  *    aggregation is selected.  In grouped aggregation, we produce a result
    2134             :  *    row for each group; in plain aggregation there's a single result row
    2135             :  *    for the whole query.  In either case, the value of each aggregate is
    2136             :  *    stored in the expression context to be used when ExecProject evaluates
    2137             :  *    the result tuple.
    2138             :  */
    2139             : static TupleTableSlot *
    2140     3523454 : ExecAgg(PlanState *pstate)
    2141             : {
    2142     3523454 :     AggState   *node = castNode(AggState, pstate);
    2143     3523454 :     TupleTableSlot *result = NULL;
    2144             : 
    2145     3523454 :     CHECK_FOR_INTERRUPTS();
    2146             : 
    2147     3523454 :     if (!node->agg_done)
    2148             :     {
    2149             :         /* Dispatch based on strategy */
    2150     2037418 :         switch (node->phase->aggstrategy)
    2151             :         {
    2152      393580 :             case AGG_HASHED:
    2153      393580 :                 if (!node->table_filled)
    2154       62906 :                     agg_fill_hash_table(node);
    2155             :                 /* FALLTHROUGH */
    2156             :             case AGG_MIXED:
    2157      411824 :                 result = agg_retrieve_hash_table(node);
    2158      411824 :                 break;
    2159     1625594 :             case AGG_PLAIN:
    2160             :             case AGG_SORTED:
    2161     1625594 :                 result = agg_retrieve_direct(node);
    2162     1625548 :                 break;
    2163             :         }
    2164             : 
    2165     2037372 :         if (!TupIsNull(result))
    2166     1973410 :             return result;
    2167             :     }
    2168             : 
    2169     1549998 :     return NULL;
    2170             : }
    2171             : 
    2172             : /*
    2173             :  * ExecAgg for non-hashed case
    2174             :  */
    2175             : static TupleTableSlot *
    2176     1625594 : agg_retrieve_direct(AggState *aggstate)
    2177             : {
    2178     1625594 :     Agg        *node = aggstate->phase->aggnode;
    2179             :     ExprContext *econtext;
    2180             :     ExprContext *tmpcontext;
    2181             :     AggStatePerAgg peragg;
    2182             :     AggStatePerGroup *pergroups;
    2183             :     TupleTableSlot *outerslot;
    2184             :     TupleTableSlot *firstSlot;
    2185             :     TupleTableSlot *result;
    2186     1625594 :     bool        hasGroupingSets = aggstate->phase->numsets > 0;
    2187     1625594 :     int         numGroupingSets = Max(aggstate->phase->numsets, 1);
    2188             :     int         currentSet;
    2189             :     int         nextSetSize;
    2190             :     int         numReset;
    2191             :     int         i;
    2192             : 
    2193             :     /*
    2194             :      * get state info from node
    2195             :      *
    2196             :      * econtext is the per-output-tuple expression context
    2197             :      *
    2198             :      * tmpcontext is the per-input-tuple expression context
    2199             :      */
    2200     1625594 :     econtext = aggstate->ss.ps.ps_ExprContext;
    2201     1625594 :     tmpcontext = aggstate->tmpcontext;
    2202             : 
    2203     1625594 :     peragg = aggstate->peragg;
    2204     1625594 :     pergroups = aggstate->pergroups;
    2205     1625594 :     firstSlot = aggstate->ss.ss_ScanTupleSlot;
    2206             : 
    2207             :     /*
    2208             :      * We loop retrieving groups until we find one matching
    2209             :      * aggstate->ss.ps.qual
    2210             :      *
    2211             :      * For grouping sets, we have the invariant that aggstate->projected_set
    2212             :      * is either -1 (initial call) or the index (starting from 0) in
    2213             :      * gset_lengths for the group we just completed (either by projecting a
    2214             :      * row or by discarding it in the qual).
    2215             :      */
    2216     1672692 :     while (!aggstate->agg_done)
    2217             :     {
    2218             :         /*
    2219             :          * Clear the per-output-tuple context for each group, as well as
    2220             :          * aggcontext (which contains any pass-by-ref transvalues of the old
    2221             :          * group).  Some aggregate functions store working state in child
    2222             :          * contexts; those now get reset automatically without us needing to
    2223             :          * do anything special.
    2224             :          *
    2225             :          * We use ReScanExprContext not just ResetExprContext because we want
    2226             :          * any registered shutdown callbacks to be called.  That allows
    2227             :          * aggregate functions to ensure they've cleaned up any non-memory
    2228             :          * resources.
    2229             :          */
    2230     1672606 :         ReScanExprContext(econtext);
    2231             : 
    2232             :         /*
    2233             :          * Determine how many grouping sets need to be reset at this boundary.
    2234             :          */
    2235     1672606 :         if (aggstate->projected_set >= 0 &&
    2236      183590 :             aggstate->projected_set < numGroupingSets)
    2237      183586 :             numReset = aggstate->projected_set + 1;
    2238             :         else
    2239     1489020 :             numReset = numGroupingSets;
    2240             : 
    2241             :         /*
    2242             :          * numReset can change on a phase boundary, but that's OK; we want to
    2243             :          * reset the contexts used in _this_ phase, and later, after possibly
    2244             :          * changing phase, initialize the right number of aggregates for the
    2245             :          * _new_ phase.
    2246             :          */
    2247             : 
    2248     3360078 :         for (i = 0; i < numReset; i++)
    2249             :         {
    2250     1687472 :             ReScanExprContext(aggstate->aggcontexts[i]);
    2251             :         }
    2252             : 
    2253             :         /*
    2254             :          * Check if input is complete and there are no more groups to project
    2255             :          * in this phase; move to next phase or mark as done.
    2256             :          */
    2257     1672606 :         if (aggstate->input_done == true &&
    2258        1020 :             aggstate->projected_set >= (numGroupingSets - 1))
    2259             :         {
    2260         480 :             if (aggstate->current_phase < aggstate->numphases - 1)
    2261             :             {
    2262         124 :                 initialize_phase(aggstate, aggstate->current_phase + 1);
    2263         124 :                 aggstate->input_done = false;
    2264         124 :                 aggstate->projected_set = -1;
    2265         124 :                 numGroupingSets = Max(aggstate->phase->numsets, 1);
    2266         124 :                 node = aggstate->phase->aggnode;
    2267         124 :                 numReset = numGroupingSets;
    2268             :             }
    2269         356 :             else if (aggstate->aggstrategy == AGG_MIXED)
    2270             :             {
    2271             :                 /*
    2272             :                  * Mixed mode; we've output all the grouped stuff and have
    2273             :                  * full hashtables, so switch to outputting those.
    2274             :                  */
    2275          92 :                 initialize_phase(aggstate, 0);
    2276          92 :                 aggstate->table_filled = true;
    2277          92 :                 ResetTupleHashIterator(aggstate->perhash[0].hashtable,
    2278             :                                        &aggstate->perhash[0].hashiter);
    2279          92 :                 select_current_set(aggstate, 0, true);
    2280          92 :                 return agg_retrieve_hash_table(aggstate);
    2281             :             }
    2282             :             else
    2283             :             {
    2284         264 :                 aggstate->agg_done = true;
    2285         264 :                 break;
    2286             :             }
    2287             :         }
    2288             : 
    2289             :         /*
    2290             :          * Get the number of columns in the next grouping set after the last
    2291             :          * projected one (if any). This is the number of columns to compare to
    2292             :          * see if we reached the boundary of that set too.
    2293             :          */
    2294     1672250 :         if (aggstate->projected_set >= 0 &&
    2295      183110 :             aggstate->projected_set < (numGroupingSets - 1))
    2296       18196 :             nextSetSize = aggstate->phase->gset_lengths[aggstate->projected_set + 1];
    2297             :         else
    2298     1654054 :             nextSetSize = 0;
    2299             : 
    2300             :         /*----------
    2301             :          * If a subgroup for the current grouping set is present, project it.
    2302             :          *
    2303             :          * We have a new group if:
    2304             :          *  - we're out of input but haven't projected all grouping sets
    2305             :          *    (checked above)
    2306             :          * OR
    2307             :          *    - we already projected a row that wasn't from the last grouping
    2308             :          *      set
    2309             :          *    AND
    2310             :          *    - the next grouping set has at least one grouping column (since
    2311             :          *      empty grouping sets project only once input is exhausted)
    2312             :          *    AND
    2313             :          *    - the previous and pending rows differ on the grouping columns
    2314             :          *      of the next grouping set
    2315             :          *----------
    2316             :          */
    2317     1672250 :         tmpcontext->ecxt_innertuple = econtext->ecxt_outertuple;
    2318     1672250 :         if (aggstate->input_done ||
    2319     1671710 :             (node->aggstrategy != AGG_PLAIN &&
    2320      183710 :              aggstate->projected_set != -1 &&
    2321      182570 :              aggstate->projected_set < (numGroupingSets - 1) &&
    2322       13300 :              nextSetSize > 0 &&
    2323       13300 :              !ExecQualAndReset(aggstate->phase->eqfunctions[nextSetSize - 1],
    2324             :                                tmpcontext)))
    2325             :         {
    2326        9436 :             aggstate->projected_set += 1;
    2327             : 
    2328             :             Assert(aggstate->projected_set < numGroupingSets);
    2329        9436 :             Assert(nextSetSize > 0 || aggstate->input_done);
    2330             :         }
    2331             :         else
    2332             :         {
    2333             :             /*
    2334             :              * We no longer care what group we just projected, the next
    2335             :              * projection will always be the first (or only) grouping set
    2336             :              * (unless the input proves to be empty).
    2337             :              */
    2338     1662814 :             aggstate->projected_set = 0;
    2339             : 
    2340             :             /*
    2341             :              * If we don't already have the first tuple of the new group,
    2342             :              * fetch it from the outer plan.
    2343             :              */
    2344     1662814 :             if (aggstate->grp_firstTuple == NULL)
    2345             :             {
    2346     1489140 :                 outerslot = fetch_input_tuple(aggstate);
    2347     1489122 :                 if (!TupIsNull(outerslot))
    2348             :                 {
    2349             :                     /*
    2350             :                      * Make a copy of the first input tuple; we will use this
    2351             :                      * for comparisons (in group mode) and for projection.
    2352             :                      */
    2353       92866 :                     aggstate->grp_firstTuple = ExecCopySlotHeapTuple(outerslot);
    2354             :                 }
    2355             :                 else
    2356             :                 {
    2357             :                     /* outer plan produced no tuples at all */
    2358     1396256 :                     if (hasGroupingSets)
    2359             :                     {
    2360             :                         /*
    2361             :                          * If there was no input at all, we need to project
    2362             :                          * rows only if there are grouping sets of size 0.
    2363             :                          * Note that this implies that there can't be any
    2364             :                          * references to ungrouped Vars, which would otherwise
    2365             :                          * cause issues with the empty output slot.
    2366             :                          *
    2367             :                          * XXX: This is no longer true, we currently deal with
    2368             :                          * this in finalize_aggregates().
    2369             :                          */
    2370          36 :                         aggstate->input_done = true;
    2371             : 
    2372          52 :                         while (aggstate->phase->gset_lengths[aggstate->projected_set] > 0)
    2373             :                         {
    2374          20 :                             aggstate->projected_set += 1;
    2375          20 :                             if (aggstate->projected_set >= numGroupingSets)
    2376             :                             {
    2377             :                                 /*
    2378             :                                  * We can't set agg_done here because we might
    2379             :                                  * have more phases to do, even though the
    2380             :                                  * input is empty. So we need to restart the
    2381             :                                  * whole outer loop.
    2382             :                                  */
    2383           4 :                                 break;
    2384             :                             }
    2385             :                         }
    2386             : 
    2387          36 :                         if (aggstate->projected_set >= numGroupingSets)
    2388           4 :                             continue;
    2389             :                     }
    2390             :                     else
    2391             :                     {
    2392     1396220 :                         aggstate->agg_done = true;
    2393             :                         /* If we are grouping, we should produce no tuples too */
    2394     1396220 :                         if (node->aggstrategy != AGG_PLAIN)
    2395         112 :                             return NULL;
    2396             :                     }
    2397             :                 }
    2398             :             }
    2399             : 
    2400             :             /*
    2401             :              * Initialize working state for a new input tuple group.
    2402             :              */
    2403     1662680 :             initialize_aggregates(aggstate, pergroups, numReset);
    2404             : 
    2405     1662680 :             if (aggstate->grp_firstTuple != NULL)
    2406             :             {
    2407             :                 /*
    2408             :                  * Store the copied first input tuple in the tuple table slot
    2409             :                  * reserved for it.  The tuple will be deleted when it is
    2410             :                  * cleared from the slot.
    2411             :                  */
    2412      266540 :                 ExecForceStoreHeapTuple(aggstate->grp_firstTuple,
    2413             :                                         firstSlot, true);
    2414      266540 :                 aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
    2415             : 
    2416             :                 /* set up for first advance_aggregates call */
    2417      266540 :                 tmpcontext->ecxt_outertuple = firstSlot;
    2418             : 
    2419             :                 /*
    2420             :                  * Process each outer-plan tuple, and then fetch the next one,
    2421             :                  * until we exhaust the outer plan or cross a group boundary.
    2422             :                  */
    2423             :                 for (;;)
    2424             :                 {
    2425             :                     /*
    2426             :                      * During phase 1 only of a mixed agg, we need to update
    2427             :                      * hashtables as well in advance_aggregates.
    2428             :                      */
    2429    13068280 :                     if (aggstate->aggstrategy == AGG_MIXED &&
    2430       25764 :                         aggstate->current_phase == 1)
    2431             :                     {
    2432       25764 :                         lookup_hash_entries(aggstate);
    2433             :                     }
    2434             : 
    2435             :                     /* Advance the aggregates (or combine functions) */
    2436    13068280 :                     advance_aggregates(aggstate);
    2437             : 
    2438             :                     /* Reset per-input-tuple context after each tuple */
    2439    13068252 :                     ResetExprContext(tmpcontext);
    2440             : 
    2441    13068252 :                     outerslot = fetch_input_tuple(aggstate);
    2442    13068252 :                     if (TupIsNull(outerslot))
    2443             :                     {
    2444             :                         /* no more outer-plan tuples available */
    2445             : 
    2446             :                         /* if we built hash tables, finalize any spills */
    2447       92834 :                         if (aggstate->aggstrategy == AGG_MIXED &&
    2448          84 :                             aggstate->current_phase == 1)
    2449          84 :                             hashagg_finish_initial_spills(aggstate);
    2450             : 
    2451       92834 :                         if (hasGroupingSets)
    2452             :                         {
    2453         444 :                             aggstate->input_done = true;
    2454         444 :                             break;
    2455             :                         }
    2456             :                         else
    2457             :                         {
    2458       92390 :                             aggstate->agg_done = true;
    2459       92390 :                             break;
    2460             :                         }
    2461             :                     }
    2462             :                     /* set up for next advance_aggregates call */
    2463    12975418 :                     tmpcontext->ecxt_outertuple = outerslot;
    2464             : 
    2465             :                     /*
    2466             :                      * If we are grouping, check whether we've crossed a group
    2467             :                      * boundary.
    2468             :                      */
    2469    12975418 :                     if (node->aggstrategy != AGG_PLAIN)
    2470             :                     {
    2471     1216760 :                         tmpcontext->ecxt_innertuple = firstSlot;
    2472     1216760 :                         if (!ExecQual(aggstate->phase->eqfunctions[node->numCols - 1],
    2473             :                                       tmpcontext))
    2474             :                         {
    2475      173678 :                             aggstate->grp_firstTuple = ExecCopySlotHeapTuple(outerslot);
    2476      173678 :                             break;
    2477             :                         }
    2478             :                     }
    2479             :                 }
    2480             :             }
    2481             : 
    2482             :             /*
    2483             :              * Use the representative input tuple for any references to
    2484             :              * non-aggregated input columns in aggregate direct args, the node
    2485             :              * qual, and the tlist.  (If we are not grouping, and there are no
    2486             :              * input rows at all, we will come here with an empty firstSlot
    2487             :              * ... but if not grouping, there can't be any references to
    2488             :              * non-aggregated input columns, so no problem.)
    2489             :              */
    2490     1662652 :             econtext->ecxt_outertuple = firstSlot;
    2491             :         }
    2492             : 
    2493             :         Assert(aggstate->projected_set >= 0);
    2494             : 
    2495     1672088 :         currentSet = aggstate->projected_set;
    2496             : 
    2497     1672088 :         prepare_projection_slot(aggstate, econtext->ecxt_outertuple, currentSet);
    2498             : 
    2499     1672088 :         select_current_set(aggstate, currentSet, false);
    2500             : 
    2501     1672088 :         finalize_aggregates(aggstate,
    2502             :                             peragg,
    2503     1672088 :                             pergroups[currentSet]);
    2504             : 
    2505             :         /*
    2506             :          * If there's no row to project right now, we must continue rather
    2507             :          * than returning a null since there might be more groups.
    2508             :          */
    2509     1672088 :         result = project_aggregates(aggstate);
    2510     1672088 :         if (result)
    2511     1624994 :             return result;
    2512             :     }
    2513             : 
    2514             :     /* No more groups */
    2515         350 :     return NULL;
    2516             : }
    2517             : 
    2518             : /*
    2519             :  * ExecAgg for hashed case: read input and build hash table
    2520             :  */
    2521             : static void
    2522       62906 : agg_fill_hash_table(AggState *aggstate)
    2523             : {
    2524             :     TupleTableSlot *outerslot;
    2525       62906 :     ExprContext *tmpcontext = aggstate->tmpcontext;
    2526             : 
    2527             :     /*
    2528             :      * Process each outer-plan tuple, and then fetch the next one, until we
    2529             :      * exhaust the outer plan.
    2530             :      */
    2531             :     for (;;)
    2532             :     {
    2533     3534092 :         outerslot = fetch_input_tuple(aggstate);
    2534     3534092 :         if (TupIsNull(outerslot))
    2535             :             break;
    2536             : 
    2537             :         /* set up for lookup_hash_entries and advance_aggregates */
    2538     3471186 :         tmpcontext->ecxt_outertuple = outerslot;
    2539             : 
    2540             :         /* Find or build hashtable entries */
    2541     3471186 :         lookup_hash_entries(aggstate);
    2542             : 
    2543             :         /* Advance the aggregates (or combine functions) */
    2544     3471186 :         advance_aggregates(aggstate);
    2545             : 
    2546             :         /*
    2547             :          * Reset per-input-tuple context after each tuple, but note that the
    2548             :          * hash lookups do this too
    2549             :          */
    2550     3471186 :         ResetExprContext(aggstate->tmpcontext);
    2551             :     }
    2552             : 
    2553             :     /* finalize spills, if any */
    2554       62906 :     hashagg_finish_initial_spills(aggstate);
    2555             : 
    2556       62906 :     aggstate->table_filled = true;
    2557             :     /* Initialize to walk the first hash table */
    2558       62906 :     select_current_set(aggstate, 0, true);
    2559       62906 :     ResetTupleHashIterator(aggstate->perhash[0].hashtable,
    2560             :                            &aggstate->perhash[0].hashiter);
    2561       62906 : }
    2562             : 
    2563             : /*
    2564             :  * If any data was spilled during hash aggregation, reset the hash table and
    2565             :  * reprocess one batch of spilled data. After reprocessing a batch, the hash
    2566             :  * table will again contain data, ready to be consumed by
    2567             :  * agg_retrieve_hash_table_in_memory().
    2568             :  *
    2569             :  * Should only be called after all in memory hash table entries have been
    2570             :  * finalized and emitted.
    2571             :  *
    2572             :  * Return false when input is exhausted and there's no more work to be done;
    2573             :  * otherwise return true.
    2574             :  */
    2575             : static bool
    2576       82544 : agg_refill_hash_table(AggState *aggstate)
    2577             : {
    2578             :     HashAggBatch *batch;
    2579             :     AggStatePerHash perhash;
    2580             :     HashAggSpill spill;
    2581       82544 :     LogicalTapeSet *tapeset = aggstate->hash_tapeset;
    2582       82544 :     bool        spill_initialized = false;
    2583             : 
    2584       82544 :     if (aggstate->hash_batches == NIL)
    2585       63500 :         return false;
    2586             : 
    2587             :     /* hash_batches is a stack, with the top item at the end of the list */
    2588       19044 :     batch = llast(aggstate->hash_batches);
    2589       19044 :     aggstate->hash_batches = list_delete_last(aggstate->hash_batches);
    2590             : 
    2591       19044 :     hash_agg_set_limits(aggstate->hashentrysize, batch->input_card,
    2592             :                         batch->used_bits, &aggstate->hash_mem_limit,
    2593             :                         &aggstate->hash_ngroups_limit, NULL);
    2594             : 
    2595             :     /*
    2596             :      * Each batch only processes one grouping set; set the rest to NULL so
    2597             :      * that advance_aggregates() knows to ignore them. We don't touch
    2598             :      * pergroups for sorted grouping sets here, because they will be needed if
    2599             :      * we rescan later. The expressions for sorted grouping sets will not be
    2600             :      * evaluated after we recompile anyway.
    2601             :      */
    2602      140448 :     MemSet(aggstate->hash_pergroup, 0,
    2603             :            sizeof(AggStatePerGroup) * aggstate->num_hashes);
    2604             : 
    2605             :     /* free memory and reset hash tables */
    2606       19044 :     ReScanExprContext(aggstate->hashcontext);
    2607      140448 :     for (int setno = 0; setno < aggstate->num_hashes; setno++)
    2608      121404 :         ResetTupleHashTable(aggstate->perhash[setno].hashtable);
    2609             : 
    2610       19044 :     aggstate->hash_ngroups_current = 0;
    2611             : 
    2612             :     /*
    2613             :      * In AGG_MIXED mode, hash aggregation happens in phase 1 and the output
    2614             :      * happens in phase 0. So, we switch to phase 1 when processing a batch,
    2615             :      * and back to phase 0 after the batch is done.
    2616             :      */
    2617             :     Assert(aggstate->current_phase == 0);
    2618       19044 :     if (aggstate->phase->aggstrategy == AGG_MIXED)
    2619             :     {
    2620       17508 :         aggstate->current_phase = 1;
    2621       17508 :         aggstate->phase = &aggstate->phases[aggstate->current_phase];
    2622             :     }
    2623             : 
    2624       19044 :     select_current_set(aggstate, batch->setno, true);
    2625             : 
    2626       19044 :     perhash = &aggstate->perhash[aggstate->current_set];
    2627             : 
    2628             :     /*
    2629             :      * Spilled tuples are always read back as MinimalTuples, which may be
    2630             :      * different from the outer plan, so recompile the aggregate expressions.
    2631             :      *
    2632             :      * We still need the NULL check, because we are only processing one
    2633             :      * grouping set at a time and the rest will be NULL.
    2634             :      */
    2635       19044 :     hashagg_recompile_expressions(aggstate, true, true);
    2636             : 
    2637             :     for (;;)
    2638      581072 :     {
    2639      600116 :         TupleTableSlot *spillslot = aggstate->hash_spill_rslot;
    2640      600116 :         TupleTableSlot *hashslot = perhash->hashslot;
    2641             :         TupleHashEntry entry;
    2642             :         MinimalTuple tuple;
    2643             :         uint32      hash;
    2644      600116 :         bool        isnew = false;
    2645      600116 :         bool       *p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
    2646             : 
    2647      600116 :         CHECK_FOR_INTERRUPTS();
    2648             : 
    2649      600116 :         tuple = hashagg_batch_read(batch, &hash);
    2650      600116 :         if (tuple == NULL)
    2651       19044 :             break;
    2652             : 
    2653      581072 :         ExecStoreMinimalTuple(tuple, spillslot, true);
    2654      581072 :         aggstate->tmpcontext->ecxt_outertuple = spillslot;
    2655             : 
    2656      581072 :         prepare_hash_slot(perhash,
    2657      581072 :                           aggstate->tmpcontext->ecxt_outertuple,
    2658             :                           hashslot);
    2659      581072 :         entry = LookupTupleHashEntryHash(
    2660             :                                          perhash->hashtable, hashslot, p_isnew, hash);
    2661             : 
    2662      581072 :         if (entry != NULL)
    2663             :         {
    2664      206432 :             if (isnew)
    2665       70972 :                 initialize_hash_entry(aggstate, perhash->hashtable, entry);
    2666      206432 :             aggstate->hash_pergroup[batch->setno] = entry->additional;
    2667      206432 :             advance_aggregates(aggstate);
    2668             :         }
    2669             :         else
    2670             :         {
    2671      374640 :             if (!spill_initialized)
    2672             :             {
    2673             :                 /*
    2674             :                  * Avoid initializing the spill until we actually need it so
    2675             :                  * that we don't assign tapes that will never be used.
    2676             :                  */
    2677        8596 :                 spill_initialized = true;
    2678        8596 :                 hashagg_spill_init(&spill, tapeset, batch->used_bits,
    2679             :                                    batch->input_card, aggstate->hashentrysize);
    2680             :             }
    2681             :             /* no memory for a new group, spill */
    2682      374640 :             hashagg_spill_tuple(aggstate, &spill, spillslot, hash);
    2683             : 
    2684      374640 :             aggstate->hash_pergroup[batch->setno] = NULL;
    2685             :         }
    2686             : 
    2687             :         /*
    2688             :          * Reset per-input-tuple context after each tuple, but note that the
    2689             :          * hash lookups do this too
    2690             :          */
    2691      581072 :         ResetExprContext(aggstate->tmpcontext);
    2692             :     }
    2693             : 
    2694       19044 :     LogicalTapeClose(batch->input_tape);
    2695             : 
    2696             :     /* change back to phase 0 */
    2697       19044 :     aggstate->current_phase = 0;
    2698       19044 :     aggstate->phase = &aggstate->phases[aggstate->current_phase];
    2699             : 
    2700       19044 :     if (spill_initialized)
    2701             :     {
    2702        8596 :         hashagg_spill_finish(aggstate, &spill, batch->setno);
    2703        8596 :         hash_agg_update_metrics(aggstate, true, spill.npartitions);
    2704             :     }
    2705             :     else
    2706       10448 :         hash_agg_update_metrics(aggstate, true, 0);
    2707             : 
    2708       19044 :     aggstate->hash_spill_mode = false;
    2709             : 
    2710             :     /* prepare to walk the first hash table */
    2711       19044 :     select_current_set(aggstate, batch->setno, true);
    2712       19044 :     ResetTupleHashIterator(aggstate->perhash[batch->setno].hashtable,
    2713             :                            &aggstate->perhash[batch->setno].hashiter);
    2714             : 
    2715       19044 :     pfree(batch);
    2716             : 
    2717       19044 :     return true;
    2718             : }
    2719             : 
    2720             : /*
    2721             :  * ExecAgg for hashed case: retrieving groups from hash table
    2722             :  *
    2723             :  * After exhausting in-memory tuples, also try refilling the hash table using
    2724             :  * previously-spilled tuples. Only returns NULL after all in-memory and
    2725             :  * spilled tuples are exhausted.
    2726             :  */
    2727             : static TupleTableSlot *
    2728      411916 : agg_retrieve_hash_table(AggState *aggstate)
    2729             : {
    2730      411916 :     TupleTableSlot *result = NULL;
    2731             : 
    2732      779376 :     while (result == NULL)
    2733             :     {
    2734      430960 :         result = agg_retrieve_hash_table_in_memory(aggstate);
    2735      430960 :         if (result == NULL)
    2736             :         {
    2737       82544 :             if (!agg_refill_hash_table(aggstate))
    2738             :             {
    2739       63500 :                 aggstate->agg_done = true;
    2740       63500 :                 break;
    2741             :             }
    2742             :         }
    2743             :     }
    2744             : 
    2745      411916 :     return result;
    2746             : }
    2747             : 
    2748             : /*
    2749             :  * Retrieve the groups from the in-memory hash tables without considering any
    2750             :  * spilled tuples.
    2751             :  */
    2752             : static TupleTableSlot *
    2753      430960 : agg_retrieve_hash_table_in_memory(AggState *aggstate)
    2754             : {
    2755             :     ExprContext *econtext;
    2756             :     AggStatePerAgg peragg;
    2757             :     AggStatePerGroup pergroup;
    2758             :     TupleHashEntryData *entry;
    2759             :     TupleTableSlot *firstSlot;
    2760             :     TupleTableSlot *result;
    2761             :     AggStatePerHash perhash;
    2762             : 
    2763             :     /*
    2764             :      * get state info from node.
    2765             :      *
    2766             :      * econtext is the per-output-tuple expression context.
    2767             :      */
    2768      430960 :     econtext = aggstate->ss.ps.ps_ExprContext;
    2769      430960 :     peragg = aggstate->peragg;
    2770      430960 :     firstSlot = aggstate->ss.ss_ScanTupleSlot;
    2771             : 
    2772             :     /*
    2773             :      * Note that perhash (and therefore anything accessed through it) can
    2774             :      * change inside the loop, as we change between grouping sets.
    2775             :      */
    2776      430960 :     perhash = &aggstate->perhash[aggstate->current_set];
    2777             : 
    2778             :     /*
    2779             :      * We loop retrieving groups until we find one satisfying
    2780             :      * aggstate->ss.ps.qual
    2781             :      */
    2782             :     for (;;)
    2783       70728 :     {
    2784      501688 :         TupleTableSlot *hashslot = perhash->hashslot;
    2785             :         int         i;
    2786             : 
    2787      501688 :         CHECK_FOR_INTERRUPTS();
    2788             : 
    2789             :         /*
    2790             :          * Find the next entry in the hash table
    2791             :          */
    2792      501688 :         entry = ScanTupleHashTable(perhash->hashtable, &perhash->hashiter);
    2793      501688 :         if (entry == NULL)
    2794             :         {
    2795      149370 :             int         nextset = aggstate->current_set + 1;
    2796             : 
    2797      149370 :             if (nextset < aggstate->num_hashes)
    2798             :             {
    2799             :                 /*
    2800             :                  * Switch to next grouping set, reinitialize, and restart the
    2801             :                  * loop.
    2802             :                  */
    2803       66826 :                 select_current_set(aggstate, nextset, true);
    2804             : 
    2805       66826 :                 perhash = &aggstate->perhash[aggstate->current_set];
    2806             : 
    2807       66826 :                 ResetTupleHashIterator(perhash->hashtable, &perhash->hashiter);
    2808             : 
    2809       66826 :                 continue;
    2810             :             }
    2811             :             else
    2812             :             {
    2813       82544 :                 return NULL;
    2814             :             }
    2815             :         }
    2816             : 
    2817             :         /*
    2818             :          * Clear the per-output-tuple context for each group
    2819             :          *
    2820             :          * We intentionally don't use ReScanExprContext here; if any aggs have
    2821             :          * registered shutdown callbacks, they mustn't be called yet, since we
    2822             :          * might not be done with that agg.
    2823             :          */
    2824      352318 :         ResetExprContext(econtext);
    2825             : 
    2826             :         /*
    2827             :          * Transform representative tuple back into one with the right
    2828             :          * columns.
    2829             :          */
    2830      352318 :         ExecStoreMinimalTuple(entry->firstTuple, hashslot, false);
    2831      352318 :         slot_getallattrs(hashslot);
    2832             : 
    2833      352318 :         ExecClearTuple(firstSlot);
    2834      352318 :         memset(firstSlot->tts_isnull, true,
    2835      352318 :                firstSlot->tts_tupleDescriptor->natts * sizeof(bool));
    2836             : 
    2837      938380 :         for (i = 0; i < perhash->numhashGrpCols; i++)
    2838             :         {
    2839      586062 :             int         varNumber = perhash->hashGrpColIdxInput[i] - 1;
    2840             : 
    2841      586062 :             firstSlot->tts_values[varNumber] = hashslot->tts_values[i];
    2842      586062 :             firstSlot->tts_isnull[varNumber] = hashslot->tts_isnull[i];
    2843             :         }
    2844      352318 :         ExecStoreVirtualTuple(firstSlot);
    2845             : 
    2846      352318 :         pergroup = (AggStatePerGroup) entry->additional;
    2847             : 
    2848             :         /*
    2849             :          * Use the representative input tuple for any references to
    2850             :          * non-aggregated input columns in the qual and tlist.
    2851             :          */
    2852      352318 :         econtext->ecxt_outertuple = firstSlot;
    2853             : 
    2854      352318 :         prepare_projection_slot(aggstate,
    2855             :                                 econtext->ecxt_outertuple,
    2856             :                                 aggstate->current_set);
    2857             : 
    2858      352318 :         finalize_aggregates(aggstate, peragg, pergroup);
    2859             : 
    2860      352318 :         result = project_aggregates(aggstate);
    2861      352318 :         if (result)
    2862      348416 :             return result;
    2863             :     }
    2864             : 
    2865             :     /* No more groups */
    2866             :     return NULL;
    2867             : }
    2868             : 
    2869             : /*
    2870             :  * hashagg_spill_init
    2871             :  *
    2872             :  * Called after we determined that spilling is necessary. Chooses the number
    2873             :  * of partitions to create, and initializes them.
    2874             :  */
    2875             : static void
    2876        8680 : hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *tapeset, int used_bits,
    2877             :                    double input_groups, double hashentrysize)
    2878             : {
    2879             :     int         npartitions;
    2880             :     int         partition_bits;
    2881             : 
    2882        8680 :     npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
    2883             :                                              used_bits, &partition_bits);
    2884             : 
    2885        8680 :     spill->partitions = palloc0(sizeof(LogicalTape *) * npartitions);
    2886        8680 :     spill->ntuples = palloc0(sizeof(int64) * npartitions);
    2887        8680 :     spill->hll_card = palloc0(sizeof(hyperLogLogState) * npartitions);
    2888             : 
    2889       43400 :     for (int i = 0; i < npartitions; i++)
    2890       34720 :         spill->partitions[i] = LogicalTapeCreate(tapeset);
    2891             : 
    2892        8680 :     spill->shift = 32 - used_bits - partition_bits;
    2893        8680 :     spill->mask = (npartitions - 1) << spill->shift;
    2894        8680 :     spill->npartitions = npartitions;
    2895             : 
    2896       43400 :     for (int i = 0; i < npartitions; i++)
    2897       34720 :         initHyperLogLog(&spill->hll_card[i], HASHAGG_HLL_BIT_WIDTH);
    2898        8680 : }
    2899             : 
    2900             : /*
    2901             :  * hashagg_spill_tuple
    2902             :  *
    2903             :  * No room for new groups in the hash table. Save for later in the appropriate
    2904             :  * partition.
    2905             :  */
    2906             : static Size
    2907      581072 : hashagg_spill_tuple(AggState *aggstate, HashAggSpill *spill,
    2908             :                     TupleTableSlot *inputslot, uint32 hash)
    2909             : {
    2910             :     TupleTableSlot *spillslot;
    2911             :     int         partition;
    2912             :     MinimalTuple tuple;
    2913             :     LogicalTape *tape;
    2914      581072 :     int         total_written = 0;
    2915             :     bool        shouldFree;
    2916             : 
    2917             :     Assert(spill->partitions != NULL);
    2918             : 
    2919             :     /* spill only attributes that we actually need */
    2920      581072 :     if (!aggstate->all_cols_needed)
    2921             :     {
    2922        6656 :         spillslot = aggstate->hash_spill_wslot;
    2923        6656 :         slot_getsomeattrs(inputslot, aggstate->max_colno_needed);
    2924        6656 :         ExecClearTuple(spillslot);
    2925       19968 :         for (int i = 0; i < spillslot->tts_tupleDescriptor->natts; i++)
    2926             :         {
    2927       13312 :             if (bms_is_member(i + 1, aggstate->colnos_needed))
    2928             :             {
    2929        6656 :                 spillslot->tts_values[i] = inputslot->tts_values[i];
    2930        6656 :                 spillslot->tts_isnull[i] = inputslot->tts_isnull[i];
    2931             :             }
    2932             :             else
    2933        6656 :                 spillslot->tts_isnull[i] = true;
    2934             :         }
    2935        6656 :         ExecStoreVirtualTuple(spillslot);
    2936             :     }
    2937             :     else
    2938      574416 :         spillslot = inputslot;
    2939             : 
    2940      581072 :     tuple = ExecFetchSlotMinimalTuple(spillslot, &shouldFree);
    2941             : 
    2942      581072 :     partition = (hash & spill->mask) >> spill->shift;
    2943      581072 :     spill->ntuples[partition]++;
    2944             : 
    2945             :     /*
    2946             :      * All hash values destined for a given partition have some bits in
    2947             :      * common, which causes bad HLL cardinality estimates. Hash the hash to
    2948             :      * get a more uniform distribution.
    2949             :      */
    2950      581072 :     addHyperLogLog(&spill->hll_card[partition], hash_bytes_uint32(hash));
    2951             : 
    2952      581072 :     tape = spill->partitions[partition];
    2953             : 
    2954      581072 :     LogicalTapeWrite(tape, (void *) &hash, sizeof(uint32));
    2955      581072 :     total_written += sizeof(uint32);
    2956             : 
    2957      581072 :     LogicalTapeWrite(tape, (void *) tuple, tuple->t_len);
    2958      581072 :     total_written += tuple->t_len;
    2959             : 
    2960      581072 :     if (shouldFree)
    2961      206432 :         pfree(tuple);
    2962             : 
    2963      581072 :     return total_written;
    2964             : }
    2965             : 
    2966             : /*
    2967             :  * hashagg_batch_new
    2968             :  *
    2969             :  * Construct a HashAggBatch item, which represents one iteration of HashAgg to
    2970             :  * be done.
    2971             :  */
    2972             : static HashAggBatch *
    2973       19044 : hashagg_batch_new(LogicalTape *input_tape, int setno,
    2974             :                   int64 input_tuples, double input_card, int used_bits)
    2975             : {
    2976       19044 :     HashAggBatch *batch = palloc0(sizeof(HashAggBatch));
    2977             : 
    2978       19044 :     batch->setno = setno;
    2979       19044 :     batch->used_bits = used_bits;
    2980       19044 :     batch->input_tape = input_tape;
    2981       19044 :     batch->input_tuples = input_tuples;
    2982       19044 :     batch->input_card = input_card;
    2983             : 
    2984       19044 :     return batch;
    2985             : }
    2986             : 
    2987             : /*
    2988             :  * read_spilled_tuple
    2989             :  *      read the next tuple from a batch's tape.  Return NULL if no more.
    2990             :  */
    2991             : static MinimalTuple
    2992      600116 : hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
    2993             : {
    2994      600116 :     LogicalTape *tape = batch->input_tape;
    2995             :     MinimalTuple tuple;
    2996             :     uint32      t_len;
    2997             :     size_t      nread;
    2998             :     uint32      hash;
    2999             : 
    3000      600116 :     nread = LogicalTapeRead(tape, &hash, sizeof(uint32));
    3001      600116 :     if (nread == 0)
    3002       19044 :         return NULL;
    3003      581072 :     if (nread != sizeof(uint32))
    3004           0 :         ereport(ERROR,
    3005             :                 (errcode_for_file_access(),
    3006             :                  errmsg("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
    3007             :                         tape, sizeof(uint32), nread)));
    3008      581072 :     if (hashp != NULL)
    3009      581072 :         *hashp = hash;
    3010             : 
    3011      581072 :     nread = LogicalTapeRead(tape, &t_len, sizeof(t_len));
    3012      581072 :     if (nread != sizeof(uint32))
    3013           0 :         ereport(ERROR,
    3014             :                 (errcode_for_file_access(),
    3015             :                  errmsg("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
    3016             :                         tape, sizeof(uint32), nread)));
    3017             : 
    3018      581072 :     tuple = (MinimalTuple) palloc(t_len);
    3019      581072 :     tuple->t_len = t_len;
    3020             : 
    3021      581072 :     nread = LogicalTapeRead(tape,
    3022             :                             (void *) ((char *) tuple + sizeof(uint32)),
    3023             :                             t_len - sizeof(uint32));
    3024      581072 :     if (nread != t_len - sizeof(uint32))
    3025           0 :         ereport(ERROR,
    3026             :                 (errcode_for_file_access(),
    3027             :                  errmsg("unexpected EOF for tape %p: requested %zu bytes, read %zu bytes",
    3028             :                         tape, t_len - sizeof(uint32), nread)));
    3029             : 
    3030      581072 :     return tuple;
    3031             : }
    3032             : 
    3033             : /*
    3034             :  * hashagg_finish_initial_spills
    3035             :  *
    3036             :  * After a HashAggBatch has been processed, it may have spilled tuples to
    3037             :  * disk. If so, turn the spilled partitions into new batches that must later
    3038             :  * be executed.
    3039             :  */
    3040             : static void
    3041       62990 : hashagg_finish_initial_spills(AggState *aggstate)
    3042             : {
    3043             :     int         setno;
    3044       62990 :     int         total_npartitions = 0;
    3045             : 
    3046       62990 :     if (aggstate->hash_spills != NULL)
    3047             :     {
    3048         128 :         for (setno = 0; setno < aggstate->num_hashes; setno++)
    3049             :         {
    3050          84 :             HashAggSpill *spill = &aggstate->hash_spills[setno];
    3051             : 
    3052          84 :             total_npartitions += spill->npartitions;
    3053          84 :             hashagg_spill_finish(aggstate, spill, setno);
    3054             :         }
    3055             : 
    3056             :         /*
    3057             :          * We're not processing tuples from outer plan any more; only
    3058             :          * processing batches of spilled tuples. The initial spill structures
    3059             :          * are no longer needed.
    3060             :          */
    3061          44 :         pfree(aggstate->hash_spills);
    3062          44 :         aggstate->hash_spills = NULL;
    3063             :     }
    3064             : 
    3065       62990 :     hash_agg_update_metrics(aggstate, false, total_npartitions);
    3066       62990 :     aggstate->hash_spill_mode = false;
    3067       62990 : }
    3068             : 
    3069             : /*
    3070             :  * hashagg_spill_finish
    3071             :  *
    3072             :  * Transform spill partitions into new batches.
    3073             :  */
    3074             : static void
    3075        8680 : hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
    3076             : {
    3077             :     int         i;
    3078        8680 :     int         used_bits = 32 - spill->shift;
    3079             : 
    3080        8680 :     if (spill->npartitions == 0)
    3081           0 :         return;                 /* didn't spill */
    3082             : 
    3083       43400 :     for (i = 0; i < spill->npartitions; i++)
    3084             :     {
    3085       34720 :         LogicalTape *tape = spill->partitions[i];
    3086             :         HashAggBatch *new_batch;
    3087             :         double      cardinality;
    3088             : 
    3089             :         /* if the partition is empty, don't create a new batch of work */
    3090       34720 :         if (spill->ntuples[i] == 0)
    3091       15676 :             continue;
    3092             : 
    3093       19044 :         cardinality = estimateHyperLogLog(&spill->hll_card[i]);
    3094       19044 :         freeHyperLogLog(&spill->hll_card[i]);
    3095             : 
    3096             :         /* rewinding frees the buffer while not in use */
    3097       19044 :         LogicalTapeRewindForRead(tape, HASHAGG_READ_BUFFER_SIZE);
    3098             : 
    3099       19044 :         new_batch = hashagg_batch_new(tape, setno,
    3100       19044 :                                       spill->ntuples[i], cardinality,
    3101             :                                       used_bits);
    3102       19044 :         aggstate->hash_batches = lappend(aggstate->hash_batches, new_batch);
    3103       19044 :         aggstate->hash_batches_used++;
    3104             :     }
    3105             : 
    3106        8680 :     pfree(spill->ntuples);
    3107        8680 :     pfree(spill->hll_card);
    3108        8680 :     pfree(spill->partitions);
    3109             : }
    3110             : 
    3111             : /*
    3112             :  * Free resources related to a spilled HashAgg.
    3113             :  */
    3114             : static void
    3115      116816 : hashagg_reset_spill_state(AggState *aggstate)
    3116             : {
    3117             :     /* free spills from initial pass */
    3118      116816 :     if (aggstate->hash_spills != NULL)
    3119             :     {
    3120             :         int         setno;
    3121             : 
    3122           0 :         for (setno = 0; setno < aggstate->num_hashes; setno++)
    3123             :         {
    3124           0 :             HashAggSpill *spill = &aggstate->hash_spills[setno];
    3125             : 
    3126           0 :             pfree(spill->ntuples);
    3127           0 :             pfree(spill->partitions);
    3128             :         }
    3129           0 :         pfree(aggstate->hash_spills);
    3130           0 :         aggstate->hash_spills = NULL;
    3131             :     }
    3132             : 
    3133             :     /* free batches */
    3134      116816 :     list_free_deep(aggstate->hash_batches);
    3135      116816 :     aggstate->hash_batches = NIL;
    3136             : 
    3137             :     /* close tape set */
    3138      116816 :     if (aggstate->hash_tapeset != NULL)
    3139             :     {
    3140          44 :         LogicalTapeSetClose(aggstate->hash_tapeset);
    3141          44 :         aggstate->hash_tapeset = NULL;
    3142             :     }
    3143      116816 : }
    3144             : 
    3145             : 
    3146             : /* -----------------
    3147             :  * ExecInitAgg
    3148             :  *
    3149             :  *  Creates the run-time information for the agg node produced by the
    3150             :  *  planner and initializes its outer subtree.
    3151             :  *
    3152             :  * -----------------
    3153             :  */
    3154             : AggState *
    3155       58016 : ExecInitAgg(Agg *node, EState *estate, int eflags)
    3156             : {
    3157             :     AggState   *aggstate;
    3158             :     AggStatePerAgg peraggs;
    3159             :     AggStatePerTrans pertransstates;
    3160             :     AggStatePerGroup *pergroups;
    3161             :     Plan       *outerPlan;
    3162             :     ExprContext *econtext;
    3163             :     TupleDesc   scanDesc;
    3164             :     int         max_aggno;
    3165             :     int         max_transno;
    3166             :     int         numaggrefs;
    3167             :     int         numaggs;
    3168             :     int         numtrans;
    3169             :     int         phase;
    3170             :     int         phaseidx;
    3171             :     ListCell   *l;
    3172       58016 :     Bitmapset  *all_grouped_cols = NULL;
    3173       58016 :     int         numGroupingSets = 1;
    3174             :     int         numPhases;
    3175             :     int         numHashes;
    3176       58016 :     int         i = 0;
    3177       58016 :     int         j = 0;
    3178      111122 :     bool        use_hashing = (node->aggstrategy == AGG_HASHED ||
    3179       53106 :                                node->aggstrategy == AGG_MIXED);
    3180             : 
    3181             :     /* check for unsupported flags */
    3182             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
    3183             : 
    3184             :     /*
    3185             :      * create state structure
    3186             :      */
    3187       58016 :     aggstate = makeNode(AggState);
    3188       58016 :     aggstate->ss.ps.plan = (Plan *) node;
    3189       58016 :     aggstate->ss.ps.state = estate;
    3190       58016 :     aggstate->ss.ps.ExecProcNode = ExecAgg;
    3191             : 
    3192       58016 :     aggstate->aggs = NIL;
    3193       58016 :     aggstate->numaggs = 0;
    3194       58016 :     aggstate->numtrans = 0;
    3195       58016 :     aggstate->aggstrategy = node->aggstrategy;
    3196       58016 :     aggstate->aggsplit = node->aggsplit;
    3197       58016 :     aggstate->maxsets = 0;
    3198       58016 :     aggstate->projected_set = -1;
    3199       58016 :     aggstate->current_set = 0;
    3200       58016 :     aggstate->peragg = NULL;
    3201       58016 :     aggstate->pertrans = NULL;
    3202       58016 :     aggstate->curperagg = NULL;
    3203       58016 :     aggstate->curpertrans = NULL;
    3204       58016 :     aggstate->input_done = false;
    3205       58016 :     aggstate->agg_done = false;
    3206       58016 :     aggstate->pergroups = NULL;
    3207       58016 :     aggstate->grp_firstTuple = NULL;
    3208       58016 :     aggstate->sort_in = NULL;
    3209       58016 :     aggstate->sort_out = NULL;
    3210             : 
    3211             :     /*
    3212             :      * phases[0] always exists, but is dummy in sorted/plain mode
    3213             :      */
    3214       58016 :     numPhases = (use_hashing ? 1 : 2);
    3215       58016 :     numHashes = (use_hashing ? 1 : 0);
    3216             : 
    3217             :     /*
    3218             :      * Calculate the maximum number of grouping sets in any phase; this
    3219             :      * determines the size of some allocations.  Also calculate the number of
    3220             :      * phases, since all hashed/mixed nodes contribute to only a single phase.
    3221             :      */
    3222       58016 :     if (node->groupingSets)
    3223             :     {
    3224         476 :         numGroupingSets = list_length(node->groupingSets);
    3225             : 
    3226        1024 :         foreach(l, node->chain)
    3227             :         {
    3228         548 :             Agg        *agg = lfirst(l);
    3229             : 
    3230         548 :             numGroupingSets = Max(numGroupingSets,
    3231             :                                   list_length(agg->groupingSets));
    3232             : 
    3233             :             /*
    3234             :              * additional AGG_HASHED aggs become part of phase 0, but all
    3235             :              * others add an extra phase.
    3236             :              */
    3237         548 :             if (agg->aggstrategy != AGG_HASHED)
    3238         280 :                 ++numPhases;
    3239             :             else
    3240         268 :                 ++numHashes;
    3241             :         }
    3242             :     }
    3243             : 
    3244       58016 :     aggstate->maxsets = numGroupingSets;
    3245       58016 :     aggstate->numphases = numPhases;
    3246             : 
    3247       58016 :     aggstate->aggcontexts = (ExprContext **)
    3248       58016 :         palloc0(sizeof(ExprContext *) * numGroupingSets);
    3249             : 
    3250             :     /*
    3251             :      * Create expression contexts.  We need three or more, one for
    3252             :      * per-input-tuple processing, one for per-output-tuple processing, one
    3253             :      * for all the hashtables, and one for each grouping set.  The per-tuple
    3254             :      * memory context of the per-grouping-set ExprContexts (aggcontexts)
    3255             :      * replaces the standalone memory context formerly used to hold transition
    3256             :      * values.  We cheat a little by using ExecAssignExprContext() to build
    3257             :      * all of them.
    3258             :      *
    3259             :      * NOTE: the details of what is stored in aggcontexts and what is stored
    3260             :      * in the regular per-query memory context are driven by a simple
    3261             :      * decision: we want to reset the aggcontext at group boundaries (if not
    3262             :      * hashing) and in ExecReScanAgg to recover no-longer-wanted space.
    3263             :      */
    3264       58016 :     ExecAssignExprContext(estate, &aggstate->ss.ps);
    3265       58016 :     aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
    3266             : 
    3267      116588 :     for (i = 0; i < numGroupingSets; ++i)
    3268             :     {
    3269       58572 :         ExecAssignExprContext(estate, &aggstate->ss.ps);
    3270       58572 :         aggstate->aggcontexts[i] = aggstate->ss.ps.ps_ExprContext;
    3271             :     }
    3272             : 
    3273       58016 :     if (use_hashing)
    3274        5038 :         aggstate->hashcontext = CreateWorkExprContext(estate);
    3275             : 
    3276       58016 :     ExecAssignExprContext(estate, &aggstate->ss.ps);
    3277             : 
    3278             :     /*
    3279             :      * Initialize child nodes.
    3280             :      *
    3281             :      * If we are doing a hashed aggregation then the child plan does not need
    3282             :      * to handle REWIND efficiently; see ExecReScanAgg.
    3283             :      */
    3284       58016 :     if (node->aggstrategy == AGG_HASHED)
    3285        4910 :         eflags &= ~EXEC_FLAG_REWIND;
    3286       58016 :     outerPlan = outerPlan(node);
    3287       58016 :     outerPlanState(aggstate) = ExecInitNode(outerPlan, estate, eflags);
    3288             : 
    3289             :     /*
    3290             :      * initialize source tuple type.
    3291             :      */
    3292       58016 :     aggstate->ss.ps.outerops =
    3293       58016 :         ExecGetResultSlotOps(outerPlanState(&aggstate->ss),
    3294             :                              &aggstate->ss.ps.outeropsfixed);
    3295       58016 :     aggstate->ss.ps.outeropsset = true;
    3296             : 
    3297       58016 :     ExecCreateScanSlotFromOuterPlan(estate, &aggstate->ss,
    3298             :                                     aggstate->ss.ps.outerops);
    3299       58016 :     scanDesc = aggstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor;
    3300             : 
    3301             :     /*
    3302             :      * If there are more than two phases (including a potential dummy phase
    3303             :      * 0), input will be resorted using tuplesort. Need a slot for that.
    3304             :      */
    3305       58016 :     if (numPhases > 2)
    3306             :     {
    3307         124 :         aggstate->sort_slot = ExecInitExtraTupleSlot(estate, scanDesc,
    3308             :                                                      &TTSOpsMinimalTuple);
    3309             : 
    3310             :         /*
    3311             :          * The output of the tuplesort, and the output from the outer child
    3312             :          * might not use the same type of slot. In most cases the child will
    3313             :          * be a Sort, and thus return a TTSOpsMinimalTuple type slot - but the
    3314             :          * input can also be presorted due an index, in which case it could be
    3315             :          * a different type of slot.
    3316             :          *
    3317             :          * XXX: For efficiency it would be good to instead/additionally
    3318             :          * generate expressions with corresponding settings of outerops* for
    3319             :          * the individual phases - deforming is often a bottleneck for
    3320             :          * aggregations with lots of rows per group. If there's multiple
    3321             :          * sorts, we know that all but the first use TTSOpsMinimalTuple (via
    3322             :          * the nodeAgg.c internal tuplesort).
    3323             :          */
    3324         124 :         if (aggstate->ss.ps.outeropsfixed &&
    3325         124 :             aggstate->ss.ps.outerops != &TTSOpsMinimalTuple)
    3326          16 :             aggstate->ss.ps.outeropsfixed = false;
    3327             :     }
    3328             : 
    3329             :     /*
    3330             :      * Initialize result type, slot and projection.
    3331             :      */
    3332       58016 :     ExecInitResultTupleSlotTL(&aggstate->ss.ps, &TTSOpsVirtual);
    3333       58016 :     ExecAssignProjectionInfo(&aggstate->ss.ps, NULL);
    3334             : 
    3335             :     /*
    3336             :      * initialize child expressions
    3337             :      *
    3338             :      * We expect the parser to have checked that no aggs contain other agg
    3339             :      * calls in their arguments (and just to be sure, we verify it again while
    3340             :      * initializing the plan node).  This would make no sense under SQL
    3341             :      * semantics, and it's forbidden by the spec.  Because it is true, we
    3342             :      * don't need to worry about evaluating the aggs in any particular order.
    3343             :      *
    3344             :      * Note: execExpr.c finds Aggrefs for us, and adds them to aggstate->aggs.
    3345             :      * Aggrefs in the qual are found here; Aggrefs in the targetlist are found
    3346             :      * during ExecAssignProjectionInfo, above.
    3347             :      */
    3348       58016 :     aggstate->ss.ps.qual =
    3349       58016 :         ExecInitQual(node->plan.qual, (PlanState *) aggstate);
    3350             : 
    3351             :     /*
    3352             :      * We should now have found all Aggrefs in the targetlist and quals.
    3353             :      */
    3354       58016 :     numaggrefs = list_length(aggstate->aggs);
    3355       58016 :     max_aggno = -1;
    3356       58016 :     max_transno = -1;
    3357      117496 :     foreach(l, aggstate->aggs)
    3358             :     {
    3359       59480 :         Aggref     *aggref = (Aggref *) lfirst(l);
    3360             : 
    3361       59480 :         max_aggno = Max(max_aggno, aggref->aggno);
    3362       59480 :         max_transno = Max(max_transno, aggref->aggtransno);
    3363             :     }
    3364       58016 :     numaggs = max_aggno + 1;
    3365       58016 :     numtrans = max_transno + 1;
    3366             : 
    3367             :     /*
    3368             :      * For each phase, prepare grouping set data and fmgr lookup data for
    3369             :      * compare functions.  Accumulate all_grouped_cols in passing.
    3370             :      */
    3371       58016 :     aggstate->phases = palloc0(numPhases * sizeof(AggStatePerPhaseData));
    3372             : 
    3373       58016 :     aggstate->num_hashes = numHashes;
    3374       58016 :     if (numHashes)
    3375             :     {
    3376        5038 :         aggstate->perhash = palloc0(sizeof(AggStatePerHashData) * numHashes);
    3377        5038 :         aggstate->phases[0].numsets = 0;
    3378        5038 :         aggstate->phases[0].gset_lengths = palloc(numHashes * sizeof(int));
    3379        5038 :         aggstate->phases[0].grouped_cols = palloc(numHashes * sizeof(Bitmapset *));
    3380             :     }
    3381             : 
    3382       58016 :     phase = 0;
    3383      116580 :     for (phaseidx = 0; phaseidx <= list_length(node->chain); ++phaseidx)
    3384             :     {
    3385             :         Agg        *aggnode;
    3386             :         Sort       *sortnode;
    3387             : 
    3388       58564 :         if (phaseidx > 0)
    3389             :         {
    3390         548 :             aggnode = list_nth_node(Agg, node->chain, phaseidx - 1);
    3391         548 :             sortnode = castNode(Sort, aggnode->plan.lefttree);
    3392             :         }
    3393             :         else
    3394             :         {
    3395       58016 :             aggnode = node;
    3396       58016 :             sortnode = NULL;
    3397             :         }
    3398             : 
    3399             :         Assert(phase <= 1 || sortnode);
    3400             : 
    3401       58564 :         if (aggnode->aggstrategy == AGG_HASHED
    3402       53386 :             || aggnode->aggstrategy == AGG_MIXED)
    3403             :         {
    3404        5306 :             AggStatePerPhase phasedata = &aggstate->phases[0];
    3405             :             AggStatePerHash perhash;
    3406        5306 :             Bitmapset  *cols = NULL;
    3407             : 
    3408             :             Assert(phase == 0);
    3409        5306 :             i = phasedata->numsets++;
    3410        5306 :             perhash = &aggstate->perhash[i];
    3411             : 
    3412             :             /* phase 0 always points to the "real" Agg in the hash case */
    3413        5306 :             phasedata->aggnode = node;
    3414        5306 :             phasedata->aggstrategy = node->aggstrategy;
    3415             : 
    3416             :             /* but the actual Agg node representing this hash is saved here */
    3417        5306 :             perhash->aggnode = aggnode;
    3418             : 
    3419        5306 :             phasedata->gset_lengths[i] = perhash->numCols = aggnode->numCols;
    3420             : 
    3421       12672 :             for (j = 0; j < aggnode->numCols; ++j)
    3422        7366 :                 cols = bms_add_member(cols, aggnode->grpColIdx[j]);
    3423             : 
    3424        5306 :             phasedata->grouped_cols[i] = cols;
    3425             : 
    3426        5306 :             all_grouped_cols = bms_add_members(all_grouped_cols, cols);
    3427        5306 :             continue;
    3428             :         }
    3429             :         else
    3430             :         {
    3431       53258 :             AggStatePerPhase phasedata = &aggstate->phases[++phase];
    3432             :             int         num_sets;
    3433             : 
    3434       53258 :             phasedata->numsets = num_sets = list_length(aggnode->groupingSets);
    3435             : 
    3436       53258 :             if (num_sets)
    3437             :             {
    3438         572 :                 phasedata->gset_lengths = palloc(num_sets * sizeof(int));
    3439         572 :                 phasedata->grouped_cols = palloc(num_sets * sizeof(Bitmapset *));
    3440             : 
    3441         572 :                 i = 0;
    3442        1748 :                 foreach(l, aggnode->groupingSets)
    3443             :                 {
    3444        1176 :                     int         current_length = list_length(lfirst(l));
    3445        1176 :                     Bitmapset  *cols = NULL;
    3446             : 
    3447             :                     /* planner forces this to be correct */
    3448        2340 :                     for (j = 0; j < current_length; ++j)
    3449        1164 :                         cols = bms_add_member(cols, aggnode->grpColIdx[j]);
    3450             : 
    3451        1176 :                     phasedata->grouped_cols[i] = cols;
    3452        1176 :                     phasedata->gset_lengths[i] = current_length;
    3453             : 
    3454        1176 :                     ++i;
    3455             :                 }
    3456             : 
    3457         572 :                 all_grouped_cols = bms_add_members(all_grouped_cols,
    3458         572 :                                                    phasedata->grouped_cols[0]);
    3459             :             }
    3460             :             else
    3461             :             {
    3462             :                 Assert(phaseidx == 0);
    3463             : 
    3464       52686 :                 phasedata->gset_lengths = NULL;
    3465       52686 :                 phasedata->grouped_cols = NULL;
    3466             :             }
    3467             : 
    3468             :             /*
    3469             :              * If we are grouping, precompute fmgr lookup data for inner loop.
    3470             :              */
    3471       53258 :             if (aggnode->aggstrategy == AGG_SORTED)
    3472             :             {
    3473        1434 :                 int         i = 0;
    3474             : 
    3475             :                 Assert(aggnode->numCols > 0);
    3476             : 
    3477             :                 /*
    3478             :                  * Build a separate function for each subset of columns that
    3479             :                  * need to be compared.
    3480             :                  */
    3481        1434 :                 phasedata->eqfunctions =
    3482        1434 :                     (ExprState **) palloc0(aggnode->numCols * sizeof(ExprState *));
    3483             : 
    3484             :                 /* for each grouping set */
    3485        2452 :                 for (i = 0; i < phasedata->numsets; i++)
    3486             :                 {
    3487        1018 :                     int         length = phasedata->gset_lengths[i];
    3488             : 
    3489        1018 :                     if (phasedata->eqfunctions[length - 1] != NULL)
    3490         312 :                         continue;
    3491             : 
    3492         706 :                     phasedata->eqfunctions[length - 1] =
    3493         706 :                         execTuplesMatchPrepare(scanDesc,
    3494             :                                                length,
    3495         706 :                                                aggnode->grpColIdx,
    3496         706 :                                                aggnode->grpOperators,
    3497         706 :                                                aggnode->grpCollations,
    3498             :                                                (PlanState *) aggstate);
    3499             :                 }
    3500             : 
    3501             :                 /* and for all grouped columns, unless already computed */
    3502        1434 :                 if (phasedata->eqfunctions[aggnode->numCols - 1] == NULL)
    3503             :                 {
    3504         972 :                     phasedata->eqfunctions[aggnode->numCols - 1] =
    3505         972 :                         execTuplesMatchPrepare(scanDesc,
    3506             :                                                aggnode->numCols,
    3507         972 :                                                aggnode->grpColIdx,
    3508         972 :                                                aggnode->grpOperators,
    3509         972 :                                                aggnode->grpCollations,
    3510             :                                                (PlanState *) aggstate);
    3511             :                 }
    3512             :             }
    3513             : 
    3514       53258 :             phasedata->aggnode = aggnode;
    3515       53258 :             phasedata->aggstrategy = aggnode->aggstrategy;
    3516       53258 :             phasedata->sortnode = sortnode;
    3517             :         }
    3518             :     }
    3519             : 
    3520             :     /*
    3521             :      * Convert all_grouped_cols to a descending-order list.
    3522             :      */
    3523       58016 :     i = -1;
    3524       65746 :     while ((i = bms_next_member(all_grouped_cols, i)) >= 0)
    3525        7730 :         aggstate->all_grouped_cols = lcons_int(i, aggstate->all_grouped_cols);
    3526             : 
    3527             :     /*
    3528             :      * Set up aggregate-result storage in the output expr context, and also
    3529             :      * allocate my private per-agg working storage
    3530             :      */
    3531       58016 :     econtext = aggstate->ss.ps.ps_ExprContext;
    3532       58016 :     econtext->ecxt_aggvalues = (Datum *) palloc0(sizeof(Datum) * numaggs);
    3533       58016 :     econtext->ecxt_aggnulls = (bool *) palloc0(sizeof(bool) * numaggs);
    3534             : 
    3535       58016 :     peraggs = (AggStatePerAgg) palloc0(sizeof(AggStatePerAggData) * numaggs);
    3536       58016 :     pertransstates = (AggStatePerTrans) palloc0(sizeof(AggStatePerTransData) * numtrans);
    3537             : 
    3538       58016 :     aggstate->peragg = peraggs;
    3539       58016 :     aggstate->pertrans = pertransstates;
    3540             : 
    3541             : 
    3542       58016 :     aggstate->all_pergroups =
    3543       58016 :         (AggStatePerGroup *) palloc0(sizeof(AggStatePerGroup)
    3544       58016 :                                      * (numGroupingSets + numHashes));
    3545       58016 :     pergroups = aggstate->all_pergroups;
    3546             : 
    3547       58016 :     if (node->aggstrategy != AGG_HASHED)
    3548             :     {
    3549      106768 :         for (i = 0; i < numGroupingSets; i++)
    3550             :         {
    3551       53662 :             pergroups[i] = (AggStatePerGroup) palloc0(sizeof(AggStatePerGroupData)
    3552             :                                                       * numaggs);
    3553             :         }
    3554             : 
    3555       53106 :         aggstate->pergroups = pergroups;
    3556       53106 :         pergroups += numGroupingSets;
    3557             :     }
    3558             : 
    3559             :     /*
    3560             :      * Hashing can only appear in the initial phase.
    3561             :      */
    3562       58016 :     if (use_hashing)
    3563             :     {
    3564        5038 :         Plan       *outerplan = outerPlan(node);
    3565        5038 :         uint64      totalGroups = 0;
    3566             :         int         i;
    3567             : 
    3568        5038 :         aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
    3569             :                                                        "HashAgg meta context",
    3570             :                                                        ALLOCSET_DEFAULT_SIZES);
    3571        5038 :         aggstate->hash_spill_rslot = ExecInitExtraTupleSlot(estate, scanDesc,
    3572             :                                                             &TTSOpsMinimalTuple);
    3573        5038 :         aggstate->hash_spill_wslot = ExecInitExtraTupleSlot(estate, scanDesc,
    3574             :                                                             &TTSOpsVirtual);
    3575             : 
    3576             :         /* this is an array of pointers, not structures */
    3577        5038 :         aggstate->hash_pergroup = pergroups;
    3578             : 
    3579       10076 :         aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
    3580        5038 :                                                       outerplan->plan_width,
    3581             :                                                       node->transitionSpace);
    3582             : 
    3583             :         /*
    3584             :          * Consider all of the grouping sets together when setting the limits
    3585             :          * and estimating the number of partitions. This can be inaccurate
    3586             :          * when there is more than one grouping set, but should still be
    3587             :          * reasonable.
    3588             :          */
    3589       10344 :         for (i = 0; i < aggstate->num_hashes; i++)
    3590        5306 :             totalGroups += aggstate->perhash[i].aggnode->numGroups;
    3591             : 
    3592        5038 :         hash_agg_set_limits(aggstate->hashentrysize, totalGroups, 0,
    3593             :                             &aggstate->hash_mem_limit,
    3594             :                             &aggstate->hash_ngroups_limit,
    3595             :                             &aggstate->hash_planned_partitions);
    3596        5038 :         find_hash_columns(aggstate);
    3597             : 
    3598             :         /* Skip massive memory allocation if we are just doing EXPLAIN */
    3599        5038 :         if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
    3600        4316 :             build_hash_tables(aggstate);
    3601             : 
    3602        5038 :         aggstate->table_filled = false;
    3603             : 
    3604             :         /* Initialize this to 1, meaning nothing spilled, yet */
    3605        5038 :         aggstate->hash_batches_used = 1;
    3606             :     }
    3607             : 
    3608             :     /*
    3609             :      * Initialize current phase-dependent values to initial phase. The initial
    3610             :      * phase is 1 (first sort pass) for all strategies that use sorting (if
    3611             :      * hashing is being done too, then phase 0 is processed last); but if only
    3612             :      * hashing is being done, then phase 0 is all there is.
    3613             :      */
    3614       58016 :     if (node->aggstrategy == AGG_HASHED)
    3615             :     {
    3616        4910 :         aggstate->current_phase = 0;
    3617        4910 :         initialize_phase(aggstate, 0);
    3618        4910 :         select_current_set(aggstate, 0, true);
    3619             :     }
    3620             :     else
    3621             :     {
    3622       53106 :         aggstate->current_phase = 1;
    3623       53106 :         initialize_phase(aggstate, 1);
    3624       53106 :         select_current_set(aggstate, 0, false);
    3625             :     }
    3626             : 
    3627             :     /*
    3628             :      * Perform lookups of aggregate function info, and initialize the
    3629             :      * unchanging fields of the per-agg and per-trans data.
    3630             :      */
    3631      117492 :     foreach(l, aggstate->aggs)
    3632             :     {
    3633       59480 :         Aggref     *aggref = lfirst(l);
    3634             :         AggStatePerAgg peragg;
    3635             :         AggStatePerTrans pertrans;
    3636             :         Oid         aggTransFnInputTypes[FUNC_MAX_ARGS];
    3637             :         int         numAggTransFnArgs;
    3638             :         int         numDirectArgs;
    3639             :         HeapTuple   aggTuple;
    3640             :         Form_pg_aggregate aggform;
    3641             :         AclResult   aclresult;
    3642             :         Oid         finalfn_oid;
    3643             :         Oid         serialfn_oid,
    3644             :                     deserialfn_oid;
    3645             :         Oid         aggOwner;
    3646             :         Expr       *finalfnexpr;
    3647             :         Oid         aggtranstype;
    3648             : 
    3649             :         /* Planner should have assigned aggregate to correct level */
    3650             :         Assert(aggref->agglevelsup == 0);
    3651             :         /* ... and the split mode should match */
    3652             :         Assert(aggref->aggsplit == aggstate->aggsplit);
    3653             : 
    3654       59480 :         peragg = &peraggs[aggref->aggno];
    3655             : 
    3656             :         /* Check if we initialized the state for this aggregate already. */
    3657       59480 :         if (peragg->aggref != NULL)
    3658         292 :             continue;
    3659             : 
    3660       59188 :         peragg->aggref = aggref;
    3661       59188 :         peragg->transno = aggref->aggtransno;
    3662             : 
    3663             :         /* Fetch the pg_aggregate row */
    3664       59188 :         aggTuple = SearchSysCache1(AGGFNOID,
    3665       59188 :                                    ObjectIdGetDatum(aggref->aggfnoid));
    3666       59188 :         if (!HeapTupleIsValid(aggTuple))
    3667           0 :             elog(ERROR, "cache lookup failed for aggregate %u",
    3668             :                  aggref->aggfnoid);
    3669       59188 :         aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
    3670             : 
    3671             :         /* Check permission to call aggregate function */
    3672       59188 :         aclresult = pg_proc_aclcheck(aggref->aggfnoid, GetUserId(),
    3673             :                                      ACL_EXECUTE);
    3674       59188 :         if (aclresult != ACLCHECK_OK)
    3675           4 :             aclcheck_error(aclresult, OBJECT_AGGREGATE,
    3676           4 :                            get_func_name(aggref->aggfnoid));
    3677       59184 :         InvokeFunctionExecuteHook(aggref->aggfnoid);
    3678             : 
    3679             :         /* planner recorded transition state type in the Aggref itself */
    3680       59184 :         aggtranstype = aggref->aggtranstype;
    3681             :         Assert(OidIsValid(aggtranstype));
    3682             : 
    3683             :         /* Final function only required if we're finalizing the aggregates */
    3684       59184 :         if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
    3685        2694 :             peragg->finalfn_oid = finalfn_oid = InvalidOid;
    3686             :         else
    3687       56490 :             peragg->finalfn_oid = finalfn_oid = aggform->aggfinalfn;
    3688             : 
    3689       59184 :         serialfn_oid = InvalidOid;
    3690       59184 :         deserialfn_oid = InvalidOid;
    3691             : 
    3692             :         /*
    3693             :          * Check if serialization/deserialization is required.  We only do it
    3694             :          * for aggregates that have transtype INTERNAL.
    3695             :          */
    3696       59184 :         if (aggtranstype == INTERNALOID)
    3697             :         {
    3698             :             /*
    3699             :              * The planner should only have generated a serialize agg node if
    3700             :              * every aggregate with an INTERNAL state has a serialization
    3701             :              * function.  Verify that.
    3702             :              */
    3703       40824 :             if (DO_AGGSPLIT_SERIALIZE(aggstate->aggsplit))
    3704             :             {
    3705             :                 /* serialization only valid when not running finalfn */
    3706             :                 Assert(DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
    3707             : 
    3708         160 :                 if (!OidIsValid(aggform->aggserialfn))
    3709           0 :                     elog(ERROR, "serialfunc not provided for serialization aggregation");
    3710         160 :                 serialfn_oid = aggform->aggserialfn;
    3711             :             }
    3712             : 
    3713             :             /* Likewise for deserialization functions */
    3714       40824 :             if (DO_AGGSPLIT_DESERIALIZE(aggstate->aggsplit))
    3715             :             {
    3716             :                 /* deserialization only valid when combining states */
    3717             :                 Assert(DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
    3718             : 
    3719          48 :                 if (!OidIsValid(aggform->aggdeserialfn))
    3720           0 :                     elog(ERROR, "deserialfunc not provided for deserialization aggregation");
    3721          48 :                 deserialfn_oid = aggform->aggdeserialfn;
    3722             :             }
    3723             :         }
    3724             : 
    3725             :         /* Check that aggregate owner has permission to call component fns */
    3726             :         {
    3727             :             HeapTuple   procTuple;
    3728             : 
    3729       59184 :             procTuple = SearchSysCache1(PROCOID,
    3730       59184 :                                         ObjectIdGetDatum(aggref->aggfnoid));
    3731       59184 :             if (!HeapTupleIsValid(procTuple))
    3732           0 :                 elog(ERROR, "cache lookup failed for function %u",
    3733             :                      aggref->aggfnoid);
    3734       59184 :             aggOwner = ((Form_pg_proc) GETSTRUCT(procTuple))->proowner;
    3735       59184 :             ReleaseSysCache(procTuple);
    3736             : 
    3737       59184 :             if (OidIsValid(finalfn_oid))
    3738             :             {
    3739       41840 :                 aclresult = pg_proc_aclcheck(finalfn_oid, aggOwner,
    3740             :                                              ACL_EXECUTE);
    3741       41840 :                 if (aclresult != ACLCHECK_OK)
    3742           0 :                     aclcheck_error(aclresult, OBJECT_FUNCTION,
    3743           0 :                                    get_func_name(finalfn_oid));
    3744       41840 :                 InvokeFunctionExecuteHook(finalfn_oid);
    3745             :             }
    3746       59184 :             if (OidIsValid(serialfn_oid))
    3747             :             {
    3748         160 :                 aclresult = pg_proc_aclcheck(serialfn_oid, aggOwner,
    3749             :                                              ACL_EXECUTE);
    3750         160 :                 if (aclresult != ACLCHECK_OK)
    3751           0 :                     aclcheck_error(aclresult, OBJECT_FUNCTION,
    3752           0 :                                    get_func_name(serialfn_oid));
    3753         160 :                 InvokeFunctionExecuteHook(serialfn_oid);
    3754             :             }
    3755       59184 :             if (OidIsValid(deserialfn_oid))
    3756             :             {
    3757          48 :                 aclresult = pg_proc_aclcheck(deserialfn_oid, aggOwner,
    3758             :                                              ACL_EXECUTE);
    3759          48 :                 if (aclresult != ACLCHECK_OK)
    3760           0 :                     aclcheck_error(aclresult, OBJECT_FUNCTION,
    3761           0 :                                    get_func_name(deserialfn_oid));
    3762          48 :                 InvokeFunctionExecuteHook(deserialfn_oid);
    3763             :             }
    3764             :         }
    3765             : 
    3766             :         /*
    3767             :          * Get actual datatypes of the (nominal) aggregate inputs.  These
    3768             :          * could be different from the agg's declared input types, when the
    3769             :          * agg accepts ANY or a polymorphic type.
    3770             :          */
    3771       59184 :         numAggTransFnArgs = get_aggregate_argtypes(aggref,
    3772             :                                                    aggTransFnInputTypes);
    3773             : 
    3774             :         /* Count the "direct" arguments, if any */
    3775       59184 :         numDirectArgs = list_length(aggref->aggdirectargs);
    3776             : 
    3777             :         /* Detect how many arguments to pass to the finalfn */
    3778       59184 :         if (aggform->aggfinalextra)
    3779       39540 :             peragg->numFinalArgs = numAggTransFnArgs + 1;
    3780             :         else
    3781       19644 :             peragg->numFinalArgs = numDirectArgs + 1;
    3782             : 
    3783             :         /* Initialize any direct-argument expressions */
    3784       59184 :         peragg->aggdirectargs = ExecInitExprList(aggref->aggdirectargs,
    3785             :                                                  (PlanState *) aggstate);
    3786             : 
    3787             :         /*
    3788             :          * build expression trees using actual argument & result types for the
    3789             :          * finalfn, if it exists and is required.
    3790             :          */
    3791       59184 :         if (OidIsValid(finalfn_oid))
    3792             :         {
    3793       41840 :             build_aggregate_finalfn_expr(aggTransFnInputTypes,
    3794             :                                          peragg->numFinalArgs,
    3795             :                                          aggtranstype,
    3796             :                                          aggref->aggtype,
    3797             :                                          aggref->inputcollid,
    3798             :                                          finalfn_oid,
    3799             :                                          &finalfnexpr);
    3800       41840 :             fmgr_info(finalfn_oid, &peragg->finalfn);
    3801       41840 :             fmgr_info_set_expr((Node *) finalfnexpr, &peragg->finalfn);
    3802             :         }
    3803             : 
    3804             :         /* get info about the output value's datatype */
    3805       59184 :         get_typlenbyval(aggref->aggtype,
    3806             :                         &peragg->resulttypeLen,
    3807             :                         &peragg->resulttypeByVal);
    3808             : 
    3809             :         /*
    3810             :          * Build working state for invoking the transition function, if we
    3811             :          * haven't done it already.
    3812             :          */
    3813       59184 :         pertrans = &pertransstates[aggref->aggtransno];
    3814       59184 :         if (pertrans->aggref == NULL)
    3815             :         {
    3816             :             Datum       textInitVal;
    3817             :             Datum       initValue;
    3818             :             bool        initValueIsNull;
    3819             :             Oid         transfn_oid;
    3820             : 
    3821             :             /*
    3822             :              * If this aggregation is performing state combines, then instead
    3823             :              * of using the transition function, we'll use the combine
    3824             :              * function.
    3825             :              */
    3826       59012 :             if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
    3827             :             {
    3828         840 :                 transfn_oid = aggform->aggcombinefn;
    3829             : 
    3830             :                 /* If not set then the planner messed up */
    3831         840 :                 if (!OidIsValid(transfn_oid))
    3832           0 :                     elog(ERROR, "combinefn not set for aggregate function");
    3833             :             }
    3834             :             else
    3835       58172 :                 transfn_oid = aggform->aggtransfn;
    3836             : 
    3837       59012 :             aclresult = pg_proc_aclcheck(transfn_oid, aggOwner, ACL_EXECUTE);
    3838       59012 :             if (aclresult != ACLCHECK_OK)
    3839           0 :                 aclcheck_error(aclresult, OBJECT_FUNCTION,
    3840           0 :                                get_func_name(transfn_oid));
    3841       59012 :             InvokeFunctionExecuteHook(transfn_oid);
    3842             : 
    3843             :             /*
    3844             :              * initval is potentially null, so don't try to access it as a
    3845             :              * struct field. Must do it the hard way with SysCacheGetAttr.
    3846             :              */
    3847       59012 :             textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
    3848             :                                           Anum_pg_aggregate_agginitval,
    3849             :                                           &initValueIsNull);
    3850       59012 :             if (initValueIsNull)
    3851       46596 :                 initValue = (Datum) 0;
    3852             :             else
    3853       12416 :                 initValue = GetAggInitVal(textInitVal, aggtranstype);
    3854             : 
    3855       59012 :             if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
    3856             :             {
    3857         840 :                 Oid         combineFnInputTypes[] = {aggtranstype,
    3858             :                 aggtranstype};
    3859             : 
    3860             :                 /*
    3861             :                  * When combining there's only one input, the to-be-combined
    3862             :                  * transition value.  The transition value is not counted
    3863             :                  * here.
    3864             :                  */
    3865         840 :                 pertrans->numTransInputs = 1;
    3866             : 
    3867             :                 /* aggcombinefn always has two arguments of aggtranstype */
    3868         840 :                 build_pertrans_for_aggref(pertrans, aggstate, estate,
    3869             :                                           aggref, transfn_oid, aggtranstype,
    3870             :                                           serialfn_oid, deserialfn_oid,
    3871             :                                           initValue, initValueIsNull,
    3872             :                                           combineFnInputTypes, 2);
    3873             : 
    3874             :                 /*
    3875             :                  * Ensure that a combine function to combine INTERNAL states
    3876             :                  * is not strict. This should have been checked during CREATE
    3877             :                  * AGGREGATE, but the strict property could have been changed
    3878             :                  * since then.
    3879             :                  */
    3880         840 :                 if (pertrans->transfn.fn_strict && aggtranstype == INTERNALOID)
    3881           0 :                     ereport(ERROR,
    3882             :                             (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
    3883             :                              errmsg("combine function with transition type %s must not be declared STRICT",
    3884             :                                     format_type_be(aggtranstype))));
    3885             :             }
    3886             :             else
    3887             :             {
    3888             :                 /* Detect how many arguments to pass to the transfn */
    3889       58172 :                 if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
    3890         170 :                     pertrans->numTransInputs = list_length(aggref->args);
    3891             :                 else
    3892       58002 :                     pertrans->numTransInputs = numAggTransFnArgs;
    3893             : 
    3894       58172 :                 build_pertrans_for_aggref(pertrans, aggstate, estate,
    3895             :                                           aggref, transfn_oid, aggtranstype,
    3896             :                                           serialfn_oid, deserialfn_oid,
    3897             :                                           initValue, initValueIsNull,
    3898             :                                           aggTransFnInputTypes,
    3899             :                                           numAggTransFnArgs);
    3900             : 
    3901             :                 /*
    3902             :                  * If the transfn is strict and the initval is NULL, make sure
    3903             :                  * input type and transtype are the same (or at least
    3904             :                  * binary-compatible), so that it's OK to use the first
    3905             :                  * aggregated input value as the initial transValue.  This
    3906             :                  * should have been checked at agg definition time, but we
    3907             :                  * must check again in case the transfn's strictness property
    3908             :                  * has been changed.
    3909             :                  */
    3910       58172 :                 if (pertrans->transfn.fn_strict && pertrans->initValueIsNull)
    3911             :                 {
    3912        3598 :                     if (numAggTransFnArgs <= numDirectArgs ||
    3913        3598 :                         !IsBinaryCoercible(aggTransFnInputTypes[numDirectArgs],
    3914             :                                            aggtranstype))
    3915           0 :                         ereport(ERROR,
    3916             :                                 (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
    3917             :                                  errmsg("aggregate %u needs to have compatible input type and transition type",
    3918             :                                         aggref->aggfnoid)));
    3919             :                 }
    3920             :             }
    3921             :         }
    3922             :         else
    3923         172 :             pertrans->aggshared = true;
    3924       59184 :         ReleaseSysCache(aggTuple);
    3925             :     }
    3926             : 
    3927             :     /*
    3928             :      * Update aggstate->numaggs to be the number of unique aggregates found.
    3929             :      * Also set numstates to the number of unique transition states found.
    3930             :      */
    3931       58012 :     aggstate->numaggs = numaggs;
    3932       58012 :     aggstate->numtrans = numtrans;
    3933             : 
    3934             :     /*
    3935             :      * Last, check whether any more aggregates got added onto the node while
    3936             :      * we processed the expressions for the aggregate arguments (including not
    3937             :      * only the regular arguments and FILTER expressions handled immediately
    3938             :      * above, but any direct arguments we might've handled earlier).  If so,
    3939             :      * we have nested aggregate functions, which is semantically nonsensical,
    3940             :      * so complain.  (This should have been caught by the parser, so we don't
    3941             :      * need to work hard on a helpful error message; but we defend against it
    3942             :      * here anyway, just to be sure.)
    3943             :      */
    3944       58012 :     if (numaggrefs != list_length(aggstate->aggs))
    3945           0 :         ereport(ERROR,
    3946             :                 (errcode(ERRCODE_GROUPING_ERROR),
    3947             :                  errmsg("aggregate function calls cannot be nested")));
    3948             : 
    3949             :     /*
    3950             :      * Build expressions doing all the transition work at once. We build a
    3951             :      * different one for each phase, as the number of transition function
    3952             :      * invocation can differ between phases. Note this'll work both for
    3953             :      * transition and combination functions (although there'll only be one
    3954             :      * phase in the latter case).
    3955             :      */
    3956      169278 :     for (phaseidx = 0; phaseidx < aggstate->numphases; phaseidx++)
    3957             :     {
    3958      111266 :         AggStatePerPhase phase = &aggstate->phases[phaseidx];
    3959      111266 :         bool        dohash = false;
    3960      111266 :         bool        dosort = false;
    3961             : 
    3962             :         /* phase 0 doesn't necessarily exist */
    3963      111266 :         if (!phase->aggnode)
    3964       52974 :             continue;
    3965             : 
    3966       58292 :         if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 1)
    3967             :         {
    3968             :             /*
    3969             :              * Phase one, and only phase one, in a mixed agg performs both
    3970             :              * sorting and aggregation.
    3971             :              */
    3972         128 :             dohash = true;
    3973         128 :             dosort = true;
    3974             :         }
    3975       58164 :         else if (aggstate->aggstrategy == AGG_MIXED && phaseidx == 0)
    3976             :         {
    3977             :             /*
    3978             :              * No need to compute a transition function for an AGG_MIXED phase
    3979             :              * 0 - the contents of the hashtables will have been computed
    3980             :              * during phase 1.
    3981             :              */
    3982         128 :             continue;
    3983             :         }
    3984       58036 :         else if (phase->aggstrategy == AGG_PLAIN ||
    3985        6302 :                  phase->aggstrategy == AGG_SORTED)
    3986             :         {
    3987       53126 :             dohash = false;
    3988       53126 :             dosort = true;
    3989             :         }
    3990        4910 :         else if (phase->aggstrategy == AGG_HASHED)
    3991             :         {
    3992        4910 :             dohash = true;
    3993        4910 :             dosort = false;
    3994             :         }
    3995             :         else
    3996             :             Assert(false);
    3997             : 
    3998       58164 :         phase->evaltrans = ExecBuildAggTrans(aggstate, phase, dosort, dohash,
    3999             :                                              false);
    4000             : 
    4001             :         /* cache compiled expression for outer slot without NULL check */
    4002       58164 :         phase->evaltrans_cache[0][0] = phase->evaltrans;
    4003             :     }
    4004             : 
    4005       58012 :     return aggstate;
    4006             : }
    4007             : 
    4008             : /*
    4009             :  * Build the state needed to calculate a state value for an aggregate.
    4010             :  *
    4011             :  * This initializes all the fields in 'pertrans'. 'aggref' is the aggregate
    4012             :  * to initialize the state for. 'transfn_oid', 'aggtranstype', and the rest
    4013             :  * of the arguments could be calculated from 'aggref', but the caller has
    4014             :  * calculated them already, so might as well pass them.
    4015             :  *
    4016             :  * 'transfn_oid' may be either the Oid of the aggtransfn or the aggcombinefn.
    4017             :  */
    4018             : static void
    4019       59012 : build_pertrans_for_aggref(AggStatePerTrans pertrans,
    4020             :                           AggState *aggstate, EState *estate,
    4021             :                           Aggref *aggref,
    4022             :                           Oid transfn_oid, Oid aggtranstype,
    4023             :                           Oid aggserialfn, Oid aggdeserialfn,
    4024             :                           Datum initValue, bool initValueIsNull,
    4025             :                           Oid *inputTypes, int numArguments)
    4026             : {
    4027       59012 :     int         numGroupingSets = Max(aggstate->maxsets, 1);
    4028             :     Expr       *transfnexpr;
    4029             :     int         numTransArgs;
    4030       59012 :     Expr       *serialfnexpr = NULL;
    4031       59012 :     Expr       *deserialfnexpr = NULL;
    4032             :     ListCell   *lc;
    4033             :     int         numInputs;
    4034             :     int         numDirectArgs;
    4035             :     List       *sortlist;
    4036             :     int         numSortCols;
    4037             :     int         numDistinctCols;
    4038             :     int         i;
    4039             : 
    4040             :     /* Begin filling in the pertrans data */
    4041       59012 :     pertrans->aggref = aggref;
    4042       59012 :     pertrans->aggshared = false;
    4043       59012 :     pertrans->aggCollation = aggref->inputcollid;
    4044       59012 :     pertrans->transfn_oid = transfn_oid;
    4045       59012 :     pertrans->serialfn_oid = aggserialfn;
    4046       59012 :     pertrans->deserialfn_oid = aggdeserialfn;
    4047       59012 :     pertrans->initValue = initValue;
    4048       59012 :     pertrans->initValueIsNull = initValueIsNull;
    4049             : 
    4050             :     /* Count the "direct" arguments, if any */
    4051       59012 :     numDirectArgs = list_length(aggref->aggdirectargs);
    4052             : 
    4053             :     /* Count the number of aggregated input columns */
    4054       59012 :     pertrans->numInputs = numInputs = list_length(aggref->args);
    4055             : 
    4056       59012 :     pertrans->aggtranstype = aggtranstype;
    4057             : 
    4058             :     /* account for the current transition state */
    4059       59012 :     numTransArgs = pertrans->numTransInputs + 1;
    4060             : 
    4061             :     /*
    4062             :      * Set up infrastructure for calling the transfn.  Note that invtrans is
    4063             :      * not needed here.
    4064             :      */
    4065       59012 :     build_aggregate_transfn_expr(inputTypes,
    4066             :                                  numArguments,
    4067             :                                  numDirectArgs,
    4068       59012 :                                  aggref->aggvariadic,
    4069             :                                  aggtranstype,
    4070             :                                  aggref->inputcollid,
    4071             :                                  transfn_oid,
    4072             :                                  InvalidOid,
    4073             :                                  &transfnexpr,
    4074             :                                  NULL);
    4075             : 
    4076       59012 :     fmgr_info(transfn_oid, &pertrans->transfn);
    4077       59012 :     fmgr_info_set_expr((Node *) transfnexpr, &pertrans->transfn);
    4078             : 
    4079       59012 :     pertrans->transfn_fcinfo =
    4080       59012 :         (FunctionCallInfo) palloc(SizeForFunctionCallInfo(numTransArgs));
    4081       59012 :     InitFunctionCallInfoData(*pertrans->transfn_fcinfo,
    4082             :                              &pertrans->transfn,
    4083             :                              numTransArgs,
    4084             :                              pertrans->aggCollation,
    4085             :                              (void *) aggstate, NULL);
    4086             : 
    4087             :     /* get info about the state value's datatype */
    4088       59012 :     get_typlenbyval(aggtranstype,
    4089             :                     &pertrans->transtypeLen,
    4090             :                     &pertrans->transtypeByVal);
    4091             : 
    4092       59012 :     if (OidIsValid(aggserialfn))
    4093             :     {
    4094         160 :         build_aggregate_serialfn_expr(aggserialfn,
    4095             :                                       &serialfnexpr);
    4096         160 :         fmgr_info(aggserialfn, &pertrans->serialfn);
    4097         160 :         fmgr_info_set_expr((Node *) serialfnexpr, &pertrans->serialfn);
    4098             : 
    4099         160 :         pertrans->serialfn_fcinfo =
    4100         160 :             (FunctionCallInfo) palloc(SizeForFunctionCallInfo(1));
    4101         160 :         InitFunctionCallInfoData(*pertrans->serialfn_fcinfo,
    4102             :                                  &pertrans->serialfn,
    4103             :                                  1,
    4104             :                                  InvalidOid,
    4105             :                                  (void *) aggstate, NULL);
    4106             :     }
    4107             : 
    4108       59012 :     if (OidIsValid(aggdeserialfn))
    4109             :     {
    4110          48 :         build_aggregate_deserialfn_expr(aggdeserialfn,
    4111             :                                         &deserialfnexpr);
    4112          48 :         fmgr_info(aggdeserialfn, &pertrans->deserialfn);
    4113          48 :         fmgr_info_set_expr((Node *) deserialfnexpr, &pertrans->deserialfn);
    4114             : 
    4115          48 :         pertrans->deserialfn_fcinfo =
    4116          48 :             (FunctionCallInfo) palloc(SizeForFunctionCallInfo(2));
    4117          48 :         InitFunctionCallInfoData(*pertrans->deserialfn_fcinfo,
    4118             :                                  &pertrans->deserialfn,
    4119             :                                  2,
    4120             :                                  InvalidOid,
    4121             :                                  (void *) aggstate, NULL);
    4122             : 
    4123             :     }
    4124             : 
    4125             :     /*
    4126             :      * If we're doing either DISTINCT or ORDER BY for a plain agg, then we
    4127             :      * have a list of SortGroupClause nodes; fish out the data in them and
    4128             :      * stick them into arrays.  We ignore ORDER BY for an ordered-set agg,
    4129             :      * however; the agg's transfn and finalfn are responsible for that.
    4130             :      *
    4131             :      * Note that by construction, if there is a DISTINCT clause then the ORDER
    4132             :      * BY clause is a prefix of it (see transformDistinctClause).
    4133             :      */
    4134       59012 :     if (AGGKIND_IS_ORDERED_SET(aggref->aggkind))
    4135             :     {
    4136         170 :         sortlist = NIL;
    4137         170 :         numSortCols = numDistinctCols = 0;
    4138             :     }
    4139       58842 :     else if (aggref->aggdistinct)
    4140             :     {
    4141         290 :         sortlist = aggref->aggdistinct;
    4142         290 :         numSortCols = numDistinctCols = list_length(sortlist);
    4143             :         Assert(numSortCols >= list_length(aggref->aggorder));
    4144             :     }
    4145             :     else
    4146             :     {
    4147       58552 :         sortlist = aggref->aggorder;
    4148       58552 :         numSortCols = list_length(sortlist);
    4149       58552 :         numDistinctCols = 0;
    4150             :     }
    4151             : 
    4152       59012 :     pertrans->numSortCols = numSortCols;
    4153       59012 :     pertrans->numDistinctCols = numDistinctCols;
    4154             : 
    4155             :     /*
    4156             :      * If we have either sorting or filtering to do, create a tupledesc and
    4157             :      * slot corresponding to the aggregated inputs (including sort
    4158             :      * expressions) of the agg.
    4159             :      */
    4160       59012 :     if (numSortCols > 0 || aggref->aggfilter)
    4161             :     {
    4162       32416 :         pertrans->sortdesc = ExecTypeFromTL(aggref->args);
    4163       32416 :         pertrans->sortslot =
    4164       32416 :             ExecInitExtraTupleSlot(estate, pertrans->sortdesc,
    4165             :                                    &TTSOpsMinimalTuple);
    4166             :     }
    4167             : 
    4168       59012 :     if (numSortCols > 0)
    4169             :     {
    4170             :         /*
    4171             :          * We don't implement DISTINCT or ORDER BY aggs in the HASHED case
    4172             :          * (yet)
    4173             :          */
    4174             :         Assert(aggstate->aggstrategy != AGG_HASHED && aggstate->aggstrategy != AGG_MIXED);
    4175             : 
    4176             :         /* ORDER BY aggregates are not supported with partial aggregation */
    4177             :         Assert(!DO_AGGSPLIT_COMBINE(aggstate->aggsplit));
    4178             : 
    4179             :         /* If we have only one input, we need its len/byval info. */
    4180       31916 :         if (numInputs == 1)
    4181             :         {
    4182       12348 :             get_typlenbyval(inputTypes[numDirectArgs],
    4183             :                             &pertrans->inputtypeLen,
    4184             :                             &pertrans->inputtypeByVal);
    4185             :         }
    4186       19568 :         else if (numDistinctCols > 0)
    4187             :         {
    4188             :             /* we will need an extra slot to store prior values */
    4189          56 :             pertrans->uniqslot =
    4190          56 :                 ExecInitExtraTupleSlot(estate, pertrans->sortdesc,
    4191             :                                        &TTSOpsMinimalTuple);
    4192             :         }
    4193             : 
    4194             :         /* Extract the sort information for use later */
    4195       31916 :         pertrans->sortColIdx =
    4196       31916 :             (AttrNumber *) palloc(numSortCols * sizeof(AttrNumber));
    4197       31916 :         pertrans->sortOperators =
    4198       31916 :             (Oid *) palloc(numSortCols * sizeof(Oid));
    4199       31916 :         pertrans->sortCollations =
    4200       31916 :             (Oid *) palloc(numSortCols * sizeof(Oid));
    4201       31916 :         pertrans->sortNullsFirst =
    4202       31916 :             (bool *) palloc(numSortCols * sizeof(bool));
    4203             : 
    4204       31916 :         i = 0;
    4205       63956 :         foreach(lc, sortlist)
    4206             :         {
    4207       32040 :             SortGroupClause *sortcl = (SortGroupClause *) lfirst(lc);
    4208       32040 :             TargetEntry *tle = get_sortgroupclause_tle(sortcl, aggref->args);
    4209             : 
    4210             :             /* the parser should have made sure of this */
    4211             :             Assert(OidIsValid(sortcl->sortop));
    4212             : 
    4213       32040 :             pertrans->sortColIdx[i] = tle->resno;
    4214       32040 :             pertrans->sortOperators[i] = sortcl->sortop;
    4215       32040 :             pertrans->sortCollations[i] = exprCollation((Node *) tle->expr);
    4216       32040 :             pertrans->sortNullsFirst[i] = sortcl->nulls_first;
    4217       32040 :             i++;
    4218             :         }
    4219             :         Assert(i == numSortCols);
    4220             :     }
    4221             : 
    4222       59012 :     if (aggref->aggdistinct)
    4223             :     {
    4224             :         Oid        *ops;
    4225             : 
    4226             :         Assert(numArguments > 0);
    4227             :         Assert(list_length(aggref->aggdistinct) == numDistinctCols);
    4228             : 
    4229         290 :         ops = palloc(numDistinctCols * sizeof(Oid));
    4230             : 
    4231         290 :         i = 0;
    4232         684 :         foreach(lc, aggref->aggdistinct)
    4233         394 :             ops[i++] = ((SortGroupClause *) lfirst(lc))->eqop;
    4234             : 
    4235             :         /* lookup / build the necessary comparators */
    4236         290 :         if (numDistinctCols == 1)
    4237         234 :             fmgr_info(get_opcode(ops[0]), &pertrans->equalfnOne);
    4238             :         else
    4239          56 :             pertrans->equalfnMulti =
    4240          56 :                 execTuplesMatchPrepare(pertrans->sortdesc,
    4241             :                                        numDistinctCols,
    4242          56 :                                        pertrans->sortColIdx,
    4243             :                                        ops,
    4244          56 :                                        pertrans->sortCollations,
    4245             :                                        &aggstate->ss.ps);
    4246         290 :         pfree(ops);
    4247             :     }
    4248             : 
    4249       59012 :     pertrans->sortstates = (Tuplesortstate **)
    4250       59012 :         palloc0(sizeof(Tuplesortstate *) * numGroupingSets);
    4251       59012 : }
    4252             : 
    4253             : 
    4254             : static Datum
    4255       12416 : GetAggInitVal(Datum textInitVal, Oid transtype)
    4256             : {
    4257             :     Oid         typinput,
    4258             :                 typioparam;
    4259             :     char       *strInitVal;
    4260             :     Datum       initVal;
    4261             : 
    4262       12416 :     getTypeInputInfo(transtype, &typinput, &typioparam);
    4263       12416 :     strInitVal = TextDatumGetCString(textInitVal);
    4264       12416 :     initVal = OidInputFunctionCall(typinput, strInitVal,
    4265             :                                    typioparam, -1);
    4266       12416 :     pfree(strInitVal);
    4267       12416 :     return initVal;
    4268             : }
    4269             : 
    4270             : void
    4271       57960 : ExecEndAgg(AggState *node)
    4272             : {
    4273             :     PlanState  *outerPlan;
    4274             :     int         transno;
    4275       57960 :     int         numGroupingSets = Max(node->maxsets, 1);
    4276             :     int         setno;
    4277             : 
    4278             :     /*
    4279             :      * When ending a parallel worker, copy the statistics gathered by the
    4280             :      * worker back into shared memory so that it can be picked up by the main
    4281             :      * process to report in EXPLAIN ANALYZE.
    4282             :      */
    4283       57960 :     if (node->shared_info && IsParallelWorker())
    4284             :     {
    4285             :         AggregateInstrumentation *si;
    4286             : 
    4287             :         Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
    4288         116 :         si = &node->shared_info->sinstrument[ParallelWorkerNumber];
    4289         116 :         si->hash_batches_used = node->hash_batches_used;
    4290         116 :         si->hash_disk_used = node->hash_disk_used;
    4291         116 :         si->hash_mem_peak = node->hash_mem_peak;
    4292             :     }
    4293             : 
    4294             :     /* Make sure we have closed any open tuplesorts */
    4295             : 
    4296       57960 :     if (node->sort_in)
    4297          96 :         tuplesort_end(node->sort_in);
    4298       57960 :     if (node->sort_out)
    4299          28 :         tuplesort_end(node->sort_out);
    4300             : 
    4301       57960 :     hashagg_reset_spill_state(node);
    4302             : 
    4303       57960 :     if (node->hash_metacxt != NULL)
    4304             :     {
    4305        5032 :         MemoryContextDelete(node->hash_metacxt);
    4306        5032 :         node->hash_metacxt = NULL;
    4307             :     }
    4308             : 
    4309      116916 :     for (transno = 0; transno < node->numtrans; transno++)
    4310             :     {
    4311       58956 :         AggStatePerTrans pertrans = &node->pertrans[transno];
    4312             : 
    4313      118580 :         for (setno = 0; setno < numGroupingSets; setno++)
    4314             :         {
    4315       59624 :             if (pertrans->sortstates[setno])
    4316           0 :                 tuplesort_end(pertrans->sortstates[setno]);
    4317             :         }
    4318             :     }
    4319             : 
    4320             :     /* And ensure any agg shutdown callbacks have been called */
    4321      116476 :     for (setno = 0; setno < numGroupingSets; setno++)
    4322       58516 :         ReScanExprContext(node->aggcontexts[setno]);
    4323       57960 :     if (node->hashcontext)
    4324        5032 :         ReScanExprContext(node->hashcontext);
    4325             : 
    4326             :     /*
    4327             :      * We don't actually free any ExprContexts here (see comment in
    4328             :      * ExecFreeExprContext), just unlinking the output one from the plan node
    4329             :      * suffices.
    4330             :      */
    4331       57960 :     ExecFreeExprContext(&node->ss.ps);
    4332             : 
    4333             :     /* clean up tuple table */
    4334       57960 :     ExecClearTuple(node->ss.ss_ScanTupleSlot);
    4335             : 
    4336       57960 :     outerPlan = outerPlanState(node);
    4337       57960 :     ExecEndNode(outerPlan);
    4338       57960 : }
    4339             : 
    4340             : void
    4341     1532594 : ExecReScanAgg(AggState *node)
    4342             : {
    4343     1532594 :     ExprContext *econtext = node->ss.ps.ps_ExprContext;
    4344     1532594 :     PlanState  *outerPlan = outerPlanState(node);
    4345     1532594 :     Agg        *aggnode = (Agg *) node->ss.ps.plan;
    4346             :     int         transno;
    4347     1532594 :     int         numGroupingSets = Max(node->maxsets, 1);
    4348             :     int         setno;
    4349             : 
    4350     1532594 :     node->agg_done = false;
    4351             : 
    4352     1532594 :     if (node->aggstrategy == AGG_HASHED)
    4353             :     {
    4354             :         /*
    4355             :          * In the hashed case, if we haven't yet built the hash table then we
    4356             :          * can just return; nothing done yet, so nothing to undo. If subnode's
    4357             :          * chgParam is not NULL then it will be re-scanned by ExecProcNode,
    4358             :          * else no reason to re-scan it at all.
    4359             :          */
    4360       59896 :         if (!node->table_filled)
    4361         504 :             return;
    4362             : 
    4363             :         /*
    4364             :          * If we do have the hash table, and it never spilled, and the subplan
    4365             :          * does not have any parameter changes, and none of our own parameter
    4366             :          * changes affect input expressions of the aggregated functions, then
    4367             :          * we can just rescan the existing hash table; no need to build it
    4368             :          * again.
    4369             :          */
    4370       59392 :         if (outerPlan->chgParam == NULL && !node->hash_ever_spilled &&
    4371         572 :             !bms_overlap(node->ss.ps.chgParam, aggnode->aggParams))
    4372             :         {
    4373         556 :             ResetTupleHashIterator(node->perhash[0].hashtable,
    4374             :                                    &node->perhash[0].hashiter);
    4375         556 :             select_current_set(node, 0, true);
    4376         556 :             return;
    4377             :         }
    4378             :     }
    4379             : 
    4380             :     /* Make sure we have closed any open tuplesorts */
    4381     3004280 :     for (transno = 0; transno < node->numtrans; transno++)
    4382             :     {
    4383     2945516 :         for (setno = 0; setno < numGroupingSets; setno++)
    4384             :         {
    4385     1472770 :             AggStatePerTrans pertrans = &node->pertrans[transno];
    4386             : 
    4387     1472770 :             if (pertrans->sortstates[setno])
    4388             :             {
    4389           0 :                 tuplesort_end(pertrans->sortstates[setno]);
    4390           0 :                 pertrans->sortstates[setno] = NULL;
    4391             :             }
    4392             :         }
    4393             :     }
    4394             : 
    4395             :     /*
    4396             :      * We don't need to ReScanExprContext the output tuple context here;
    4397             :      * ExecReScan already did it. But we do need to reset our per-grouping-set
    4398             :      * contexts, which may have transvalues stored in them. (We use rescan
    4399             :      * rather than just reset because transfns may have registered callbacks
    4400             :      * that need to be run now.) For the AGG_HASHED case, see below.
    4401             :      */
    4402             : 
    4403     3063092 :     for (setno = 0; setno < numGroupingSets; setno++)
    4404             :     {
    4405     1531558 :         ReScanExprContext(node->aggcontexts[setno]);
    4406             :     }
    4407             : 
    4408             :     /* Release first tuple of group, if we have made a copy */
    4409     1531534 :     if (node->grp_firstTuple != NULL)
    4410             :     {
    4411           0 :         heap_freetuple(node->grp_firstTuple);
    4412           0 :         node->grp_firstTuple = NULL;
    4413             :     }
    4414     1531534 :     ExecClearTuple(node->ss.ss_ScanTupleSlot);
    4415             : 
    4416             :     /* Forget current agg values */
    4417     3004280 :     MemSet(econtext->ecxt_aggvalues, 0, sizeof(Datum) * node->numaggs);
    4418     1531534 :     MemSet(econtext->ecxt_aggnulls, 0, sizeof(bool) * node->numaggs);
    4419             : 
    4420             :     /*
    4421             :      * With AGG_HASHED/MIXED, the hash table is allocated in a sub-context of
    4422             :      * the hashcontext. This used to be an issue, but now, resetting a context
    4423             :      * automatically deletes sub-contexts too.
    4424             :      */
    4425     1531534 :     if (node->aggstrategy == AGG_HASHED || node->aggstrategy == AGG_MIXED)
    4426             :     {
    4427       58856 :         hashagg_reset_spill_state(node);
    4428             : 
    4429       58856 :         node->hash_ever_spilled = false;
    4430       58856 :         node->hash_spill_mode = false;
    4431       58856 :         node->hash_ngroups_current = 0;
    4432             : 
    4433       58856 :         ReScanExprContext(node->hashcontext);
    4434             :         /* Rebuild an empty hash table */
    4435       58856 :         build_hash_tables(node);
    4436       58856 :         node->table_filled = false;
    4437             :         /* iterator will be reset when the table is filled */
    4438             : 
    4439       58856 :         hashagg_recompile_expressions(node, false, false);
    4440             :     }
    4441             : 
    4442     1531534 :     if (node->aggstrategy != AGG_HASHED)
    4443             :     {
    4444             :         /*
    4445             :          * Reset the per-group state (in particular, mark transvalues null)
    4446             :          */
    4447     2945420 :         for (setno = 0; setno < numGroupingSets; setno++)
    4448             :         {
    4449     4418198 :             MemSet(node->pergroups[setno], 0,
    4450             :                    sizeof(AggStatePerGroupData) * node->numaggs);
    4451             :         }
    4452             : 
    4453             :         /* reset to phase 1 */
    4454     1472698 :         initialize_phase(node, 1);
    4455             : 
    4456     1472698 :         node->input_done = false;
    4457     1472698 :         node->projected_set = -1;
    4458             :     }
    4459             : 
    4460     1531534 :     if (outerPlan->chgParam == NULL)
    4461         102 :         ExecReScan(outerPlan);
    4462             : }
    4463             : 
    4464             : 
    4465             : /***********************************************************************
    4466             :  * API exposed to aggregate functions
    4467             :  ***********************************************************************/
    4468             : 
    4469             : 
    4470             : /*
    4471             :  * AggCheckCallContext - test if a SQL function is being called as an aggregate
    4472             :  *
    4473             :  * The transition and/or final functions of an aggregate may want to verify
    4474             :  * that they are being called as aggregates, rather than as plain SQL
    4475             :  * functions.  They should use this function to do so.  The return value
    4476             :  * is nonzero if being called as an aggregate, or zero if not.  (Specific
    4477             :  * nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more
    4478             :  * values could conceivably appear in future.)
    4479             :  *
    4480             :  * If aggcontext isn't NULL, the function also stores at *aggcontext the
    4481             :  * identity of the memory context that aggregate transition values are being
    4482             :  * stored in.  Note that the same aggregate call site (flinfo) may be called
    4483             :  * interleaved on different transition values in different contexts, so it's
    4484             :  * not kosher to cache aggcontext under fn_extra.  It is, however, kosher to
    4485             :  * cache it in the transvalue itself (for internal-type transvalues).
    4486             :  */
    4487             : int
    4488     3163846 : AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
    4489             : {
    4490     3163846 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4491             :     {
    4492     3114490 :         if (aggcontext)
    4493             :         {
    4494     1375510 :             AggState   *aggstate = ((AggState *) fcinfo->context);
    4495     1375510 :             ExprContext *cxt = aggstate->curaggcontext;
    4496             : 
    4497     1375510 :             *aggcontext = cxt->ecxt_per_tuple_memory;
    4498             :         }
    4499     3114490 :         return AGG_CONTEXT_AGGREGATE;
    4500             :     }
    4501       49356 :     if (fcinfo->context && IsA(fcinfo->context, WindowAggState))
    4502             :     {
    4503       48096 :         if (aggcontext)
    4504         400 :             *aggcontext = ((WindowAggState *) fcinfo->context)->curaggcontext;
    4505       48096 :         return AGG_CONTEXT_WINDOW;
    4506             :     }
    4507             : 
    4508             :     /* this is just to prevent "uninitialized variable" warnings */
    4509        1260 :     if (aggcontext)
    4510        1228 :         *aggcontext = NULL;
    4511        1260 :     return 0;
    4512             : }
    4513             : 
    4514             : /*
    4515             :  * AggGetAggref - allow an aggregate support function to get its Aggref
    4516             :  *
    4517             :  * If the function is being called as an aggregate support function,
    4518             :  * return the Aggref node for the aggregate call.  Otherwise, return NULL.
    4519             :  *
    4520             :  * Aggregates sharing the same inputs and transition functions can get
    4521             :  * merged into a single transition calculation.  If the transition function
    4522             :  * calls AggGetAggref, it will get some one of the Aggrefs for which it is
    4523             :  * executing.  It must therefore not pay attention to the Aggref fields that
    4524             :  * relate to the final function, as those are indeterminate.  But if a final
    4525             :  * function calls AggGetAggref, it will get a precise result.
    4526             :  *
    4527             :  * Note that if an aggregate is being used as a window function, this will
    4528             :  * return NULL.  We could provide a similar function to return the relevant
    4529             :  * WindowFunc node in such cases, but it's not needed yet.
    4530             :  */
    4531             : Aggref *
    4532         166 : AggGetAggref(FunctionCallInfo fcinfo)
    4533             : {
    4534         166 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4535             :     {
    4536         166 :         AggState   *aggstate = (AggState *) fcinfo->context;
    4537             :         AggStatePerAgg curperagg;
    4538             :         AggStatePerTrans curpertrans;
    4539             : 
    4540             :         /* check curperagg (valid when in a final function) */
    4541         166 :         curperagg = aggstate->curperagg;
    4542             : 
    4543         166 :         if (curperagg)
    4544           0 :             return curperagg->aggref;
    4545             : 
    4546             :         /* check curpertrans (valid when in a transition function) */
    4547         166 :         curpertrans = aggstate->curpertrans;
    4548             : 
    4549         166 :         if (curpertrans)
    4550         166 :             return curpertrans->aggref;
    4551             :     }
    4552           0 :     return NULL;
    4553             : }
    4554             : 
    4555             : /*
    4556             :  * AggGetTempMemoryContext - fetch short-term memory context for aggregates
    4557             :  *
    4558             :  * This is useful in agg final functions; the context returned is one that
    4559             :  * the final function can safely reset as desired.  This isn't useful for
    4560             :  * transition functions, since the context returned MAY (we don't promise)
    4561             :  * be the same as the context those are called in.
    4562             :  *
    4563             :  * As above, this is currently not useful for aggs called as window functions.
    4564             :  */
    4565             : MemoryContext
    4566           0 : AggGetTempMemoryContext(FunctionCallInfo fcinfo)
    4567             : {
    4568           0 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4569             :     {
    4570           0 :         AggState   *aggstate = (AggState *) fcinfo->context;
    4571             : 
    4572           0 :         return aggstate->tmpcontext->ecxt_per_tuple_memory;
    4573             :     }
    4574           0 :     return NULL;
    4575             : }
    4576             : 
    4577             : /*
    4578             :  * AggStateIsShared - find out whether transition state is shared
    4579             :  *
    4580             :  * If the function is being called as an aggregate support function,
    4581             :  * return true if the aggregate's transition state is shared across
    4582             :  * multiple aggregates, false if it is not.
    4583             :  *
    4584             :  * Returns true if not called as an aggregate support function.
    4585             :  * This is intended as a conservative answer, ie "no you'd better not
    4586             :  * scribble on your input".  In particular, will return true if the
    4587             :  * aggregate is being used as a window function, which is a scenario
    4588             :  * in which changing the transition state is a bad idea.  We might
    4589             :  * want to refine the behavior for the window case in future.
    4590             :  */
    4591             : bool
    4592         166 : AggStateIsShared(FunctionCallInfo fcinfo)
    4593             : {
    4594         166 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4595             :     {
    4596         166 :         AggState   *aggstate = (AggState *) fcinfo->context;
    4597             :         AggStatePerAgg curperagg;
    4598             :         AggStatePerTrans curpertrans;
    4599             : 
    4600             :         /* check curperagg (valid when in a final function) */
    4601         166 :         curperagg = aggstate->curperagg;
    4602             : 
    4603         166 :         if (curperagg)
    4604           0 :             return aggstate->pertrans[curperagg->transno].aggshared;
    4605             : 
    4606             :         /* check curpertrans (valid when in a transition function) */
    4607         166 :         curpertrans = aggstate->curpertrans;
    4608             : 
    4609         166 :         if (curpertrans)
    4610         166 :             return curpertrans->aggshared;
    4611             :     }
    4612           0 :     return true;
    4613             : }
    4614             : 
    4615             : /*
    4616             :  * AggRegisterCallback - register a cleanup callback for an aggregate
    4617             :  *
    4618             :  * This is useful for aggs to register shutdown callbacks, which will ensure
    4619             :  * that non-memory resources are freed.  The callback will occur just before
    4620             :  * the associated aggcontext (as returned by AggCheckCallContext) is reset,
    4621             :  * either between groups or as a result of rescanning the query.  The callback
    4622             :  * will NOT be called on error paths.  The typical use-case is for freeing of
    4623             :  * tuplestores or tuplesorts maintained in aggcontext, or pins held by slots
    4624             :  * created by the agg functions.  (The callback will not be called until after
    4625             :  * the result of the finalfn is no longer needed, so it's safe for the finalfn
    4626             :  * to return data that will be freed by the callback.)
    4627             :  *
    4628             :  * As above, this is currently not useful for aggs called as window functions.
    4629             :  */
    4630             : void
    4631         454 : AggRegisterCallback(FunctionCallInfo fcinfo,
    4632             :                     ExprContextCallbackFunction func,
    4633             :                     Datum arg)
    4634             : {
    4635         454 :     if (fcinfo->context && IsA(fcinfo->context, AggState))
    4636             :     {
    4637         454 :         AggState   *aggstate = (AggState *) fcinfo->context;
    4638         454 :         ExprContext *cxt = aggstate->curaggcontext;
    4639             : 
    4640         454 :         RegisterExprContextCallback(cxt, func, arg);
    4641             : 
    4642         454 :         return;
    4643             :     }
    4644           0 :     elog(ERROR, "aggregate function cannot register a callback in this context");
    4645             : }
    4646             : 
    4647             : 
    4648             : /* ----------------------------------------------------------------
    4649             :  *                      Parallel Query Support
    4650             :  * ----------------------------------------------------------------
    4651             :  */
    4652             : 
    4653             :  /* ----------------------------------------------------------------
    4654             :   *     ExecAggEstimate
    4655             :   *
    4656             :   *     Estimate space required to propagate aggregate statistics.
    4657             :   * ----------------------------------------------------------------
    4658             :   */
    4659             : void
    4660         352 : ExecAggEstimate(AggState *node, ParallelContext *pcxt)
    4661             : {
    4662             :     Size        size;
    4663             : 
    4664             :     /* don't need this if not instrumenting or no workers */
    4665         352 :     if (!node->ss.ps.instrument || pcxt->nworkers == 0)
    4666         280 :         return;
    4667             : 
    4668          72 :     size = mul_size(pcxt->nworkers, sizeof(AggregateInstrumentation));
    4669          72 :     size = add_size(size, offsetof(SharedAggInfo, sinstrument));
    4670          72 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    4671          72 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    4672             : }
    4673             : 
    4674             : /* ----------------------------------------------------------------
    4675             :  *      ExecAggInitializeDSM
    4676             :  *
    4677             :  *      Initialize DSM space for aggregate statistics.
    4678             :  * ----------------------------------------------------------------
    4679             :  */
    4680             : void
    4681         352 : ExecAggInitializeDSM(AggState *node, ParallelContext *pcxt)
    4682             : {
    4683             :     Size        size;
    4684             : 
    4685             :     /* don't need this if not instrumenting or no workers */
    4686         352 :     if (!node->ss.ps.instrument || pcxt->nworkers == 0)
    4687         280 :         return;
    4688             : 
    4689          72 :     size = offsetof(SharedAggInfo, sinstrument)
    4690          72 :         + pcxt->nworkers * sizeof(AggregateInstrumentation);
    4691          72 :     node->shared_info = shm_toc_allocate(pcxt->toc, size);
    4692             :     /* ensure any unfilled slots will contain zeroes */
    4693          72 :     memset(node->shared_info, 0, size);
    4694          72 :     node->shared_info->num_workers = pcxt->nworkers;
    4695          72 :     shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
    4696          72 :                    node->shared_info);
    4697             : }
    4698             : 
    4699             : /* ----------------------------------------------------------------
    4700             :  *      ExecAggInitializeWorker
    4701             :  *
    4702             :  *      Attach worker to DSM space for aggregate statistics.
    4703             :  * ----------------------------------------------------------------
    4704             :  */
    4705             : void
    4706         982 : ExecAggInitializeWorker(AggState *node, ParallelWorkerContext *pwcxt)
    4707             : {
    4708         982 :     node->shared_info =
    4709         982 :         shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
    4710         982 : }
    4711             : 
    4712             : /* ----------------------------------------------------------------
    4713             :  *      ExecAggRetrieveInstrumentation
    4714             :  *
    4715             :  *      Transfer aggregate statistics from DSM to private memory.
    4716             :  * ----------------------------------------------------------------
    4717             :  */
    4718             : void
    4719          72 : ExecAggRetrieveInstrumentation(AggState *node)
    4720             : {
    4721             :     Size        size;
    4722             :     SharedAggInfo *si;
    4723             : 
    4724          72 :     if (node->shared_info == NULL)
    4725           0 :         return;
    4726             : 
    4727          72 :     size = offsetof(SharedAggInfo, sinstrument)
    4728          72 :         + node->shared_info->num_workers * sizeof(AggregateInstrumentation);
    4729          72 :     si = palloc(size);
    4730          72 :     memcpy(si, node->shared_info, size);
    4731          72 :     node->shared_info = si;
    4732             : }

Generated by: LCOV version 1.14