LCOV - code coverage report
Current view: top level - src/backend/executor - nodeHash.c (source / functions)
Test:         PostgreSQL 15devel
Date:         2021-12-03 04:09:03
Coverage:     Lines: 1040 of 1089 hit (95.5 %)    Functions: 52 of 53 hit (98.1 %)
Legend:       Lines: hit / not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeHash.c
       4             :  *    Routines to hash relations for hashjoin
       5             :  *
       6             :  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeHash.c
      12             :  *
      13             :  * See note on parallelism in nodeHashjoin.c.
      14             :  *
      15             :  *-------------------------------------------------------------------------
      16             :  */
      17             : /*
      18             :  * INTERFACE ROUTINES
      19             :  *      MultiExecHash   - generate an in-memory hash table of the relation
      20             :  *      ExecInitHash    - initialize node and subnodes
      21             :  *      ExecEndHash     - shutdown node and subnodes
      22             :  */
      23             : 
      24             : #include "postgres.h"
      25             : 
      26             : #include <math.h>
      27             : #include <limits.h>
      28             : 
      29             : #include "access/htup_details.h"
      30             : #include "access/parallel.h"
      31             : #include "catalog/pg_statistic.h"
      32             : #include "commands/tablespace.h"
      33             : #include "executor/execdebug.h"
      34             : #include "executor/hashjoin.h"
      35             : #include "executor/nodeHash.h"
      36             : #include "executor/nodeHashjoin.h"
      37             : #include "miscadmin.h"
      38             : #include "pgstat.h"
      39             : #include "port/atomics.h"
      40             : #include "port/pg_bitutils.h"
      41             : #include "utils/dynahash.h"
      42             : #include "utils/guc.h"
      43             : #include "utils/lsyscache.h"
      44             : #include "utils/memutils.h"
      45             : #include "utils/syscache.h"
      46             : 
      47             : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
      48             : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
      49             : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
      50             : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
      51             : static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
      52             :                                   int mcvsToUse);
      53             : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
      54             :                                     TupleTableSlot *slot,
      55             :                                     uint32 hashvalue,
      56             :                                     int bucketNumber);
      57             : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
      58             : 
      59             : static void *dense_alloc(HashJoinTable hashtable, Size size);
      60             : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
      61             :                                                 size_t size,
      62             :                                                 dsa_pointer *shared);
      63             : static void MultiExecPrivateHash(HashState *node);
      64             : static void MultiExecParallelHash(HashState *node);
      65             : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable table,
      66             :                                                        int bucketno);
      67             : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable table,
      68             :                                                       HashJoinTuple tuple);
      69             : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
      70             :                                              HashJoinTuple tuple,
      71             :                                              dsa_pointer tuple_shared);
      72             : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
      73             : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
      74             : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
      75             : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
      76             : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable table,
      77             :                                                      dsa_pointer *shared);
      78             : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
      79             :                                           int batchno,
      80             :                                           size_t size);
      81             : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
      82             : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
      83             : 
      84             : 
      85             : /* ----------------------------------------------------------------
      86             :  *      ExecHash
      87             :  *
      88             :  *      stub for pro forma compliance
      89             :  * ----------------------------------------------------------------
      90             :  */
      91             : static TupleTableSlot *
      92           0 : ExecHash(PlanState *pstate)
      93             : {
      94           0 :     elog(ERROR, "Hash node does not support ExecProcNode call convention");
      95             :     return NULL;
      96             : }
      97             : 
      98             : /* ----------------------------------------------------------------
      99             :  *      MultiExecHash
     100             :  *
     101             :  *      build hash table for hashjoin, doing partitioning if more
     102             :  *      than one batch is required.
     103             :  * ----------------------------------------------------------------
     104             :  */
     105             : Node *
     106      394444 : MultiExecHash(HashState *node)
     107             : {
     108             :     /* must provide our own instrumentation support */
     109      394444 :     if (node->ps.instrument)
     110         194 :         InstrStartNode(node->ps.instrument);
     111             : 
     112      394444 :     if (node->parallel_state != NULL)
     113         216 :         MultiExecParallelHash(node);
     114             :     else
     115      394228 :         MultiExecPrivateHash(node);
     116             : 
     117             :     /* must provide our own instrumentation support */
     118      394444 :     if (node->ps.instrument)
     119         194 :         InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
     120             : 
     121             :     /*
     122             :      * We do not return the hash table directly because it's not a subtype of
     123             :      * Node, and so would violate the MultiExecProcNode API.  Instead, our
     124             :      * parent Hashjoin node is expected to know how to fish it out of our node
     125             :      * state.  Ugly but not really worth cleaning up, since Hashjoin knows
     126             :      * quite a bit more about Hash besides that.
     127             :      */
     128      394444 :     return NULL;
     129             : }
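
A rough sketch of the caller side, for illustration (condensed from the
HJ_BUILD_HASHTABLE step in nodeHashjoin.c; error handling, skew setup and
parallel-aware details omitted): the parent HashJoin stores the table in the
Hash node's state before kicking off the build, then reads it back afterwards.

    HashState  *hashNode = (HashState *) innerPlanState(node);
    HashJoinTable hashtable;

    /* build an empty table, remembering it in both nodes' state */
    hashtable = ExecHashTableCreate(hashNode,
                                    node->hj_HashOperators,
                                    node->hj_Collations,
                                    HJ_FILL_INNER(node));
    node->hj_HashTable = hashtable;
    hashNode->hashtable = hashtable;

    /* MultiExecHash() fills it; the NULL return value is ignored */
    (void) MultiExecProcNode((PlanState *) hashNode);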
     130             : 
     131             : /* ----------------------------------------------------------------
     132             :  *      MultiExecPrivateHash
     133             :  *
     134             :  *      parallel-oblivious version, building a backend-private
     135             :  *      hash table and (if necessary) batch files.
     136             :  * ----------------------------------------------------------------
     137             :  */
     138             : static void
     139      394228 : MultiExecPrivateHash(HashState *node)
     140             : {
     141             :     PlanState  *outerNode;
     142             :     List       *hashkeys;
     143             :     HashJoinTable hashtable;
     144             :     TupleTableSlot *slot;
     145             :     ExprContext *econtext;
     146             :     uint32      hashvalue;
     147             : 
     148             :     /*
     149             :      * get state info from node
     150             :      */
     151      394228 :     outerNode = outerPlanState(node);
     152      394228 :     hashtable = node->hashtable;
     153             : 
     154             :     /*
     155             :      * set expression context
     156             :      */
     157      394228 :     hashkeys = node->hashkeys;
     158      394228 :     econtext = node->ps.ps_ExprContext;
     159             : 
     160             :     /*
     161             :      * Get all tuples from the node below the Hash node and insert into the
     162             :      * hash table (or temp files).
     163             :      */
     164             :     for (;;)
     165             :     {
     166    15271468 :         slot = ExecProcNode(outerNode);
     167    15271468 :         if (TupIsNull(slot))
     168             :             break;
     169             :         /* We have to compute the hash value */
     170    14877240 :         econtext->ecxt_outertuple = slot;
     171    14877240 :         if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
     172    14877240 :                                  false, hashtable->keepNulls,
     173             :                                  &hashvalue))
     174             :         {
     175             :             int         bucketNumber;
     176             : 
     177    14877232 :             bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
     178    14877232 :             if (bucketNumber != INVALID_SKEW_BUCKET_NO)
     179             :             {
     180             :                 /* It's a skew tuple, so put it into that hash table */
     181         172 :                 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
     182             :                                         bucketNumber);
     183         172 :                 hashtable->skewTuples += 1;
     184             :             }
     185             :             else
     186             :             {
     187             :                 /* Not subject to skew optimization, so insert normally */
     188    14877060 :                 ExecHashTableInsert(hashtable, slot, hashvalue);
     189             :             }
     190    14877232 :             hashtable->totalTuples += 1;
     191             :         }
     192             :     }
     193             : 
     194             :     /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
     195      394228 :     if (hashtable->nbuckets != hashtable->nbuckets_optimal)
     196         584 :         ExecHashIncreaseNumBuckets(hashtable);
     197             : 
     198             :     /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
     199      394228 :     hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
     200      394228 :     if (hashtable->spaceUsed > hashtable->spacePeak)
     201      394212 :         hashtable->spacePeak = hashtable->spaceUsed;
     202             : 
     203      394228 :     hashtable->partialTuples = hashtable->totalTuples;
     204      394228 : }
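
The "hash table (or temp files)" routing in the loop above happens inside
ExecHashTableInsert(), which appears later in this file.  A simplified sketch
of that decision (space accounting, match-flag clearing and the checks that
trigger ExecHashIncreaseNumBatches/NumBuckets are omitted):

    bool        shouldFree;
    MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    int         bucketno;
    int         batchno;

    ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);

    if (batchno == hashtable->curbatch)
    {
        /* current batch: copy into dense chunk storage, link into its bucket */
        HashJoinTuple hashTuple = (HashJoinTuple)
            dense_alloc(hashtable, HJTUPLE_OVERHEAD + tuple->t_len);

        hashTuple->hashvalue = hashvalue;
        memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
        hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
        hashtable->buckets.unshared[bucketno] = hashTuple;
    }
    else
    {
        /* later batch: spill to that batch's temp file, reloaded in a later pass */
        ExecHashJoinSaveTuple(tuple, hashvalue,
                              &hashtable->innerBatchFile[batchno]);
    }

    if (shouldFree)
        heap_free_minimal_tuple(tuple);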
     205             : 
     206             : /* ----------------------------------------------------------------
     207             :  *      MultiExecParallelHash
     208             :  *
     209             :  *      parallel-aware version, building a shared hash table and
     210             :  *      (if necessary) batch files using the combined effort of
     211             :  *      a set of co-operating backends.
     212             :  * ----------------------------------------------------------------
     213             :  */
     214             : static void
     215         216 : MultiExecParallelHash(HashState *node)
     216             : {
     217             :     ParallelHashJoinState *pstate;
     218             :     PlanState  *outerNode;
     219             :     List       *hashkeys;
     220             :     HashJoinTable hashtable;
     221             :     TupleTableSlot *slot;
     222             :     ExprContext *econtext;
     223             :     uint32      hashvalue;
     224             :     Barrier    *build_barrier;
     225             :     int         i;
     226             : 
     227             :     /*
     228             :      * get state info from node
     229             :      */
     230         216 :     outerNode = outerPlanState(node);
     231         216 :     hashtable = node->hashtable;
     232             : 
     233             :     /*
     234             :      * set expression context
     235             :      */
     236         216 :     hashkeys = node->hashkeys;
     237         216 :     econtext = node->ps.ps_ExprContext;
     238             : 
     239             :     /*
     240             :      * Synchronize the parallel hash table build.  At this stage we know that
     241             :      * the shared hash table has been or is being set up by
     242             :      * ExecHashTableCreate(), but we don't know if our peers have returned
     243             :      * from there or are here in MultiExecParallelHash(), and if so how far
      244             :      * through they are.  To find out, we check the build_barrier phase and
      245             :      * then jump to the right step in the build algorithm.
     246             :      */
     247         216 :     pstate = hashtable->parallel_state;
     248         216 :     build_barrier = &pstate->build_barrier;
     249             :     Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATING);
     250         216 :     switch (BarrierPhase(build_barrier))
     251             :     {
     252          96 :         case PHJ_BUILD_ALLOCATING:
     253             : 
     254             :             /*
     255             :              * Either I just allocated the initial hash table in
     256             :              * ExecHashTableCreate(), or someone else is doing that.  Either
     257             :              * way, wait for everyone to arrive here so we can proceed.
     258             :              */
     259          96 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
     260             :             /* Fall through. */
     261             : 
     262         146 :         case PHJ_BUILD_HASHING_INNER:
     263             : 
     264             :             /*
     265             :              * It's time to begin hashing, or if we just arrived here then
     266             :              * hashing is already underway, so join in that effort.  While
     267             :              * hashing we have to be prepared to help increase the number of
     268             :              * batches or buckets at any time, and if we arrived here when
     269             :              * that was already underway we'll have to help complete that work
     270             :              * immediately so that it's safe to access batches and buckets
     271             :              * below.
     272             :              */
     273         146 :             if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
     274             :                 PHJ_GROW_BATCHES_ELECTING)
     275           0 :                 ExecParallelHashIncreaseNumBatches(hashtable);
     276         146 :             if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
     277             :                 PHJ_GROW_BUCKETS_ELECTING)
     278           0 :                 ExecParallelHashIncreaseNumBuckets(hashtable);
     279         146 :             ExecParallelHashEnsureBatchAccessors(hashtable);
     280         146 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
     281             :             for (;;)
     282             :             {
     283     1200214 :                 slot = ExecProcNode(outerNode);
     284     1200214 :                 if (TupIsNull(slot))
     285             :                     break;
     286     1200068 :                 econtext->ecxt_outertuple = slot;
     287     1200068 :                 if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
     288     1200068 :                                          false, hashtable->keepNulls,
     289             :                                          &hashvalue))
     290     1200068 :                     ExecParallelHashTableInsert(hashtable, slot, hashvalue);
     291     1200068 :                 hashtable->partialTuples++;
     292             :             }
     293             : 
     294             :             /*
     295             :              * Make sure that any tuples we wrote to disk are visible to
     296             :              * others before anyone tries to load them.
     297             :              */
     298         868 :             for (i = 0; i < hashtable->nbatch; ++i)
     299         722 :                 sts_end_write(hashtable->batches[i].inner_tuples);
     300             : 
     301             :             /*
     302             :              * Update shared counters.  We need an accurate total tuple count
     303             :              * to control the empty table optimization.
     304             :              */
     305         146 :             ExecParallelHashMergeCounters(hashtable);
     306             : 
     307         146 :             BarrierDetach(&pstate->grow_buckets_barrier);
     308         146 :             BarrierDetach(&pstate->grow_batches_barrier);
     309             : 
     310             :             /*
     311             :              * Wait for everyone to finish building and flushing files and
     312             :              * counters.
     313             :              */
     314         146 :             if (BarrierArriveAndWait(build_barrier,
     315             :                                      WAIT_EVENT_HASH_BUILD_HASH_INNER))
     316             :             {
     317             :                 /*
     318             :                  * Elect one backend to disable any further growth.  Batches
     319             :                  * are now fixed.  While building them we made sure they'd fit
     320             :                  * in our memory budget when we load them back in later (or we
     321             :                  * tried to do that and gave up because we detected extreme
     322             :                  * skew).
     323             :                  */
     324          96 :                 pstate->growth = PHJ_GROWTH_DISABLED;
     325             :             }
     326             :     }
     327             : 
     328             :     /*
     329             :      * We're not yet attached to a batch.  We all agree on the dimensions and
     330             :      * number of inner tuples (for the empty table optimization).
     331             :      */
     332         216 :     hashtable->curbatch = -1;
     333         216 :     hashtable->nbuckets = pstate->nbuckets;
     334         216 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
     335         216 :     hashtable->totalTuples = pstate->total_tuples;
     336         216 :     ExecParallelHashEnsureBatchAccessors(hashtable);
     337             : 
     338             :     /*
     339             :      * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
     340             :      * case, which will bring the build phase to PHJ_BUILD_DONE (if it isn't
     341             :      * there already).
     342             :      */
     343             :     Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASHING_OUTER ||
     344             :            BarrierPhase(build_barrier) == PHJ_BUILD_DONE);
     345         216 : }
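
For orientation, the build_barrier phases referenced above advance in this
order (per the PHJ_BUILD_* constants in executor/hashjoin.h for this release;
the one-line descriptions are a rough summary, not verbatim source comments):

    PHJ_BUILD_ELECTING        one backend is elected to create the shared
                              batch state and batch 0's hash table
    PHJ_BUILD_ALLOCATING      that allocation is in progress
    PHJ_BUILD_HASHING_INNER   all attached participants hash the inner relation
    PHJ_BUILD_HASHING_OUTER   the outer side is partitioned/probed, driven
                              from nodeHashjoin.c
    PHJ_BUILD_DONE            the build is complete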
     346             : 
     347             : /* ----------------------------------------------------------------
     348             :  *      ExecInitHash
     349             :  *
     350             :  *      Init routine for Hash node
     351             :  * ----------------------------------------------------------------
     352             :  */
     353             : HashState *
     354       46108 : ExecInitHash(Hash *node, EState *estate, int eflags)
     355             : {
     356             :     HashState  *hashstate;
     357             : 
     358             :     /* check for unsupported flags */
     359             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
     360             : 
     361             :     /*
     362             :      * create state structure
     363             :      */
     364       46108 :     hashstate = makeNode(HashState);
     365       46108 :     hashstate->ps.plan = (Plan *) node;
     366       46108 :     hashstate->ps.state = estate;
     367       46108 :     hashstate->ps.ExecProcNode = ExecHash;
     368       46108 :     hashstate->hashtable = NULL;
     369       46108 :     hashstate->hashkeys = NIL;   /* will be set by parent HashJoin */
     370             : 
     371             :     /*
     372             :      * Miscellaneous initialization
     373             :      *
     374             :      * create expression context for node
     375             :      */
     376       46108 :     ExecAssignExprContext(estate, &hashstate->ps);
     377             : 
     378             :     /*
     379             :      * initialize child nodes
     380             :      */
     381       46108 :     outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
     382             : 
     383             :     /*
     384             :      * initialize our result slot and type. No need to build projection
     385             :      * because this node doesn't do projections.
     386             :      */
     387       46108 :     ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
     388       46108 :     hashstate->ps.ps_ProjInfo = NULL;
     389             : 
     390             :     /*
     391             :      * initialize child expressions
     392             :      */
     393             :     Assert(node->plan.qual == NIL);
     394       46108 :     hashstate->hashkeys =
     395       46108 :         ExecInitExprList(node->hashkeys, (PlanState *) hashstate);
     396             : 
     397       46108 :     return hashstate;
     398             : }
     399             : 
     400             : /* ---------------------------------------------------------------
     401             :  *      ExecEndHash
     402             :  *
     403             :  *      clean up routine for Hash node
     404             :  * ----------------------------------------------------------------
     405             :  */
     406             : void
     407       46068 : ExecEndHash(HashState *node)
     408             : {
     409             :     PlanState  *outerPlan;
     410             : 
     411             :     /*
     412             :      * free exprcontext
     413             :      */
     414       46068 :     ExecFreeExprContext(&node->ps);
     415             : 
     416             :     /*
     417             :      * shut down the subplan
     418             :      */
     419       46068 :     outerPlan = outerPlanState(node);
     420       46068 :     ExecEndNode(outerPlan);
     421       46068 : }
     422             : 
     423             : 
     424             : /* ----------------------------------------------------------------
     425             :  *      ExecHashTableCreate
     426             :  *
     427             :  *      create an empty hashtable data structure for hashjoin.
     428             :  * ----------------------------------------------------------------
     429             :  */
     430             : HashJoinTable
     431      394444 : ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, bool keepNulls)
     432             : {
     433             :     Hash       *node;
     434             :     HashJoinTable hashtable;
     435             :     Plan       *outerNode;
     436             :     size_t      space_allowed;
     437             :     int         nbuckets;
     438             :     int         nbatch;
     439             :     double      rows;
     440             :     int         num_skew_mcvs;
     441             :     int         log2_nbuckets;
     442             :     int         nkeys;
     443             :     int         i;
     444             :     ListCell   *ho;
     445             :     ListCell   *hc;
     446             :     MemoryContext oldcxt;
     447             : 
     448             :     /*
     449             :      * Get information about the size of the relation to be hashed (it's the
     450             :      * "outer" subtree of this node, but the inner relation of the hashjoin).
     451             :      * Compute the appropriate size of the hash table.
     452             :      */
     453      394444 :     node = (Hash *) state->ps.plan;
     454      394444 :     outerNode = outerPlan(node);
     455             : 
     456             :     /*
      457             :      * If this is a shared hash table with a partial plan, then we can't use
     458             :      * outerNode->plan_rows to estimate its size.  We need an estimate of the
     459             :      * total number of rows across all copies of the partial plan.
     460             :      */
     461      394444 :     rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
     462             : 
     463      394228 :     ExecChooseHashTableSize(rows, outerNode->plan_width,
     464      394444 :                             OidIsValid(node->skewTable),
     465      394444 :                             state->parallel_state != NULL,
     466      394444 :                             state->parallel_state != NULL ?
     467         216 :                             state->parallel_state->nparticipants - 1 : 0,
     468             :                             &space_allowed,
     469             :                             &nbuckets, &nbatch, &num_skew_mcvs);
     470             : 
     471             :     /* nbuckets must be a power of 2 */
     472      394444 :     log2_nbuckets = my_log2(nbuckets);
     473             :     Assert(nbuckets == (1 << log2_nbuckets));
     474             : 
     475             :     /*
     476             :      * Initialize the hash table control block.
     477             :      *
     478             :      * The hashtable control block is just palloc'd from the executor's
     479             :      * per-query memory context.  Everything else should be kept inside the
     480             :      * subsidiary hashCxt or batchCxt.
     481             :      */
     482      394444 :     hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
     483      394444 :     hashtable->nbuckets = nbuckets;
     484      394444 :     hashtable->nbuckets_original = nbuckets;
     485      394444 :     hashtable->nbuckets_optimal = nbuckets;
     486      394444 :     hashtable->log2_nbuckets = log2_nbuckets;
     487      394444 :     hashtable->log2_nbuckets_optimal = log2_nbuckets;
     488      394444 :     hashtable->buckets.unshared = NULL;
     489      394444 :     hashtable->keepNulls = keepNulls;
     490      394444 :     hashtable->skewEnabled = false;
     491      394444 :     hashtable->skewBucket = NULL;
     492      394444 :     hashtable->skewBucketLen = 0;
     493      394444 :     hashtable->nSkewBuckets = 0;
     494      394444 :     hashtable->skewBucketNums = NULL;
     495      394444 :     hashtable->nbatch = nbatch;
     496      394444 :     hashtable->curbatch = 0;
     497      394444 :     hashtable->nbatch_original = nbatch;
     498      394444 :     hashtable->nbatch_outstart = nbatch;
     499      394444 :     hashtable->growEnabled = true;
     500      394444 :     hashtable->totalTuples = 0;
     501      394444 :     hashtable->partialTuples = 0;
     502      394444 :     hashtable->skewTuples = 0;
     503      394444 :     hashtable->innerBatchFile = NULL;
     504      394444 :     hashtable->outerBatchFile = NULL;
     505      394444 :     hashtable->spaceUsed = 0;
     506      394444 :     hashtable->spacePeak = 0;
     507      394444 :     hashtable->spaceAllowed = space_allowed;
     508      394444 :     hashtable->spaceUsedSkew = 0;
     509      394444 :     hashtable->spaceAllowedSkew =
     510      394444 :         hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
     511      394444 :     hashtable->chunks = NULL;
     512      394444 :     hashtable->current_chunk = NULL;
     513      394444 :     hashtable->parallel_state = state->parallel_state;
     514      394444 :     hashtable->area = state->ps.state->es_query_dsa;
     515      394444 :     hashtable->batches = NULL;
     516             : 
     517             : #ifdef HJDEBUG
     518             :     printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
     519             :            hashtable, nbatch, nbuckets);
     520             : #endif
     521             : 
     522             :     /*
     523             :      * Create temporary memory contexts in which to keep the hashtable working
     524             :      * storage.  See notes in executor/hashjoin.h.
     525             :      */
     526      394444 :     hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
     527             :                                                "HashTableContext",
     528             :                                                ALLOCSET_DEFAULT_SIZES);
     529             : 
     530      394444 :     hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
     531             :                                                 "HashBatchContext",
     532             :                                                 ALLOCSET_DEFAULT_SIZES);
     533             : 
     534             :     /* Allocate data that will live for the life of the hashjoin */
     535             : 
     536      394444 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     537             : 
     538             :     /*
     539             :      * Get info about the hash functions to be used for each hash key. Also
     540             :      * remember whether the join operators are strict.
     541             :      */
     542      394444 :     nkeys = list_length(hashOperators);
     543      394444 :     hashtable->outer_hashfunctions =
     544      394444 :         (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
     545      394444 :     hashtable->inner_hashfunctions =
     546      394444 :         (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
     547      394444 :     hashtable->hashStrict = (bool *) palloc(nkeys * sizeof(bool));
     548      394444 :     hashtable->collations = (Oid *) palloc(nkeys * sizeof(Oid));
     549      394444 :     i = 0;
     550      790650 :     forboth(ho, hashOperators, hc, hashCollations)
     551             :     {
     552      396206 :         Oid         hashop = lfirst_oid(ho);
     553             :         Oid         left_hashfn;
     554             :         Oid         right_hashfn;
     555             : 
     556      396206 :         if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
     557           0 :             elog(ERROR, "could not find hash function for hash operator %u",
     558             :                  hashop);
     559      396206 :         fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
     560      396206 :         fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
     561      396206 :         hashtable->hashStrict[i] = op_strict(hashop);
     562      396206 :         hashtable->collations[i] = lfirst_oid(hc);
     563      396206 :         i++;
     564             :     }
     565             : 
     566      394444 :     if (nbatch > 1 && hashtable->parallel_state == NULL)
     567             :     {
     568             :         /*
     569             :          * allocate and initialize the file arrays in hashCxt (not needed for
     570             :          * parallel case which uses shared tuplestores instead of raw files)
     571             :          */
     572          70 :         hashtable->innerBatchFile = (BufFile **)
     573          70 :             palloc0(nbatch * sizeof(BufFile *));
     574          70 :         hashtable->outerBatchFile = (BufFile **)
     575          70 :             palloc0(nbatch * sizeof(BufFile *));
     576             :         /* The files will not be opened until needed... */
     577             :         /* ... but make sure we have temp tablespaces established for them */
     578          70 :         PrepareTempTablespaces();
     579             :     }
     580             : 
     581      394444 :     MemoryContextSwitchTo(oldcxt);
     582             : 
     583      394444 :     if (hashtable->parallel_state)
     584             :     {
     585         216 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
     586             :         Barrier    *build_barrier;
     587             : 
     588             :         /*
     589             :          * Attach to the build barrier.  The corresponding detach operation is
     590             :          * in ExecHashTableDetach.  Note that we won't attach to the
     591             :          * batch_barrier for batch 0 yet.  We'll attach later and start it out
     592             :          * in PHJ_BATCH_PROBING phase, because batch 0 is allocated up front
     593             :          * and then loaded while hashing (the standard hybrid hash join
     594             :          * algorithm), and we'll coordinate that using build_barrier.
     595             :          */
     596         216 :         build_barrier = &pstate->build_barrier;
     597         216 :         BarrierAttach(build_barrier);
     598             : 
     599             :         /*
     600             :          * So far we have no idea whether there are any other participants,
     601             :          * and if so, what phase they are working on.  The only thing we care
     602             :          * about at this point is whether someone has already created the
     603             :          * SharedHashJoinBatch objects and the hash table for batch 0.  One
     604             :          * backend will be elected to do that now if necessary.
     605             :          */
     606         312 :         if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECTING &&
     607          96 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
     608             :         {
     609          96 :             pstate->nbatch = nbatch;
     610          96 :             pstate->space_allowed = space_allowed;
     611          96 :             pstate->growth = PHJ_GROWTH_OK;
     612             : 
     613             :             /* Set up the shared state for coordinating batches. */
     614          96 :             ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
     615             : 
     616             :             /*
     617             :              * Allocate batch 0's hash table up front so we can load it
     618             :              * directly while hashing.
     619             :              */
     620          96 :             pstate->nbuckets = nbuckets;
     621          96 :             ExecParallelHashTableAlloc(hashtable, 0);
     622             :         }
     623             : 
     624             :         /*
     625             :          * The next Parallel Hash synchronization point is in
     626             :          * MultiExecParallelHash(), which will progress it all the way to
     627             :          * PHJ_BUILD_DONE.  The caller must not return control from this
     628             :          * executor node between now and then.
     629             :          */
     630             :     }
     631             :     else
     632             :     {
     633             :         /*
     634             :          * Prepare context for the first-scan space allocations; allocate the
     635             :          * hashbucket array therein, and set each bucket "empty".
     636             :          */
     637      394228 :         MemoryContextSwitchTo(hashtable->batchCxt);
     638             : 
     639      394228 :         hashtable->buckets.unshared = (HashJoinTuple *)
     640      394228 :             palloc0(nbuckets * sizeof(HashJoinTuple));
     641             : 
     642             :         /*
     643             :          * Set up for skew optimization, if possible and there's a need for
     644             :          * more than one batch.  (In a one-batch join, there's no point in
     645             :          * it.)
     646             :          */
     647      394228 :         if (nbatch > 1)
     648          70 :             ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
     649             : 
     650      394228 :         MemoryContextSwitchTo(oldcxt);
     651             :     }
     652             : 
     653      394444 :     return hashtable;
     654             : }
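
The hashCxt/batchCxt split created above pays off between batches: instead of
freeing the previous batch's tuples one by one, the whole batch context can be
reset.  Roughly, paraphrasing ExecHashTableReset() (which appears later in this
file; accounting details simplified):

    MemoryContext oldcxt;

    /* release everything allocated for the previous batch in one shot */
    MemoryContextReset(hashtable->batchCxt);
    oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);

    /* fresh, empty bucket array for the new batch */
    hashtable->buckets.unshared = (HashJoinTuple *)
        palloc0(hashtable->nbuckets * sizeof(HashJoinTuple));
    hashtable->spaceUsed = 0;

    MemoryContextSwitchTo(oldcxt);

    /* the dense-packed tuple chunks went away with the context reset */
    hashtable->chunks = NULL;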
     655             : 
     656             : 
     657             : /*
     658             :  * Compute appropriate size for hashtable given the estimated size of the
     659             :  * relation to be hashed (number of rows and average row width).
     660             :  *
     661             :  * This is exported so that the planner's costsize.c can use it.
     662             :  */
     663             : 
     664             : /* Target bucket loading (tuples per bucket) */
     665             : #define NTUP_PER_BUCKET         1
     666             : 
     667             : void
     668     1340854 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
     669             :                         bool try_combined_hash_mem,
     670             :                         int parallel_workers,
     671             :                         size_t *space_allowed,
     672             :                         int *numbuckets,
     673             :                         int *numbatches,
     674             :                         int *num_skew_mcvs)
     675             : {
     676             :     int         tupsize;
     677             :     double      inner_rel_bytes;
     678             :     size_t      hash_table_bytes;
     679             :     size_t      bucket_bytes;
     680             :     size_t      max_pointers;
     681     1340854 :     int         nbatch = 1;
     682             :     int         nbuckets;
     683             :     double      dbuckets;
     684             : 
     685             :     /* Force a plausible relation size if no info */
     686     1340854 :     if (ntuples <= 0.0)
     687          40 :         ntuples = 1000.0;
     688             : 
     689             :     /*
     690             :      * Estimate tupsize based on footprint of tuple in hashtable... note this
     691             :      * does not allow for any palloc overhead.  The manipulations of spaceUsed
     692             :      * don't count palloc overhead either.
     693             :      */
     694     1340854 :     tupsize = HJTUPLE_OVERHEAD +
     695     1340854 :         MAXALIGN(SizeofMinimalTupleHeader) +
     696     1340854 :         MAXALIGN(tupwidth);
     697     1340854 :     inner_rel_bytes = ntuples * tupsize;
     698             : 
     699             :     /*
     700             :      * Compute in-memory hashtable size limit from GUCs.
     701             :      */
     702     1340854 :     hash_table_bytes = get_hash_memory_limit();
     703             : 
     704             :     /*
     705             :      * Parallel Hash tries to use the combined hash_mem of all workers to
     706             :      * avoid the need to batch.  If that won't work, it falls back to hash_mem
     707             :      * per worker and tries to process batches in parallel.
     708             :      */
     709     1340854 :     if (try_combined_hash_mem)
     710             :     {
     711             :         /* Careful, this could overflow size_t */
     712             :         double      newlimit;
     713             : 
     714        5092 :         newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
     715        5092 :         newlimit = Min(newlimit, (double) SIZE_MAX);
     716        5092 :         hash_table_bytes = (size_t) newlimit;
     717             :     }
     718             : 
     719     1340854 :     *space_allowed = hash_table_bytes;
     720             : 
     721             :     /*
     722             :      * If skew optimization is possible, estimate the number of skew buckets
     723             :      * that will fit in the memory allowed, and decrement the assumed space
     724             :      * available for the main hash table accordingly.
     725             :      *
     726             :      * We make the optimistic assumption that each skew bucket will contain
     727             :      * one inner-relation tuple.  If that turns out to be low, we will recover
     728             :      * at runtime by reducing the number of skew buckets.
     729             :      *
     730             :      * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
     731             :      * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
     732             :      * will round up to the next power of 2 and then multiply by 4 to reduce
     733             :      * collisions.
     734             :      */
     735     1340854 :     if (useskew)
     736             :     {
     737             :         size_t      bytes_per_mcv;
     738             :         size_t      skew_mcvs;
     739             : 
     740             :         /*----------
     741             :          * Compute number of MCVs we could hold in hash_table_bytes
     742             :          *
     743             :          * Divisor is:
     744             :          * size of a hash tuple +
     745             :          * worst-case size of skewBucket[] per MCV +
     746             :          * size of skewBucketNums[] entry +
     747             :          * size of skew bucket struct itself
     748             :          *----------
     749             :          */
     750      978492 :         bytes_per_mcv = tupsize +
     751             :             (8 * sizeof(HashSkewBucket *)) +
     752      978492 :             sizeof(int) +
     753             :             SKEW_BUCKET_OVERHEAD;
     754      978492 :         skew_mcvs = hash_table_bytes / bytes_per_mcv;
     755             : 
     756             :         /*
     757             :          * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
     758             :          * not to worry about size_t overflow in the multiplication)
     759             :          */
     760      978492 :         skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
     761             : 
     762             :         /* Now clamp to integer range */
     763      978492 :         skew_mcvs = Min(skew_mcvs, INT_MAX);
     764             : 
     765      978492 :         *num_skew_mcvs = (int) skew_mcvs;
     766             : 
     767             :         /* Reduce hash_table_bytes by the amount needed for the skew table */
     768      978492 :         if (skew_mcvs > 0)
     769      978492 :             hash_table_bytes -= skew_mcvs * bytes_per_mcv;
     770             :     }
     771             :     else
     772      362362 :         *num_skew_mcvs = 0;
     773             : 
     774             :     /*
     775             :      * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
     776             :      * memory is filled, assuming a single batch; but limit the value so that
     777             :      * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
     778             :      * nor MaxAllocSize.
     779             :      *
     780             :      * Note that both nbuckets and nbatch must be powers of 2 to make
     781             :      * ExecHashGetBucketAndBatch fast.
     782             :      */
     783     1340854 :     max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
     784     1340854 :     max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
     785             :     /* If max_pointers isn't a power of 2, must round it down to one */
     786     1340854 :     max_pointers = pg_prevpower2_size_t(max_pointers);
     787             : 
     788             :     /* Also ensure we avoid integer overflow in nbatch and nbuckets */
     789             :     /* (this step is redundant given the current value of MaxAllocSize) */
     790     1340854 :     max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
     791             : 
     792     1340854 :     dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
     793     1340854 :     dbuckets = Min(dbuckets, max_pointers);
     794     1340854 :     nbuckets = (int) dbuckets;
     795             :     /* don't let nbuckets be really small, though ... */
     796     1340854 :     nbuckets = Max(nbuckets, 1024);
     797             :     /* ... and force it to be a power of 2. */
     798     1340854 :     nbuckets = pg_nextpower2_32(nbuckets);
     799             : 
     800             :     /*
     801             :      * If there's not enough space to store the projected number of tuples and
     802             :      * the required bucket headers, we will need multiple batches.
     803             :      */
     804     1340854 :     bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
     805     1340854 :     if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
     806             :     {
     807             :         /* We'll need multiple batches */
     808             :         size_t      sbuckets;
     809             :         double      dbatch;
     810             :         int         minbatch;
     811             :         size_t      bucket_size;
     812             : 
     813             :         /*
     814             :          * If Parallel Hash with combined hash_mem would still need multiple
     815             :          * batches, we'll have to fall back to regular hash_mem budget.
     816             :          */
     817        3144 :         if (try_combined_hash_mem)
     818             :         {
     819         144 :             ExecChooseHashTableSize(ntuples, tupwidth, useskew,
     820             :                                     false, parallel_workers,
     821             :                                     space_allowed,
     822             :                                     numbuckets,
     823             :                                     numbatches,
     824             :                                     num_skew_mcvs);
     825         144 :             return;
     826             :         }
     827             : 
     828             :         /*
     829             :          * Estimate the number of buckets we'll want to have when hash_mem is
     830             :          * entirely full.  Each bucket will contain a bucket pointer plus
     831             :          * NTUP_PER_BUCKET tuples, whose projected size already includes
     832             :          * overhead for the hash code, pointer to the next tuple, etc.
     833             :          */
     834        3000 :         bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
     835        3000 :         sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
     836        3000 :         sbuckets = Min(sbuckets, max_pointers);
     837        3000 :         nbuckets = (int) sbuckets;
     838        3000 :         nbuckets = pg_nextpower2_32(nbuckets);
     839        3000 :         bucket_bytes = nbuckets * sizeof(HashJoinTuple);
     840             : 
     841             :         /*
     842             :          * Buckets are simple pointers to hashjoin tuples, while tupsize
     843             :          * includes the pointer, hash code, and MinimalTupleData.  So buckets
     844             :          * should never really exceed 25% of hash_mem (even for
     845             :          * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
     846             :          * 2^N bytes, where we might get more because of doubling. So let's
     847             :          * look for 50% here.
     848             :          */
     849             :         Assert(bucket_bytes <= hash_table_bytes / 2);
     850             : 
     851             :         /* Calculate required number of batches. */
     852        3000 :         dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
     853        3000 :         dbatch = Min(dbatch, max_pointers);
     854        3000 :         minbatch = (int) dbatch;
     855        3000 :         nbatch = pg_nextpower2_32(Max(2, minbatch));
     856             :     }
     857             : 
     858             :     Assert(nbuckets > 0);
     859             :     Assert(nbatch > 0);
     860             : 
     861     1340710 :     *numbuckets = nbuckets;
     862     1340710 :     *numbatches = nbatch;
     863             : }
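
A hypothetical worked example of the sizing logic above (all inputs assumed for
illustration): say ntuples = 1,000,000, tupwidth = 40, no skew optimization, no
combined hash_mem, and get_hash_memory_limit() = 4 MB (4,194,304 bytes), with
tupsize working out to 72 bytes on a typical 64-bit build (16 bytes of
HashJoinTuple overhead + 16 bytes of minimal tuple header + MAXALIGN(40)).

    inner_rel_bytes    = 1,000,000 * 72                    = 72,000,000
    max_pointers       = 4,194,304 / 8                     = 524,288
    first-cut nbuckets = Min(1,000,000, 524,288)           = 524,288
    bucket_bytes       = 524,288 * 8                       = 4,194,304

Since inner_rel_bytes + bucket_bytes exceeds the 4 MB budget, the multi-batch
path recomputes both values:

    bucket_size = 72 + 8                                   = 80
    nbuckets    = next power of 2 of (4,194,304 / 80)      = 65,536   (2^16)
    nbatch      = next power of 2 of
                  ceil(72,000,000 / (4,194,304 - 524,288)) = 32       (2^5)

Because both results are powers of two, ExecHashGetBucketAndBatch() (defined
later in this file) can split a 32-bit hash value with simple masking, roughly
bucketno = hashvalue & (nbuckets - 1), with batchno taken from other bits of
the hash value, rather than using division or modulo.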
     864             : 
     865             : 
     866             : /* ----------------------------------------------------------------
     867             :  *      ExecHashTableDestroy
     868             :  *
     869             :  *      destroy a hash table
     870             :  * ----------------------------------------------------------------
     871             :  */
     872             : void
     873      394404 : ExecHashTableDestroy(HashJoinTable hashtable)
     874             : {
     875             :     int         i;
     876             : 
     877             :     /*
     878             :      * Make sure all the temp files are closed.  We skip batch 0, since it
     879             :      * can't have any temp files (and the arrays might not even exist if
     880             :      * nbatch is only 1).  Parallel hash joins don't use these files.
     881             :      */
     882      394404 :     if (hashtable->innerBatchFile != NULL)
     883             :     {
     884         880 :         for (i = 1; i < hashtable->nbatch; i++)
     885             :         {
     886         776 :             if (hashtable->innerBatchFile[i])
     887           0 :                 BufFileClose(hashtable->innerBatchFile[i]);
     888         776 :             if (hashtable->outerBatchFile[i])
     889           0 :                 BufFileClose(hashtable->outerBatchFile[i]);
     890             :         }
     891             :     }
     892             : 
     893             :     /* Release working memory (batchCxt is a child, so it goes away too) */
     894      394404 :     MemoryContextDelete(hashtable->hashCxt);
     895             : 
     896             :     /* And drop the control block */
     897      394404 :     pfree(hashtable);
     898      394404 : }
     899             : 
     900             : /*
     901             :  * ExecHashIncreaseNumBatches
     902             :  *      increase the original number of batches in order to reduce
     903             :  *      current memory consumption
     904             :  */
     905             : static void
     906      276394 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
     907             : {
     908      276394 :     int         oldnbatch = hashtable->nbatch;
     909      276394 :     int         curbatch = hashtable->curbatch;
     910             :     int         nbatch;
     911             :     MemoryContext oldcxt;
     912             :     long        ninmemory;
     913             :     long        nfreed;
     914             :     HashMemoryChunk oldchunks;
     915             : 
     916             :     /* do nothing if we've decided to shut off growth */
     917      276394 :     if (!hashtable->growEnabled)
     918      276320 :         return;
     919             : 
     920             :     /* safety check to avoid overflow */
     921          74 :     if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
     922           0 :         return;
     923             : 
     924          74 :     nbatch = oldnbatch * 2;
     925             :     Assert(nbatch > 1);
     926             : 
     927             : #ifdef HJDEBUG
     928             :     printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
     929             :            hashtable, nbatch, hashtable->spaceUsed);
     930             : #endif
     931             : 
     932          74 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     933             : 
     934          74 :     if (hashtable->innerBatchFile == NULL)
     935             :     {
     936             :         /* we had no file arrays before */
     937          34 :         hashtable->innerBatchFile = (BufFile **)
     938          34 :             palloc0(nbatch * sizeof(BufFile *));
     939          34 :         hashtable->outerBatchFile = (BufFile **)
     940          34 :             palloc0(nbatch * sizeof(BufFile *));
     941             :         /* time to establish the temp tablespaces, too */
     942          34 :         PrepareTempTablespaces();
     943             :     }
     944             :     else
     945             :     {
     946             :         /* enlarge arrays and zero out added entries */
     947          40 :         hashtable->innerBatchFile = (BufFile **)
     948          40 :             repalloc(hashtable->innerBatchFile, nbatch * sizeof(BufFile *));
     949          40 :         hashtable->outerBatchFile = (BufFile **)
     950          40 :             repalloc(hashtable->outerBatchFile, nbatch * sizeof(BufFile *));
     951         164 :         MemSet(hashtable->innerBatchFile + oldnbatch, 0,
     952             :                (nbatch - oldnbatch) * sizeof(BufFile *));
     953         164 :         MemSet(hashtable->outerBatchFile + oldnbatch, 0,
     954             :                (nbatch - oldnbatch) * sizeof(BufFile *));
     955             :     }
     956             : 
     957          74 :     MemoryContextSwitchTo(oldcxt);
     958             : 
     959          74 :     hashtable->nbatch = nbatch;
     960             : 
     961             :     /*
     962             :      * Scan through the existing hash table entries and dump out any that are
     963             :      * no longer of the current batch.
     964             :      */
     965          74 :     ninmemory = nfreed = 0;
     966             : 
     967             :     /* If we know we need to resize nbuckets, we can do it while rebatching. */
     968          74 :     if (hashtable->nbuckets_optimal != hashtable->nbuckets)
     969             :     {
     970             :         /* we never decrease the number of buckets */
     971             :         Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
     972             : 
     973          34 :         hashtable->nbuckets = hashtable->nbuckets_optimal;
     974          34 :         hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
     975             : 
     976          34 :         hashtable->buckets.unshared =
     977          34 :             repalloc(hashtable->buckets.unshared,
     978          34 :                      sizeof(HashJoinTuple) * hashtable->nbuckets);
     979             :     }
     980             : 
     981             :     /*
     982             :      * We will scan through the chunks directly, so that we can reset the
     983             :      * buckets now and not have to keep track of which tuples in the buckets have
     984             :      * already been processed. We will free the old chunks as we go.
     985             :      */
     986          74 :     memset(hashtable->buckets.unshared, 0,
     987          74 :            sizeof(HashJoinTuple) * hashtable->nbuckets);
     988          74 :     oldchunks = hashtable->chunks;
     989          74 :     hashtable->chunks = NULL;
     990             : 
     991             :     /* so, let's scan through the old chunks, and all tuples in each chunk */
     992         362 :     while (oldchunks != NULL)
     993             :     {
     994         288 :         HashMemoryChunk nextchunk = oldchunks->next.unshared;
     995             : 
     996             :         /* position within the buffer (up to oldchunks->used) */
     997         288 :         size_t      idx = 0;
     998             : 
     999             :         /* process all tuples stored in this chunk (and then free it) */
    1000      196786 :         while (idx < oldchunks->used)
    1001             :         {
    1002      196498 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
    1003      196498 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1004      196498 :             int         hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
    1005             :             int         bucketno;
    1006             :             int         batchno;
    1007             : 
    1008      196498 :             ninmemory++;
    1009      196498 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1010             :                                       &bucketno, &batchno);
    1011             : 
    1012      196498 :             if (batchno == curbatch)
    1013             :             {
    1014             :                 /* keep tuple in memory - copy it into the new chunk */
    1015             :                 HashJoinTuple copyTuple;
    1016             : 
    1017       75772 :                 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1018       75772 :                 memcpy(copyTuple, hashTuple, hashTupleSize);
    1019             : 
    1020             :                 /* and add it back to the appropriate bucket */
    1021       75772 :                 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1022       75772 :                 hashtable->buckets.unshared[bucketno] = copyTuple;
    1023             :             }
    1024             :             else
    1025             :             {
    1026             :                 /* dump it out */
    1027             :                 Assert(batchno > curbatch);
    1028      120726 :                 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
    1029             :                                       hashTuple->hashvalue,
    1030      120726 :                                       &hashtable->innerBatchFile[batchno]);
    1031             : 
    1032      120726 :                 hashtable->spaceUsed -= hashTupleSize;
    1033      120726 :                 nfreed++;
    1034             :             }
    1035             : 
    1036             :             /* next tuple in this chunk */
    1037      196498 :             idx += MAXALIGN(hashTupleSize);
    1038             : 
    1039             :             /* allow this loop to be cancellable */
    1040      196498 :             CHECK_FOR_INTERRUPTS();
    1041             :         }
    1042             : 
    1043             :         /* we're done with this chunk - free it and proceed to the next one */
    1044         288 :         pfree(oldchunks);
    1045         288 :         oldchunks = nextchunk;
    1046             :     }
    1047             : 
    1048             : #ifdef HJDEBUG
    1049             :     printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
    1050             :            hashtable, nfreed, ninmemory, hashtable->spaceUsed);
    1051             : #endif
    1052             : 
    1053             :     /*
    1054             :      * If we dumped out either all or none of the tuples in the table, disable
    1055             :      * further expansion of nbatch.  This situation implies that we have
    1056             :      * enough tuples of identical hashvalues to overflow spaceAllowed.
    1057             :      * Increasing nbatch will not fix it since there's no way to subdivide the
    1058             :      * group any more finely. We have to just gut it out and hope the server
    1059             :      * has enough RAM.
    1060             :      */
    1061          74 :     if (nfreed == 0 || nfreed == ninmemory)
    1062             :     {
    1063          16 :         hashtable->growEnabled = false;
    1064             : #ifdef HJDEBUG
    1065             :         printf("Hashjoin %p: disabling further increase of nbatch\n",
    1066             :                hashtable);
    1067             : #endif
    1068             :     }
    1069             : }
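
/*
 * Editor's note: the loop above walks the dense-allocated chunks by byte
 * offset rather than chasing bucket pointers.  The standalone sketch below
 * (ToyChunk, ToyTuple and MY_MAXALIGN are illustrative stand-ins, not the
 * real HashMemoryChunk/HashJoinTuple layout) shows the same
 * append-then-walk-by-MAXALIGN'd-offset pattern in isolation.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_ALIGN 8
#define MY_MAXALIGN(len) (((len) + TOY_ALIGN - 1) & ~(size_t) (TOY_ALIGN - 1))

typedef struct ToyTuple
{
    uint32_t    hashvalue;
    uint32_t    len;            /* total entry size, header included */
} ToyTuple;

typedef struct ToyChunk
{
    size_t      used;
    char        data[1024];
} ToyChunk;

static void
toy_append(ToyChunk *chunk, uint32_t hashvalue, uint32_t payload_len)
{
    ToyTuple   *t = (ToyTuple *) (chunk->data + chunk->used);

    t->hashvalue = hashvalue;
    t->len = sizeof(ToyTuple) + payload_len;
    chunk->used += MY_MAXALIGN(t->len);
}

int
main(void)
{
    ToyChunk    chunk = {0};
    size_t      idx = 0;

    toy_append(&chunk, 0xBEEF, 21);
    toy_append(&chunk, 0xCAFE, 7);

    /* walk the chunk the same way the rebatching loop does */
    while (idx < chunk.used)
    {
        ToyTuple   *t = (ToyTuple *) (chunk.data + idx);

        printf("tuple hash %x, entry size %u\n", t->hashvalue, t->len);
        idx += MY_MAXALIGN(t->len);
    }
    return 0;
}
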
    1070             : 
    1071             : /*
    1072             :  * ExecParallelHashIncreaseNumBatches
    1073             :  *      Every participant attached to grow_batches_barrier must run this
    1074             :  *      function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
    1075             :  */
    1076             : static void
    1077          38 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
    1078             : {
    1079          38 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1080             :     int         i;
    1081             : 
    1082             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
    1083             : 
    1084             :     /*
    1085             :      * It's unlikely, but we need to be prepared for new participants to show
    1086             :      * up while we're in the middle of this operation, so we need to switch on
    1087             :      * the barrier phase here.
    1088             :      */
    1089          38 :     switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
    1090             :     {
    1091          38 :         case PHJ_GROW_BATCHES_ELECTING:
    1092             : 
    1093             :             /*
    1094             :              * Elect one participant to prepare to grow the number of batches.
    1095             :              * This involves reallocating or resetting the buckets of batch 0
    1096             :              * in preparation for all participants to begin repartitioning the
    1097             :              * tuples.
    1098             :              */
    1099          38 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1100             :                                      WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
    1101             :             {
    1102             :                 dsa_pointer_atomic *buckets;
    1103             :                 ParallelHashJoinBatch *old_batch0;
    1104             :                 int         new_nbatch;
    1105             :                 int         i;
    1106             : 
    1107             :                 /* Move the old batch out of the way. */
    1108          36 :                 old_batch0 = hashtable->batches[0].shared;
    1109          36 :                 pstate->old_batches = pstate->batches;
    1110          36 :                 pstate->old_nbatch = hashtable->nbatch;
    1111          36 :                 pstate->batches = InvalidDsaPointer;
    1112             : 
    1113             :                 /* Free this backend's old accessors. */
    1114          36 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1115             : 
    1116             :                 /* Figure out how many batches to use. */
    1117          36 :                 if (hashtable->nbatch == 1)
    1118             :                 {
    1119             :                     /*
    1120             :                      * We are going from single-batch to multi-batch.  We need
    1121             :                      * to switch from one large combined memory budget to the
    1122             :                      * regular hash_mem budget.
    1123             :                      */
    1124          24 :                     pstate->space_allowed = get_hash_memory_limit();
    1125             : 
    1126             :                     /*
    1127             :                      * The combined hash_mem of all participants wasn't
    1128             :                      * enough. Therefore one batch per participant would be
    1129             :                      * approximately equivalent and would probably also be
    1130             :                      * insufficient.  So try two batches per participant,
    1131             :                      * rounded up to a power of two.
    1132             :                      */
    1133          24 :                     new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
    1134             :                 }
    1135             :                 else
    1136             :                 {
    1137             :                     /*
    1138             :                      * We were already multi-batched.  Try doubling the number
    1139             :                      * of batches.
    1140             :                      */
    1141          12 :                     new_nbatch = hashtable->nbatch * 2;
    1142             :                 }
    1143             : 
    1144             :                 /* Allocate new larger generation of batches. */
    1145             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1146          36 :                 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
    1147             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1148             : 
    1149             :                 /* Replace or recycle batch 0's bucket array. */
    1150          36 :                 if (pstate->old_nbatch == 1)
    1151             :                 {
    1152             :                     double      dtuples;
    1153             :                     double      dbuckets;
    1154             :                     int         new_nbuckets;
    1155             : 
    1156             :                     /*
    1157             :                      * We probably also need a smaller bucket array.  How many
    1158             :                      * tuples do we expect per batch, assuming we have only
    1159             :                      * half of them so far?  Normally we don't need to change
    1160             :                      * the bucket array's size, because the size of each batch
    1161             :                      * stays the same as we add more batches, but in this
    1162             :                      * special case we move from a large batch to many smaller
    1163             :                      * batches and it would be wasteful to keep the large
    1164             :                      * array.
    1165             :                      */
    1166          24 :                     dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
    1167          24 :                     dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
    1168          24 :                     dbuckets = Min(dbuckets,
    1169             :                                    MaxAllocSize / sizeof(dsa_pointer_atomic));
    1170          24 :                     new_nbuckets = (int) dbuckets;
    1171          24 :                     new_nbuckets = Max(new_nbuckets, 1024);
    1172          24 :                     new_nbuckets = pg_nextpower2_32(new_nbuckets);
    1173          24 :                     dsa_free(hashtable->area, old_batch0->buckets);
    1174          48 :                     hashtable->batches[0].shared->buckets =
    1175          24 :                         dsa_allocate(hashtable->area,
    1176             :                                      sizeof(dsa_pointer_atomic) * new_nbuckets);
    1177             :                     buckets = (dsa_pointer_atomic *)
    1178          24 :                         dsa_get_address(hashtable->area,
    1179          24 :                                         hashtable->batches[0].shared->buckets);
    1180       73752 :                     for (i = 0; i < new_nbuckets; ++i)
    1181       73728 :                         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1182          24 :                     pstate->nbuckets = new_nbuckets;
    1183             :                 }
    1184             :                 else
    1185             :                 {
    1186             :                     /* Recycle the existing bucket array. */
    1187          12 :                     hashtable->batches[0].shared->buckets = old_batch0->buckets;
    1188             :                     buckets = (dsa_pointer_atomic *)
    1189          12 :                         dsa_get_address(hashtable->area, old_batch0->buckets);
    1190       40972 :                     for (i = 0; i < hashtable->nbuckets; ++i)
    1191       40960 :                         dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
    1192             :                 }
    1193             : 
    1194             :                 /* Move all chunks to the work queue for parallel processing. */
    1195          36 :                 pstate->chunk_work_queue = old_batch0->chunks;
    1196             : 
    1197             :                 /* Disable further growth temporarily while we're growing. */
    1198          36 :                 pstate->growth = PHJ_GROWTH_DISABLED;
    1199             :             }
    1200             :             else
    1201             :             {
    1202             :                 /* All other participants just flush their tuples to disk. */
    1203           2 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1204             :             }
    1205             :             /* Fall through. */
    1206             : 
    1207             :         case PHJ_GROW_BATCHES_ALLOCATING:
    1208             :             /* Wait for the above to be finished. */
    1209          38 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1210             :                                  WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATE);
    1211             :             /* Fall through. */
    1212             : 
    1213          38 :         case PHJ_GROW_BATCHES_REPARTITIONING:
    1214             :             /* Make sure that we have the current dimensions and buckets. */
    1215          38 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1216          38 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1217             :             /* Then partition, flush counters. */
    1218          38 :             ExecParallelHashRepartitionFirst(hashtable);
    1219          38 :             ExecParallelHashRepartitionRest(hashtable);
    1220          38 :             ExecParallelHashMergeCounters(hashtable);
    1221             :             /* Wait for the above to be finished. */
    1222          38 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1223             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
    1224             :             /* Fall through. */
    1225             : 
    1226          38 :         case PHJ_GROW_BATCHES_DECIDING:
    1227             : 
    1228             :             /*
    1229             :              * Elect one participant to clean up and decide whether further
    1230             :              * repartitioning is needed, or should be disabled because it's
    1231             :              * not helping.
    1232             :              */
    1233          38 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1234             :                                      WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
    1235             :             {
    1236          36 :                 bool        space_exhausted = false;
    1237          36 :                 bool        extreme_skew_detected = false;
    1238             : 
    1239             :                 /* Make sure that we have the current dimensions and buckets. */
    1240          36 :                 ExecParallelHashEnsureBatchAccessors(hashtable);
    1241          36 :                 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1242             : 
    1243             :                 /* Are any of the new generation of batches exhausted? */
    1244         292 :                 for (i = 0; i < hashtable->nbatch; ++i)
    1245             :                 {
    1246         256 :                     ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
    1247             : 
    1248         256 :                     if (batch->space_exhausted ||
    1249         256 :                         batch->estimated_size > pstate->space_allowed)
    1250             :                     {
    1251             :                         int         parent;
    1252             : 
    1253          16 :                         space_exhausted = true;
    1254             : 
    1255             :                         /*
    1256             :                          * Did this batch receive ALL of the tuples from its
    1257             :                          * parent batch?  That would indicate that further
    1258             :                          * repartitioning isn't going to help (the hash values
    1259             :                          * are probably all the same).
    1260             :                          */
    1261          16 :                         parent = i % pstate->old_nbatch;
    1262          16 :                         if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
    1263          16 :                             extreme_skew_detected = true;
    1264             :                     }
    1265             :                 }
    1266             : 
    1267             :                 /* Don't keep growing if it's not helping or we'd overflow. */
    1268          36 :                 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
    1269          16 :                     pstate->growth = PHJ_GROWTH_DISABLED;
    1270          20 :                 else if (space_exhausted)
    1271           0 :                     pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    1272             :                 else
    1273          20 :                     pstate->growth = PHJ_GROWTH_OK;
    1274             : 
    1275             :                 /* Free the old batches in shared memory. */
    1276          36 :                 dsa_free(hashtable->area, pstate->old_batches);
    1277          36 :                 pstate->old_batches = InvalidDsaPointer;
    1278             :             }
    1279             :             /* Fall through. */
    1280             : 
    1281             :         case PHJ_GROW_BATCHES_FINISHING:
    1282             :             /* Wait for the above to complete. */
    1283          38 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1284             :                                  WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
    1285             :     }
    1286          38 : }
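
/*
 * Editor's note: the switch above implements an "elect one participant,
 * everyone else waits, then all proceed" protocol on a shared barrier.  The
 * standalone sketch below is only an analogy using POSIX thread barriers,
 * not PostgreSQL's Barrier API, and its names are invented; it shows the
 * basic shape but deliberately omits the phase switch that lets a
 * late-attaching participant jump straight to the current phase.
 */
#include <pthread.h>
#include <stdio.h>

#define NPARTICIPANTS 4

static pthread_barrier_t grow_barrier;

static void *
participant(void *arg)
{
    int         id = *(int *) arg;

    /* phase 1: exactly one thread is "elected" to do the serial prep work */
    if (pthread_barrier_wait(&grow_barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
        printf("participant %d elected: allocating the new batch arrays\n", id);

    /* phase 2: wait for the elected thread, then everyone repartitions */
    pthread_barrier_wait(&grow_barrier);
    printf("participant %d repartitioning its share of tuples\n", id);
    return NULL;
}

int
main(void)
{
    pthread_t   threads[NPARTICIPANTS];
    int         ids[NPARTICIPANTS];

    pthread_barrier_init(&grow_barrier, NULL, NPARTICIPANTS);
    for (int i = 0; i < NPARTICIPANTS; i++)
    {
        ids[i] = i;
        pthread_create(&threads[i], NULL, participant, &ids[i]);
    }
    for (int i = 0; i < NPARTICIPANTS; i++)
        pthread_join(threads[i], NULL);
    pthread_barrier_destroy(&grow_barrier);
    return 0;
}
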
    1287             : 
    1288             : /*
    1289             :  * Repartition the tuples currently loaded into memory for inner batch 0
    1290             :  * because the number of batches has been increased.  Some tuples are retained
    1291             :  * in memory and some are written out to a later batch.
    1292             :  */
    1293             : static void
    1294          38 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
    1295             : {
    1296             :     dsa_pointer chunk_shared;
    1297             :     HashMemoryChunk chunk;
    1298             : 
    1299             :     Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
    1300             : 
    1301         236 :     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
    1302             :     {
    1303         198 :         size_t      idx = 0;
    1304             : 
    1305             :         /* Repartition all tuples in this chunk. */
    1306      151058 :         while (idx < chunk->used)
    1307             :         {
    1308      150860 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1309      150860 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1310             :             HashJoinTuple copyTuple;
    1311             :             dsa_pointer shared;
    1312             :             int         bucketno;
    1313             :             int         batchno;
    1314             : 
    1315      150860 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1316             :                                       &bucketno, &batchno);
    1317             : 
    1318             :             Assert(batchno < hashtable->nbatch);
    1319      150860 :             if (batchno == 0)
    1320             :             {
    1321             :                 /* It still belongs in batch 0.  Copy to a new chunk. */
    1322             :                 copyTuple =
    1323       35616 :                     ExecParallelHashTupleAlloc(hashtable,
    1324       35616 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1325             :                                                &shared);
    1326       35616 :                 copyTuple->hashvalue = hashTuple->hashvalue;
    1327       35616 :                 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
    1328       35616 :                 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1329             :                                           copyTuple, shared);
    1330             :             }
    1331             :             else
    1332             :             {
    1333      115244 :                 size_t      tuple_size =
    1334      115244 :                 MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1335             : 
    1336             :                 /* It belongs in a later batch. */
    1337      115244 :                 hashtable->batches[batchno].estimated_size += tuple_size;
    1338      115244 :                 sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1339      115244 :                              &hashTuple->hashvalue, tuple);
    1340             :             }
    1341             : 
    1342             :             /* Count this tuple. */
    1343      150860 :             ++hashtable->batches[0].old_ntuples;
    1344      150860 :             ++hashtable->batches[batchno].ntuples;
    1345             : 
    1346      150860 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1347             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1348             :         }
    1349             : 
    1350             :         /* Free this chunk. */
    1351         198 :         dsa_free(hashtable->area, chunk_shared);
    1352             : 
    1353         198 :         CHECK_FOR_INTERRUPTS();
    1354             :     }
    1355          38 : }
    1356             : 
    1357             : /*
    1358             :  * Help repartition inner batches 1..n.
    1359             :  */
    1360             : static void
    1361          38 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
    1362             : {
    1363          38 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1364          38 :     int         old_nbatch = pstate->old_nbatch;
    1365             :     SharedTuplestoreAccessor **old_inner_tuples;
    1366             :     ParallelHashJoinBatch *old_batches;
    1367             :     int         i;
    1368             : 
    1369             :     /* Get our hands on the previous generation of batches. */
    1370             :     old_batches = (ParallelHashJoinBatch *)
    1371          38 :         dsa_get_address(hashtable->area, pstate->old_batches);
    1372          38 :     old_inner_tuples = palloc0(sizeof(SharedTuplestoreAccessor *) * old_nbatch);
    1373         104 :     for (i = 1; i < old_nbatch; ++i)
    1374             :     {
    1375          66 :         ParallelHashJoinBatch *shared =
    1376          66 :         NthParallelHashJoinBatch(old_batches, i);
    1377             : 
    1378          66 :         old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
    1379             :                                          ParallelWorkerNumber + 1,
    1380             :                                          &pstate->fileset);
    1381             :     }
    1382             : 
    1383             :     /* Join in the effort to repartition them. */
    1384         104 :     for (i = 1; i < old_nbatch; ++i)
    1385             :     {
    1386             :         MinimalTuple tuple;
    1387             :         uint32      hashvalue;
    1388             : 
    1389             :         /* Scan one partition from the previous generation. */
    1390          66 :         sts_begin_parallel_scan(old_inner_tuples[i]);
    1391      136376 :         while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
    1392             :         {
    1393      136310 :             size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1394             :             int         bucketno;
    1395             :             int         batchno;
    1396             : 
    1397             :             /* Decide which partition it goes to in the new generation. */
    1398      136310 :             ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
    1399             :                                       &batchno);
    1400             : 
    1401      136310 :             hashtable->batches[batchno].estimated_size += tuple_size;
    1402      136310 :             ++hashtable->batches[batchno].ntuples;
    1403      136310 :             ++hashtable->batches[i].old_ntuples;
    1404             : 
    1405             :             /* Store the tuple in its new batch. */
    1406      136310 :             sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1407             :                          &hashvalue, tuple);
    1408             : 
    1409      136310 :             CHECK_FOR_INTERRUPTS();
    1410             :         }
    1411          66 :         sts_end_parallel_scan(old_inner_tuples[i]);
    1412             :     }
    1413             : 
    1414          38 :     pfree(old_inner_tuples);
    1415          38 : }
    1416             : 
    1417             : /*
    1418             :  * Transfer the backend-local per-batch counters to the shared totals.
    1419             :  */
    1420             : static void
    1421         184 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
    1422             : {
    1423         184 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1424             :     int         i;
    1425             : 
    1426         184 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    1427         184 :     pstate->total_tuples = 0;
    1428        1194 :     for (i = 0; i < hashtable->nbatch; ++i)
    1429             :     {
    1430        1010 :         ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
    1431             : 
    1432        1010 :         batch->shared->size += batch->size;
    1433        1010 :         batch->shared->estimated_size += batch->estimated_size;
    1434        1010 :         batch->shared->ntuples += batch->ntuples;
    1435        1010 :         batch->shared->old_ntuples += batch->old_ntuples;
    1436        1010 :         batch->size = 0;
    1437        1010 :         batch->estimated_size = 0;
    1438        1010 :         batch->ntuples = 0;
    1439        1010 :         batch->old_ntuples = 0;
    1440        1010 :         pstate->total_tuples += batch->shared->ntuples;
    1441             :     }
    1442         184 :     LWLockRelease(&pstate->lock);
    1443         184 : }
    1444             : 
    1445             : /*
    1446             :  * ExecHashIncreaseNumBuckets
    1447             :  *      increase the original number of buckets in order to reduce
    1448             :  *      the number of tuples per bucket
    1449             :  */
    1450             : static void
    1451         584 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
    1452             : {
    1453             :     HashMemoryChunk chunk;
    1454             : 
    1455             :     /* do nothing if not an increase (it's called increase for a reason) */
    1456         584 :     if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
    1457           0 :         return;
    1458             : 
    1459             : #ifdef HJDEBUG
    1460             :     printf("Hashjoin %p: increasing nbuckets %d => %d\n",
    1461             :            hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
    1462             : #endif
    1463             : 
    1464         584 :     hashtable->nbuckets = hashtable->nbuckets_optimal;
    1465         584 :     hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1466             : 
    1467             :     Assert(hashtable->nbuckets > 1);
    1468             :     Assert(hashtable->nbuckets <= (INT_MAX / 2));
    1469             :     Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
    1470             : 
    1471             :     /*
    1472             :      * Just reallocate the proper number of buckets - we don't need to walk
    1473             :      * through them - we can walk the dense-allocated chunks (just like in
    1474             :      * ExecHashIncreaseNumBatches, but without all the copying into new
    1475             :      * chunks)
    1476             :      */
    1477         584 :     hashtable->buckets.unshared =
    1478         584 :         (HashJoinTuple *) repalloc(hashtable->buckets.unshared,
    1479         584 :                                    hashtable->nbuckets * sizeof(HashJoinTuple));
    1480             : 
    1481         584 :     memset(hashtable->buckets.unshared, 0,
    1482         584 :            hashtable->nbuckets * sizeof(HashJoinTuple));
    1483             : 
    1484             :     /* scan through all tuples in all chunks to rebuild the hash table */
    1485        2822 :     for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
    1486             :     {
    1487             :         /* process all tuples stored in this chunk */
    1488        2238 :         size_t      idx = 0;
    1489             : 
    1490     1741782 :         while (idx < chunk->used)
    1491             :         {
    1492     1739544 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1493             :             int         bucketno;
    1494             :             int         batchno;
    1495             : 
    1496     1739544 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1497             :                                       &bucketno, &batchno);
    1498             : 
    1499             :             /* add the tuple to the proper bucket */
    1500     1739544 :             hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1501     1739544 :             hashtable->buckets.unshared[bucketno] = hashTuple;
    1502             : 
    1503             :             /* advance index past the tuple */
    1504     1739544 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1505             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1506             :         }
    1507             : 
    1508             :         /* allow this loop to be cancellable */
    1509        2238 :         CHECK_FOR_INTERRUPTS();
    1510             :     }
    1511             : }
    1512             : 
    1513             : static void
    1514          96 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
    1515             : {
    1516          96 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1517             :     int         i;
    1518             :     HashMemoryChunk chunk;
    1519             :     dsa_pointer chunk_s;
    1520             : 
    1521             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
    1522             : 
    1523             :     /*
    1524             :      * It's unlikely, but we need to be prepared for new participants to show
    1525             :      * up while we're in the middle of this operation, so we need to switch on
    1526             :      * the barrier phase here.
    1527             :      */
    1528          96 :     switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
    1529             :     {
    1530          96 :         case PHJ_GROW_BUCKETS_ELECTING:
    1531             :             /* Elect one participant to prepare to increase nbuckets. */
    1532          96 :             if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1533             :                                      WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
    1534             :             {
    1535             :                 size_t      size;
    1536             :                 dsa_pointer_atomic *buckets;
    1537             : 
    1538             :                 /* Double the size of the bucket array. */
    1539          72 :                 pstate->nbuckets *= 2;
    1540          72 :                 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
    1541          72 :                 hashtable->batches[0].shared->size += size / 2;
    1542          72 :                 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
    1543         144 :                 hashtable->batches[0].shared->buckets =
    1544          72 :                     dsa_allocate(hashtable->area, size);
    1545             :                 buckets = (dsa_pointer_atomic *)
    1546          72 :                     dsa_get_address(hashtable->area,
    1547          72 :                                     hashtable->batches[0].shared->buckets);
    1548      622664 :                 for (i = 0; i < pstate->nbuckets; ++i)
    1549      622592 :                     dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1550             : 
    1551             :                 /* Put the chunk list onto the work queue. */
    1552          72 :                 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
    1553             : 
    1554             :                 /* Clear the flag. */
    1555          72 :                 pstate->growth = PHJ_GROWTH_OK;
    1556             :             }
    1557             :             /* Fall through. */
    1558             : 
    1559             :         case PHJ_GROW_BUCKETS_ALLOCATING:
    1560             :             /* Wait for the above to complete. */
    1561          96 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1562             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATE);
    1563             :             /* Fall through. */
    1564             : 
    1565          96 :         case PHJ_GROW_BUCKETS_REINSERTING:
    1566             :             /* Reinsert all tuples into the hash table. */
    1567          96 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1568          96 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1569         540 :             while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
    1570             :             {
    1571         444 :                 size_t      idx = 0;
    1572             : 
    1573      363636 :                 while (idx < chunk->used)
    1574             :                 {
    1575      363192 :                     HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1576      363192 :                     dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
    1577             :                     int         bucketno;
    1578             :                     int         batchno;
    1579             : 
    1580      363192 :                     ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1581             :                                               &bucketno, &batchno);
    1582             :                     Assert(batchno == 0);
    1583             : 
    1584             :                     /* add the tuple to the proper bucket */
    1585      363192 :                     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1586             :                                               hashTuple, shared);
    1587             : 
    1588             :                     /* advance index past the tuple */
    1589      363192 :                     idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1590             :                                     HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1591             :                 }
    1592             : 
    1593             :                 /* allow this loop to be cancellable */
    1594         444 :                 CHECK_FOR_INTERRUPTS();
    1595             :             }
    1596          96 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1597             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
    1598             :     }
    1599          96 : }
    1600             : 
    1601             : /*
    1602             :  * ExecHashTableInsert
    1603             :  *      insert a tuple into the hash table depending on the hash value;
    1604             :  *      it may just go to a temp file for later batches
    1605             :  *
    1606             :  * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
    1607             :  * tuple; the minimal case in particular is certain to happen while reloading
    1608             :  * tuples from batch files.  We could save some cycles in the regular-tuple
    1609             :  * case by not forcing the slot contents into minimal form; not clear if it's
    1610             :  * worth the messiness required.
    1611             :  */
    1612             : void
    1613    16401612 : ExecHashTableInsert(HashJoinTable hashtable,
    1614             :                     TupleTableSlot *slot,
    1615             :                     uint32 hashvalue)
    1616             : {
    1617             :     bool        shouldFree;
    1618    16401612 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1619             :     int         bucketno;
    1620             :     int         batchno;
    1621             : 
    1622    16401612 :     ExecHashGetBucketAndBatch(hashtable, hashvalue,
    1623             :                               &bucketno, &batchno);
    1624             : 
    1625             :     /*
    1626             :      * decide whether to put the tuple in the hash table or a temp file
    1627             :      */
    1628    16401612 :     if (batchno == hashtable->curbatch)
    1629             :     {
    1630             :         /*
    1631             :          * put the tuple in hash table
    1632             :          */
    1633             :         HashJoinTuple hashTuple;
    1634             :         int         hashTupleSize;
    1635    14997870 :         double      ntuples = (hashtable->totalTuples - hashtable->skewTuples);
    1636             : 
    1637             :         /* Create the HashJoinTuple */
    1638    14997870 :         hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    1639    14997870 :         hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1640             : 
    1641    14997870 :         hashTuple->hashvalue = hashvalue;
    1642    14997870 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1643             : 
    1644             :         /*
    1645             :          * We always reset the tuple-matched flag on insertion.  This is okay
    1646             :          * even when reloading a tuple from a batch file, since the tuple
    1647             :          * could not possibly have been matched to an outer tuple before it
    1648             :          * went into the batch file.
    1649             :          */
    1650    14997870 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1651             : 
    1652             :         /* Push it onto the front of the bucket's list */
    1653    14997870 :         hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1654    14997870 :         hashtable->buckets.unshared[bucketno] = hashTuple;
    1655             : 
    1656             :         /*
    1657             :          * Increase the (optimal) number of buckets if we just exceeded the
    1658             :          * NTUP_PER_BUCKET threshold, but only when there's still a single
    1659             :          * batch.
    1660             :          */
    1661    14997870 :         if (hashtable->nbatch == 1 &&
    1662    13410086 :             ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
    1663             :         {
    1664             :             /* Guard against integer overflow and alloc size overflow */
    1665        1134 :             if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
    1666        1134 :                 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
    1667             :             {
    1668        1134 :                 hashtable->nbuckets_optimal *= 2;
    1669        1134 :                 hashtable->log2_nbuckets_optimal += 1;
    1670             :             }
    1671             :         }
    1672             : 
    1673             :         /* Account for space used, and back off if we've used too much */
    1674    14997870 :         hashtable->spaceUsed += hashTupleSize;
    1675    14997870 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    1676    13771374 :             hashtable->spacePeak = hashtable->spaceUsed;
    1677    14997870 :         if (hashtable->spaceUsed +
    1678    14997870 :             hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
    1679    14997870 :             > hashtable->spaceAllowed)
    1680      276394 :             ExecHashIncreaseNumBatches(hashtable);
    1681             :     }
    1682             :     else
    1683             :     {
    1684             :         /*
    1685             :          * put the tuple into a temp file for later batches
    1686             :          */
    1687             :         Assert(batchno > hashtable->curbatch);
    1688     1403742 :         ExecHashJoinSaveTuple(tuple,
    1689             :                               hashvalue,
    1690     1403742 :                               &hashtable->innerBatchFile[batchno]);
    1691             :     }
    1692             : 
    1693    16401612 :     if (shouldFree)
    1694    14193380 :         heap_free_minimal_tuple(tuple);
    1695    16401612 : }
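
/*
 * Editor's note: a minimal sketch of the nbuckets_optimal growth rule used
 * above, assuming NTUP_PER_BUCKET is 1 and an initial target of 1024
 * buckets (both assumed values here).  Each time the in-memory tuple count
 * crosses nbuckets_optimal * NTUP_PER_BUCKET, the target doubles; within
 * this file the bucket array itself is only resized later, by the
 * rebatching/rebucketing code.
 */
#include <stdio.h>

#define TOY_NTUP_PER_BUCKET 1

int
main(void)
{
    long        nbuckets_optimal = 1024;
    int         log2_nbuckets_optimal = 10;

    for (long ntuples = 1; ntuples <= 5000; ntuples++)
    {
        if (ntuples > nbuckets_optimal * TOY_NTUP_PER_BUCKET)
        {
            nbuckets_optimal *= 2;
            log2_nbuckets_optimal++;
            printf("tuple %ld: nbuckets_optimal doubled to %ld (2^%d)\n",
                   ntuples, nbuckets_optimal, log2_nbuckets_optimal);
        }
    }
    return 0;
}
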
    1696             : 
    1697             : /*
    1698             :  * ExecParallelHashTableInsert
    1699             :  *      insert a tuple into a shared hash table or shared batch tuplestore
    1700             :  */
    1701             : void
    1702     1200068 : ExecParallelHashTableInsert(HashJoinTable hashtable,
    1703             :                             TupleTableSlot *slot,
    1704             :                             uint32 hashvalue)
    1705             : {
    1706             :     bool        shouldFree;
    1707     1200068 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1708             :     dsa_pointer shared;
    1709             :     int         bucketno;
    1710             :     int         batchno;
    1711             : 
    1712     1200310 : retry:
    1713     1200310 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1714             : 
    1715     1200310 :     if (batchno == 0)
    1716             :     {
    1717             :         HashJoinTuple hashTuple;
    1718             : 
    1719             :         /* Try to load it into memory. */
    1720             :         Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
    1721             :                PHJ_BUILD_HASHING_INNER);
    1722      661164 :         hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1723      661164 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1724             :                                                &shared);
    1725      661164 :         if (hashTuple == NULL)
    1726         216 :             goto retry;
    1727             : 
    1728             :         /* Store the hash value in the HashJoinTuple header. */
    1729      660948 :         hashTuple->hashvalue = hashvalue;
    1730      660948 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1731             : 
    1732             :         /* Push it onto the front of the bucket's list */
    1733      660948 :         ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1734             :                                   hashTuple, shared);
    1735             :     }
    1736             :     else
    1737             :     {
    1738      539146 :         size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1739             : 
    1740             :         Assert(batchno > 0);
    1741             : 
    1742             :         /* Try to preallocate space in the batch if necessary. */
    1743      539146 :         if (hashtable->batches[batchno].preallocated < tuple_size)
    1744             :         {
    1745        1060 :             if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
    1746          26 :                 goto retry;
    1747             :         }
    1748             : 
    1749             :         Assert(hashtable->batches[batchno].preallocated >= tuple_size);
    1750      539120 :         hashtable->batches[batchno].preallocated -= tuple_size;
    1751      539120 :         sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
    1752             :                      tuple);
    1753             :     }
    1754     1200068 :     ++hashtable->batches[batchno].ntuples;
    1755             : 
    1756     1200068 :     if (shouldFree)
    1757     1200068 :         heap_free_minimal_tuple(tuple);
    1758     1200068 : }
    1759             : 
    1760             : /*
    1761             :  * Insert a tuple into the current hash table.  Unlike
    1762             :  * ExecParallelHashTableInsert, this version is not prepared to send the tuple
    1763             :  * to other batches or to run out of memory, and should only be called with
    1764             :  * tuples that belong in the current batch once growth has been disabled.
    1765             :  */
    1766             : void
    1767      654364 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
    1768             :                                         TupleTableSlot *slot,
    1769             :                                         uint32 hashvalue)
    1770             : {
    1771             :     bool        shouldFree;
    1772      654364 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1773             :     HashJoinTuple hashTuple;
    1774             :     dsa_pointer shared;
    1775             :     int         batchno;
    1776             :     int         bucketno;
    1777             : 
    1778      654364 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1779             :     Assert(batchno == hashtable->curbatch);
    1780      654364 :     hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1781      654364 :                                            HJTUPLE_OVERHEAD + tuple->t_len,
    1782             :                                            &shared);
    1783      654364 :     hashTuple->hashvalue = hashvalue;
    1784      654364 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1785      654364 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1786      654364 :     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1787             :                               hashTuple, shared);
    1788             : 
    1789      654364 :     if (shouldFree)
    1790           0 :         heap_free_minimal_tuple(tuple);
    1791      654364 : }
    1792             : 
    1793             : /*
    1794             :  * ExecHashGetHashValue
    1795             :  *      Compute the hash value for a tuple
    1796             :  *
    1797             :  * The tuple to be tested must be in econtext->ecxt_outertuple (thus Vars in
    1798             :  * the hashkeys expressions need to have OUTER_VAR as varno). If outer_tuple
    1799             :  * is false (meaning it's the HashJoin's inner node, Hash), econtext,
    1800             :  * hashkeys, and slot need to be from Hash, with hashkeys/slot referencing and
    1801             :  * being suitable for tuples from the node below the Hash. Conversely, if
    1802             :  * outer_tuple is true, econtext is from HashJoin, and hashkeys/slot need to
    1803             :  * be appropriate for tuples from HashJoin's outer node.
    1804             :  *
    1805             :  * A true result means the tuple's hash value has been successfully computed
    1806             :  * and stored at *hashvalue.  A false result means the tuple cannot match
    1807             :  * because it contains a null attribute, and hence it should be discarded
    1808             :  * immediately.  (If keep_nulls is true then false is never returned.)
    1809             :  */
    1810             : bool
    1811    28838430 : ExecHashGetHashValue(HashJoinTable hashtable,
    1812             :                      ExprContext *econtext,
    1813             :                      List *hashkeys,
    1814             :                      bool outer_tuple,
    1815             :                      bool keep_nulls,
    1816             :                      uint32 *hashvalue)
    1817             : {
    1818    28838430 :     uint32      hashkey = 0;
    1819             :     FmgrInfo   *hashfunctions;
    1820             :     ListCell   *hk;
    1821    28838430 :     int         i = 0;
    1822             :     MemoryContext oldContext;
    1823             : 
    1824             :     /*
    1825             :      * We reset the eval context each time to reclaim any memory leaked in the
    1826             :      * hashkey expressions.
    1827             :      */
    1828    28838430 :     ResetExprContext(econtext);
    1829             : 
    1830    28838430 :     oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
    1831             : 
    1832    28838430 :     if (outer_tuple)
    1833    12761122 :         hashfunctions = hashtable->outer_hashfunctions;
    1834             :     else
    1835    16077308 :         hashfunctions = hashtable->inner_hashfunctions;
    1836             : 
    1837    58889300 :     foreach(hk, hashkeys)
    1838             :     {
    1839    30051398 :         ExprState  *keyexpr = (ExprState *) lfirst(hk);
    1840             :         Datum       keyval;
    1841             :         bool        isNull;
    1842             : 
    1843             :         /* rotate hashkey left 1 bit at each step */
    1844    30051398 :         hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
    1845             : 
    1846             :         /*
    1847             :          * Get the join attribute value of the tuple
    1848             :          */
    1849    30051398 :         keyval = ExecEvalExpr(keyexpr, econtext, &isNull);
    1850             : 
    1851             :         /*
    1852             :          * If the attribute is NULL, and the join operator is strict, then
    1853             :          * this tuple cannot pass the join qual so we can reject it
    1854             :          * immediately (unless we're scanning the outside of an outer join, in
    1855             :          * which case we must not reject it).  Otherwise we act like the
    1856             :          * hashcode of NULL is zero (this will support operators that act like
    1857             :          * IS NOT DISTINCT, though not any more-random behavior).  We treat
    1858             :          * the hash support function as strict even if the operator is not.
    1859             :          *
    1860             :          * Note: currently, all hashjoinable operators must be strict since
    1861             :          * the hash index AM assumes that.  However, it takes so little extra
    1862             :          * code here to allow non-strict that we may as well do it.
    1863             :          */
    1864    30051398 :         if (isNull)
    1865             :         {
    1866         702 :             if (hashtable->hashStrict[i] && !keep_nulls)
    1867             :             {
    1868         528 :                 MemoryContextSwitchTo(oldContext);
    1869         528 :                 return false;   /* cannot match */
    1870             :             }
    1871             :             /* else, leave hashkey unmodified, equivalent to hashcode 0 */
    1872             :         }
    1873             :         else
    1874             :         {
    1875             :             /* Compute the hash function */
    1876             :             uint32      hkey;
    1877             : 
    1878    30050696 :             hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->collations[i], keyval));
    1879    30050696 :             hashkey ^= hkey;
    1880             :         }
    1881             : 
    1882    30050870 :         i++;
    1883             :     }
    1884             : 
    1885    28837902 :     MemoryContextSwitchTo(oldContext);
    1886             : 
    1887    28837902 :     *hashvalue = hashkey;
    1888    28837902 :     return true;
    1889             : }
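
The loop above builds the combined hash code by rotating the running value left one bit and then XORing in each key's 32-bit hash, with a NULL key contributing zero.  Below is a minimal standalone sketch of just that combining step, assuming the per-key hashes have already been computed; combine_hashes and keyhashes are hypothetical names invented for this example, not part of nodeHash.c.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    combine_hashes(const uint32_t *keyhashes, int nkeys)
    {
        uint32_t    hashkey = 0;
        int         i;

        for (i = 0; i < nkeys; i++)
        {
            /* rotate the accumulated value left 1 bit, as the loop above does */
            hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);
            /* a NULL key would contribute 0 here, leaving hashkey unchanged */
            hashkey ^= keyhashes[i];
        }
        return hashkey;
    }

    int
    main(void)
    {
        uint32_t    keys[] = {0xDEADBEEF, 0x12345678};

        printf("combined hash = %08X\n", (unsigned) combine_hashes(keys, 2));
        return 0;
    }
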
    1890             : 
    1891             : /*
    1892             :  * ExecHashGetBucketAndBatch
    1893             :  *      Determine the bucket number and batch number for a hash value
    1894             :  *
    1895             :  * Note: on-the-fly increases of nbatch must not change the bucket number
    1896             :  * for a given hash code (since we don't move tuples to different hash
    1897             :  * chains), and must only cause the batch number to remain the same or
    1898             :  * increase.  Our algorithm is
    1899             :  *      bucketno = hashvalue MOD nbuckets
    1900             :  *      batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
    1901             :  * where nbuckets and nbatch are both expected to be powers of 2, so we can
    1902             :  * do the computations by shifting and masking.  (This assumes that all hash
    1903             :  * functions are good about randomizing all their output bits, else we are
    1904             :  * likely to have very skewed bucket or batch occupancy.)
    1905             :  *
    1906             :  * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
    1907             :  * bucket count growth.  Once we start batching, the value is fixed and does
    1908             :  * not change over the course of the join (making it possible to compute batch
    1909             :  * number the way we do here).
    1910             :  *
    1911             :  * nbatch is always a power of 2; we increase it only by doubling it.  This
    1912             :  * effectively adds one more bit to the top of the batchno.  In very large
    1913             :  * joins, we might run out of bits to add, so we do this by rotating the hash
    1914             :  * value.  This causes batchno to steal bits from bucketno when the number of
    1915             :  * virtual buckets exceeds 2^32.  It's better to have longer bucket chains
    1916             :  * than to lose the ability to divide batches.
    1917             :  */
    1918             : void
    1919    35309468 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
    1920             :                           uint32 hashvalue,
    1921             :                           int *bucketno,
    1922             :                           int *batchno)
    1923             : {
    1924    35309468 :     uint32      nbuckets = (uint32) hashtable->nbuckets;
    1925    35309468 :     uint32      nbatch = (uint32) hashtable->nbatch;
    1926             : 
    1927    35309468 :     if (nbatch > 1)
    1928             :     {
    1929     8404536 :         *bucketno = hashvalue & (nbuckets - 1);
    1930     8404536 :         *batchno = pg_rotate_right32(hashvalue,
    1931     8404536 :                                      hashtable->log2_nbuckets) & (nbatch - 1);
    1932             :     }
    1933             :     else
    1934             :     {
    1935    26904932 :         *bucketno = hashvalue & (nbuckets - 1);
    1936    26904932 :         *batchno = 0;
    1937             :     }
    1938    35309468 : }
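
The header comment above gives the bucketno/batchno formulas; the standalone sketch below shows the same shift-and-mask arithmetic for power-of-two nbuckets and nbatch.  rotate_right32 is a local stand-in written for this example (pg_rotate_right32 plays that role in the real code), and the concrete values are arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t
    rotate_right32(uint32_t x, int n)
    {
        /* n is assumed to be in [1, 31] for this sketch */
        return (x >> n) | (x << (32 - n));
    }

    int
    main(void)
    {
        uint32_t    hashvalue = 0x9E3779B9;
        uint32_t    nbuckets = 1024;        /* 2^10 */
        int         log2_nbuckets = 10;
        uint32_t    nbatch = 8;             /* 2^3 */
        uint32_t    bucketno;
        uint32_t    batchno;

        /* bucketno = hashvalue MOD nbuckets, done by masking the low bits */
        bucketno = hashvalue & (nbuckets - 1);
        /* batchno takes the bits just above the bucket bits, so doubling
         * nbatch adds one higher bit to batchno and never moves a tuple to
         * a different bucket */
        batchno = rotate_right32(hashvalue, log2_nbuckets) & (nbatch - 1);

        printf("bucketno = %u, batchno = %u\n",
               (unsigned) bucketno, (unsigned) batchno);
        return 0;
    }
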
    1939             : 
    1940             : /*
    1941             :  * ExecScanHashBucket
    1942             :  *      scan a hash bucket for matches to the current outer tuple
    1943             :  *
    1944             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1945             :  *
    1946             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1947             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1948             :  * for the latter.
    1949             :  */
    1950             : bool
    1951    14249160 : ExecScanHashBucket(HashJoinState *hjstate,
    1952             :                    ExprContext *econtext)
    1953             : {
    1954    14249160 :     ExprState  *hjclauses = hjstate->hashclauses;
    1955    14249160 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1956    14249160 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1957    14249160 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1958             : 
    1959             :     /*
    1960             :      * hj_CurTuple is the address of the tuple last returned from the current
    1961             :      * bucket, or NULL if it's time to start scanning a new bucket.
    1962             :      *
    1963             :      * If the tuple hashed to a skew bucket then scan the skew bucket;
    1964             :      * otherwise scan the standard hashtable bucket.
    1965             :      */
    1966    14249160 :     if (hashTuple != NULL)
    1967     2688574 :         hashTuple = hashTuple->next.unshared;
    1968    11560586 :     else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
    1969         800 :         hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
    1970             :     else
    1971    11559786 :         hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    1972             : 
    1973    17232360 :     while (hashTuple != NULL)
    1974             :     {
    1975     8746278 :         if (hashTuple->hashvalue == hashvalue)
    1976             :         {
    1977             :             TupleTableSlot *inntuple;
    1978             : 
    1979             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    1980     5763082 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    1981             :                                              hjstate->hj_HashTupleSlot,
    1982             :                                              false);    /* do not pfree */
    1983     5763082 :             econtext->ecxt_innertuple = inntuple;
    1984             : 
    1985     5763082 :             if (ExecQualAndReset(hjclauses, econtext))
    1986             :             {
    1987     5763078 :                 hjstate->hj_CurTuple = hashTuple;
    1988     5763078 :                 return true;
    1989             :             }
    1990             :         }
    1991             : 
    1992     2983200 :         hashTuple = hashTuple->next.unshared;
    1993             :     }
    1994             : 
    1995             :     /*
    1996             :      * no match
    1997             :      */
    1998     8486082 :     return false;
    1999             : }
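
Because ExecScanHashBucket resumes from hj_CurTuple, repeated calls enumerate every match for the current probe value one at a time.  Here is a reduced, self-contained sketch of that resumable-scan pattern over a plain singly linked chain; it only compares stored hash values, whereas the real function also re-checks the hash clauses, and Entry/scan_bucket are hypothetical names.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct Entry
    {
        struct Entry *next;
        uint32_t    hashvalue;
        int         payload;
    } Entry;

    static Entry *
    scan_bucket(Entry *bucket_head, uint32_t hashvalue, Entry **cur)
    {
        /* resume after the last match, or start at the head of the chain */
        Entry      *e = (*cur != NULL) ? (*cur)->next : bucket_head;

        for (; e != NULL; e = e->next)
        {
            if (e->hashvalue == hashvalue)
            {
                *cur = e;           /* remember position for the next call */
                return e;
            }
        }
        *cur = NULL;
        return NULL;
    }

    int
    main(void)
    {
        Entry       c = {NULL, 7, 3};
        Entry       b = {&c, 5, 2};
        Entry       a = {&b, 7, 1};
        Entry      *cur = NULL;
        Entry      *m;

        /* prints the payloads 1 and 3, the two entries hashing to 7 */
        while ((m = scan_bucket(&a, 7, &cur)) != NULL)
            printf("match payload %d\n", m->payload);
        return 0;
    }
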
    2000             : 
    2001             : /*
    2002             :  * ExecParallelScanHashBucket
    2003             :  *      scan a hash bucket for matches to the current outer tuple
    2004             :  *
    2005             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    2006             :  *
    2007             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2008             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2009             :  * for the latter.
    2010             :  */
    2011             : bool
    2012     2400032 : ExecParallelScanHashBucket(HashJoinState *hjstate,
    2013             :                            ExprContext *econtext)
    2014             : {
    2015     2400032 :     ExprState  *hjclauses = hjstate->hashclauses;
    2016     2400032 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2017     2400032 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2018     2400032 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    2019             : 
    2020             :     /*
    2021             :      * hj_CurTuple is the address of the tuple last returned from the current
    2022             :      * bucket, or NULL if it's time to start scanning a new bucket.
    2023             :      */
    2024     2400032 :     if (hashTuple != NULL)
    2025     1200016 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2026             :     else
    2027     1200016 :         hashTuple = ExecParallelHashFirstTuple(hashtable,
    2028             :                                                hjstate->hj_CurBucketNo);
    2029             : 
    2030     3180440 :     while (hashTuple != NULL)
    2031             :     {
    2032     1980424 :         if (hashTuple->hashvalue == hashvalue)
    2033             :         {
    2034             :             TupleTableSlot *inntuple;
    2035             : 
    2036             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2037     1200016 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2038             :                                              hjstate->hj_HashTupleSlot,
    2039             :                                              false);    /* do not pfree */
    2040     1200016 :             econtext->ecxt_innertuple = inntuple;
    2041             : 
    2042     1200016 :             if (ExecQualAndReset(hjclauses, econtext))
    2043             :             {
    2044     1200016 :                 hjstate->hj_CurTuple = hashTuple;
    2045     1200016 :                 return true;
    2046             :             }
    2047             :         }
    2048             : 
    2049      780408 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2050             :     }
    2051             : 
    2052             :     /*
    2053             :      * no match
    2054             :      */
    2055     1200016 :     return false;
    2056             : }
    2057             : 
    2058             : /*
    2059             :  * ExecPrepHashTableForUnmatched
    2060             :  *      set up for a series of ExecScanHashTableForUnmatched calls
    2061             :  */
    2062             : void
    2063       11134 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
    2064             : {
    2065             :     /*----------
    2066             :      * During this scan we use the HashJoinState fields as follows:
    2067             :      *
    2068             :      * hj_CurBucketNo: next regular bucket to scan
    2069             :      * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
    2070             :      * hj_CurTuple: last tuple returned, or NULL to start next bucket
    2071             :      *----------
    2072             :      */
    2073       11134 :     hjstate->hj_CurBucketNo = 0;
    2074       11134 :     hjstate->hj_CurSkewBucketNo = 0;
    2075       11134 :     hjstate->hj_CurTuple = NULL;
    2076       11134 : }
    2077             : 
    2078             : /*
    2079             :  * ExecScanHashTableForUnmatched
    2080             :  *      scan the hash table for unmatched inner tuples
    2081             :  *
    2082             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2083             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2084             :  * for the latter.
    2085             :  */
    2086             : bool
    2087      228750 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
    2088             : {
    2089      228750 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2090      228750 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2091             : 
    2092             :     for (;;)
    2093             :     {
    2094             :         /*
    2095             :          * hj_CurTuple is the address of the tuple last returned from the
    2096             :          * current bucket, or NULL if it's time to start scanning a new
    2097             :          * bucket.
    2098             :          */
    2099    12248462 :         if (hashTuple != NULL)
    2100      217616 :             hashTuple = hashTuple->next.unshared;
    2101    12030846 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2102             :         {
    2103    12019716 :             hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2104    12019716 :             hjstate->hj_CurBucketNo++;
    2105             :         }
    2106       11130 :         else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
    2107             :         {
    2108           0 :             int         j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
    2109             : 
    2110           0 :             hashTuple = hashtable->skewBucket[j]->tuples;
    2111           0 :             hjstate->hj_CurSkewBucketNo++;
    2112             :         }
    2113             :         else
    2114       11130 :             break;              /* finished all buckets */
    2115             : 
    2116    12484296 :         while (hashTuple != NULL)
    2117             :         {
    2118      464584 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2119             :             {
    2120             :                 TupleTableSlot *inntuple;
    2121             : 
    2122             :                 /* insert hashtable's tuple into exec slot */
    2123      217620 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2124             :                                                  hjstate->hj_HashTupleSlot,
    2125             :                                                  false);    /* do not pfree */
    2126      217620 :                 econtext->ecxt_innertuple = inntuple;
    2127             : 
    2128             :                 /*
    2129             :                  * Reset temp memory each time; although this function doesn't
    2130             :                  * do any qual eval, the caller will, so let's keep it
    2131             :                  * parallel to ExecScanHashBucket.
    2132             :                  */
    2133      217620 :                 ResetExprContext(econtext);
    2134             : 
    2135      217620 :                 hjstate->hj_CurTuple = hashTuple;
    2136      217620 :                 return true;
    2137             :             }
    2138             : 
    2139      246964 :             hashTuple = hashTuple->next.unshared;
    2140             :         }
    2141             : 
    2142             :         /* allow this loop to be cancellable */
    2143    12019712 :         CHECK_FOR_INTERRUPTS();
    2144             :     }
    2145             : 
    2146             :     /*
    2147             :      * no more unmatched tuples
    2148             :      */
    2149       11130 :     return false;
    2150             : }
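
ExecPrepHashTableForUnmatched and ExecScanHashTableForUnmatched together let a right or full hash join emit inner tuples whose match flag was never set during probing.  Below is a stripped-down, self-contained sketch of that final sweep, with a hypothetical Entry type and a plain boolean standing in for the HeapTupleHeaderHasMatch bit.

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Entry
    {
        struct Entry *next;
        bool        matched;        /* set when a probe found this entry */
        int         payload;
    } Entry;

    static void
    emit_unmatched(Entry **buckets, int nbuckets)
    {
        int         i;

        /* walk every bucket chain and report entries never matched */
        for (i = 0; i < nbuckets; i++)
        {
            Entry      *e;

            for (e = buckets[i]; e != NULL; e = e->next)
                if (!e->matched)
                    printf("unmatched inner row: %d\n", e->payload);
        }
    }

    int
    main(void)
    {
        Entry       b = {NULL, true, 2};
        Entry       a = {&b, false, 1};
        Entry      *buckets[1] = {&a};

        emit_unmatched(buckets, 1);     /* prints only payload 1 */
        return 0;
    }
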
    2151             : 
    2152             : /*
    2153             :  * ExecHashTableReset
    2154             :  *
    2155             :  *      reset hash table header for new batch
    2156             :  */
    2157             : void
    2158         776 : ExecHashTableReset(HashJoinTable hashtable)
    2159             : {
    2160             :     MemoryContext oldcxt;
    2161         776 :     int         nbuckets = hashtable->nbuckets;
    2162             : 
    2163             :     /*
    2164             :      * Release all the hash buckets and tuples acquired in the prior pass, and
    2165             :      * reinitialize the context for a new pass.
    2166             :      */
    2167         776 :     MemoryContextReset(hashtable->batchCxt);
    2168         776 :     oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
    2169             : 
    2170             :     /* Reallocate and reinitialize the hash bucket headers. */
    2171         776 :     hashtable->buckets.unshared = (HashJoinTuple *)
    2172         776 :         palloc0(nbuckets * sizeof(HashJoinTuple));
    2173             : 
    2174         776 :     hashtable->spaceUsed = 0;
    2175             : 
    2176         776 :     MemoryContextSwitchTo(oldcxt);
    2177             : 
    2178             :     /* Forget the chunks (the memory was freed by the context reset above). */
    2179         776 :     hashtable->chunks = NULL;
    2180         776 : }
    2181             : 
    2182             : /*
    2183             :  * ExecHashTableResetMatchFlags
    2184             :  *      Clear all the HeapTupleHeaderHasMatch flags in the table
    2185             :  */
    2186             : void
    2187          20 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
    2188             : {
    2189             :     HashJoinTuple tuple;
    2190             :     int         i;
    2191             : 
    2192             :     /* Reset all flags in the main table ... */
    2193       20500 :     for (i = 0; i < hashtable->nbuckets; i++)
    2194             :     {
    2195       20568 :         for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
    2196          88 :              tuple = tuple->next.unshared)
    2197          88 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2198             :     }
    2199             : 
    2200             :     /* ... and the same for the skew buckets, if any */
    2201          20 :     for (i = 0; i < hashtable->nSkewBuckets; i++)
    2202             :     {
    2203           0 :         int         j = hashtable->skewBucketNums[i];
    2204           0 :         HashSkewBucket *skewBucket = hashtable->skewBucket[j];
    2205             : 
    2206           0 :         for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
    2207           0 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2208             :     }
    2209          20 : }
    2210             : 
    2211             : 
    2212             : void
    2213      360126 : ExecReScanHash(HashState *node)
    2214             : {
    2215             :     /*
    2216             :      * if chgParam of subnode is not null then plan will be re-scanned by
    2217             :      * first ExecProcNode.
    2218             :      */
    2219      360126 :     if (node->ps.lefttree->chgParam == NULL)
    2220           0 :         ExecReScan(node->ps.lefttree);
    2221      360126 : }
    2222             : 
    2223             : 
    2224             : /*
    2225             :  * ExecHashBuildSkewHash
    2226             :  *
    2227             :  *      Set up for skew optimization if we can identify the most common values
    2228             :  *      (MCVs) of the outer relation's join key.  We make a skew hash bucket
    2229             :  *      for the hash value of each MCV, up to the number of slots allowed
    2230             :  *      based on available memory.
    2231             :  */
    2232             : static void
    2233          70 : ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
    2234             : {
    2235             :     HeapTupleData *statsTuple;
    2236             :     AttStatsSlot sslot;
    2237             : 
    2238             :     /* Do nothing if planner didn't identify the outer relation's join key */
    2239          70 :     if (!OidIsValid(node->skewTable))
    2240          48 :         return;
    2241             :     /* Also, do nothing if we don't have room for at least one skew bucket */
    2242          70 :     if (mcvsToUse <= 0)
    2243           0 :         return;
    2244             : 
    2245             :     /*
    2246             :      * Try to find the MCV statistics for the outer relation's join key.
    2247             :      */
    2248          70 :     statsTuple = SearchSysCache3(STATRELATTINH,
    2249          70 :                                  ObjectIdGetDatum(node->skewTable),
    2250          70 :                                  Int16GetDatum(node->skewColumn),
    2251          70 :                                  BoolGetDatum(node->skewInherit));
    2252          70 :     if (!HeapTupleIsValid(statsTuple))
    2253          48 :         return;
    2254             : 
    2255          22 :     if (get_attstatsslot(&sslot, statsTuple,
    2256             :                          STATISTIC_KIND_MCV, InvalidOid,
    2257             :                          ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    2258             :     {
    2259             :         double      frac;
    2260             :         int         nbuckets;
    2261             :         FmgrInfo   *hashfunctions;
    2262             :         int         i;
    2263             : 
    2264           4 :         if (mcvsToUse > sslot.nvalues)
    2265           0 :             mcvsToUse = sslot.nvalues;
    2266             : 
    2267             :         /*
    2268             :          * Calculate the expected fraction of outer relation that will
    2269             :          * participate in the skew optimization.  If this isn't at least
    2270             :          * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
    2271             :          */
    2272           4 :         frac = 0;
    2273          44 :         for (i = 0; i < mcvsToUse; i++)
    2274          40 :             frac += sslot.numbers[i];
    2275           4 :         if (frac < SKEW_MIN_OUTER_FRACTION)
    2276             :         {
    2277           0 :             free_attstatsslot(&sslot);
    2278           0 :             ReleaseSysCache(statsTuple);
    2279           0 :             return;
    2280             :         }
    2281             : 
    2282             :         /*
    2283             :          * Okay, set up the skew hashtable.
    2284             :          *
    2285             :          * skewBucket[] is an open addressing hashtable with a power of 2 size
    2286             :          * that is greater than the number of MCV values.  (This ensures there
    2287             :          * will be at least one null entry, so searches will always
    2288             :          * terminate.)
    2289             :          *
    2290             :          * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
    2291             :          * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
    2292             :          * since we limit pg_statistic entries to much less than that.
    2293             :          */
    2294           4 :         nbuckets = pg_nextpower2_32(mcvsToUse + 1);
    2295             :         /* use two more bits just to help avoid collisions */
    2296           4 :         nbuckets <<= 2;
    2297             : 
    2298           4 :         hashtable->skewEnabled = true;
    2299           4 :         hashtable->skewBucketLen = nbuckets;
    2300             : 
    2301             :         /*
    2302             :          * We allocate the bucket memory in the hashtable's batch context. It
    2303             :          * is only needed during the first batch, and this ensures it will be
    2304             :          * automatically removed once the first batch is done.
    2305             :          */
    2306           4 :         hashtable->skewBucket = (HashSkewBucket **)
    2307           4 :             MemoryContextAllocZero(hashtable->batchCxt,
    2308             :                                    nbuckets * sizeof(HashSkewBucket *));
    2309           4 :         hashtable->skewBucketNums = (int *)
    2310           4 :             MemoryContextAllocZero(hashtable->batchCxt,
    2311             :                                    mcvsToUse * sizeof(int));
    2312             : 
    2313           4 :         hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
    2314           4 :             + mcvsToUse * sizeof(int);
    2315           4 :         hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
    2316           4 :             + mcvsToUse * sizeof(int);
    2317           4 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    2318           4 :             hashtable->spacePeak = hashtable->spaceUsed;
    2319             : 
    2320             :         /*
    2321             :          * Create a skew bucket for each MCV hash value.
    2322             :          *
    2323             :          * Note: it is very important that we create the buckets in order of
    2324             :          * decreasing MCV frequency.  If we have to remove some buckets, they
    2325             :          * must be removed in reverse order of creation (see notes in
    2326             :          * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
    2327             :          * be removed first.
    2328             :          */
    2329           4 :         hashfunctions = hashtable->outer_hashfunctions;
    2330             : 
    2331          44 :         for (i = 0; i < mcvsToUse; i++)
    2332             :         {
    2333             :             uint32      hashvalue;
    2334             :             int         bucket;
    2335             : 
    2336          40 :             hashvalue = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[0],
    2337             :                                                          hashtable->collations[0],
    2338             :                                                          sslot.values[i]));
    2339             : 
    2340             :             /*
    2341             :              * While we have not hit a hole in the hashtable and have not hit
    2342             :              * the desired bucket, we have collided with some previous hash
    2343             :              * value, so try the next bucket location.  NB: this code must
    2344             :              * match ExecHashGetSkewBucket.
    2345             :              */
    2346          40 :             bucket = hashvalue & (nbuckets - 1);
    2347          40 :             while (hashtable->skewBucket[bucket] != NULL &&
    2348           0 :                    hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2349           0 :                 bucket = (bucket + 1) & (nbuckets - 1);
    2350             : 
    2351             :             /*
    2352             :              * If we found an existing bucket with the same hashvalue, leave
    2353             :              * it alone.  It's okay for two MCVs to share a hashvalue.
    2354             :              */
    2355          40 :             if (hashtable->skewBucket[bucket] != NULL)
    2356           0 :                 continue;
    2357             : 
    2358             :             /* Okay, create a new skew bucket for this hashvalue. */
    2359          80 :             hashtable->skewBucket[bucket] = (HashSkewBucket *)
    2360          40 :                 MemoryContextAlloc(hashtable->batchCxt,
    2361             :                                    sizeof(HashSkewBucket));
    2362          40 :             hashtable->skewBucket[bucket]->hashvalue = hashvalue;
    2363          40 :             hashtable->skewBucket[bucket]->tuples = NULL;
    2364          40 :             hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
    2365          40 :             hashtable->nSkewBuckets++;
    2366          40 :             hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
    2367          40 :             hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
    2368          40 :             if (hashtable->spaceUsed > hashtable->spacePeak)
    2369          40 :                 hashtable->spacePeak = hashtable->spaceUsed;
    2370             :         }
    2371             : 
    2372           4 :         free_attstatsslot(&sslot);
    2373             :     }
    2374             : 
    2375          22 :     ReleaseSysCache(statsTuple);
    2376             : }
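
The skew table built above is an open-addressing hash table whose length is a power of two, searched by linear probing; the insert loop here and the lookup in ExecHashGetSkewBucket must follow the same probe sequence.  The self-contained sketch below shows that probe loop over a small hypothetical table, with two values chosen so they collide.

    #include <stdint.h>
    #include <stdio.h>

    #define TABLE_LEN 8             /* power of 2, larger than the entry count */

    static int64_t table[TABLE_LEN];    /* -1 = empty, else a stored hashvalue */

    static int
    probe(uint32_t hashvalue)
    {
        int         bucket = hashvalue & (TABLE_LEN - 1);

        /* walk forward until we find this hashvalue or an empty slot */
        while (table[bucket] != -1 && table[bucket] != (int64_t) hashvalue)
            bucket = (bucket + 1) & (TABLE_LEN - 1);
        return bucket;
    }

    int
    main(void)
    {
        int         i;

        for (i = 0; i < TABLE_LEN; i++)
            table[i] = -1;

        /* both 0x0B and 0x13 map to slot 3 initially */
        table[probe(0x0B)] = 0x0B;  /* lands in slot 3 */
        table[probe(0x13)] = 0x13;  /* collides, lands in slot 4 */

        /* the lookup uses the same probe sequence, so both are found */
        printf("0x0B in slot %d, 0x13 in slot %d\n", probe(0x0B), probe(0x13));
        return 0;
    }
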
    2377             : 
    2378             : /*
    2379             :  * ExecHashGetSkewBucket
    2380             :  *
    2381             :  *      Returns the index of the skew bucket for this hashvalue,
    2382             :  *      or INVALID_SKEW_BUCKET_NO if the hashvalue is not
    2383             :  *      associated with any active skew bucket.
    2384             :  */
    2385             : int
    2386    28623962 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
    2387             : {
    2388             :     int         bucket;
    2389             : 
    2390             :     /*
    2391             :      * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
    2392             :      * particular, this happens after the initial batch is done).
    2393             :      */
    2394    28623962 :     if (!hashtable->skewEnabled)
    2395    28543962 :         return INVALID_SKEW_BUCKET_NO;
    2396             : 
    2397             :     /*
    2398             :      * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
    2399             :      */
    2400       80000 :     bucket = hashvalue & (hashtable->skewBucketLen - 1);
    2401             : 
    2402             :     /*
    2403             :      * While we have not hit a hole in the hashtable and have not hit the
    2404             :      * desired bucket, we have collided with some other hash value, so try the
    2405             :      * next bucket location.
    2406             :      */
    2407       84484 :     while (hashtable->skewBucket[bucket] != NULL &&
    2408        5456 :            hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2409        4484 :         bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
    2410             : 
    2411             :     /*
    2412             :      * Found the desired bucket?
    2413             :      */
    2414       80000 :     if (hashtable->skewBucket[bucket] != NULL)
    2415         972 :         return bucket;
    2416             : 
    2417             :     /*
    2418             :      * There must not be any hashtable entry for this hash value.
    2419             :      */
    2420       79028 :     return INVALID_SKEW_BUCKET_NO;
    2421             : }
    2422             : 
    2423             : /*
    2424             :  * ExecHashSkewTableInsert
    2425             :  *
    2426             :  *      Insert a tuple into the skew hashtable.
    2427             :  *
    2428             :  * This should generally match up with the current-batch case in
    2429             :  * ExecHashTableInsert.
    2430             :  */
    2431             : static void
    2432         172 : ExecHashSkewTableInsert(HashJoinTable hashtable,
    2433             :                         TupleTableSlot *slot,
    2434             :                         uint32 hashvalue,
    2435             :                         int bucketNumber)
    2436             : {
    2437             :     bool        shouldFree;
    2438         172 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    2439             :     HashJoinTuple hashTuple;
    2440             :     int         hashTupleSize;
    2441             : 
    2442             :     /* Create the HashJoinTuple */
    2443         172 :     hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2444         172 :     hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
    2445             :                                                    hashTupleSize);
    2446         172 :     hashTuple->hashvalue = hashvalue;
    2447         172 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    2448         172 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    2449             : 
    2450             :     /* Push it onto the front of the skew bucket's list */
    2451         172 :     hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
    2452         172 :     hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
    2453             :     Assert(hashTuple != hashTuple->next.unshared);
    2454             : 
    2455             :     /* Account for space used, and back off if we've used too much */
    2456         172 :     hashtable->spaceUsed += hashTupleSize;
    2457         172 :     hashtable->spaceUsedSkew += hashTupleSize;
    2458         172 :     if (hashtable->spaceUsed > hashtable->spacePeak)
    2459         128 :         hashtable->spacePeak = hashtable->spaceUsed;
    2460         204 :     while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
    2461          32 :         ExecHashRemoveNextSkewBucket(hashtable);
    2462             : 
    2463             :     /* Check we are not over the total spaceAllowed, either */
    2464         172 :     if (hashtable->spaceUsed > hashtable->spaceAllowed)
    2465           0 :         ExecHashIncreaseNumBatches(hashtable);
    2466             : 
    2467         172 :     if (shouldFree)
    2468         172 :         heap_free_minimal_tuple(tuple);
    2469         172 : }
    2470             : 
    2471             : /*
    2472             :  *      ExecHashRemoveNextSkewBucket
    2473             :  *
    2474             :  *      Remove the least valuable skew bucket by pushing its tuples into
    2475             :  *      the main hash table.
    2476             :  */
    2477             : static void
    2478          32 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
    2479             : {
    2480             :     int         bucketToRemove;
    2481             :     HashSkewBucket *bucket;
    2482             :     uint32      hashvalue;
    2483             :     int         bucketno;
    2484             :     int         batchno;
    2485             :     HashJoinTuple hashTuple;
    2486             : 
    2487             :     /* Locate the bucket to remove */
    2488          32 :     bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
    2489          32 :     bucket = hashtable->skewBucket[bucketToRemove];
    2490             : 
    2491             :     /*
    2492             :      * Calculate which bucket and batch the tuples belong to in the main
    2493             :      * hashtable.  They all have the same hash value, so it's the same for all
    2494             :      * of them.  Also note that it's not possible for nbatch to increase while
    2495             :      * we are processing the tuples.
    2496             :      */
    2497          32 :     hashvalue = bucket->hashvalue;
    2498          32 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    2499             : 
    2500             :     /* Process all tuples in the bucket */
    2501          32 :     hashTuple = bucket->tuples;
    2502         124 :     while (hashTuple != NULL)
    2503             :     {
    2504          92 :         HashJoinTuple nextHashTuple = hashTuple->next.unshared;
    2505             :         MinimalTuple tuple;
    2506             :         Size        tupleSize;
    2507             : 
    2508             :         /*
    2509             :          * This code must agree with ExecHashTableInsert.  We do not use
    2510             :          * ExecHashTableInsert directly as ExecHashTableInsert expects a
    2511             :          * TupleTableSlot while we already have HashJoinTuples.
    2512             :          */
    2513          92 :         tuple = HJTUPLE_MINTUPLE(hashTuple);
    2514          92 :         tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2515             : 
    2516             :         /* Decide whether to put the tuple in the hash table or a temp file */
    2517          92 :         if (batchno == hashtable->curbatch)
    2518             :         {
    2519             :             /* Move the tuple to the main hash table */
    2520             :             HashJoinTuple copyTuple;
    2521             : 
    2522             :             /*
    2523             :              * We must copy the tuple into the dense storage, else it will not
    2524             :              * be found by, eg, ExecHashIncreaseNumBatches.
    2525             :              */
    2526           8 :             copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
    2527           8 :             memcpy(copyTuple, hashTuple, tupleSize);
    2528           8 :             pfree(hashTuple);
    2529             : 
    2530           8 :             copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    2531           8 :             hashtable->buckets.unshared[bucketno] = copyTuple;
    2532             : 
    2533             :             /* We have reduced skew space, but overall space doesn't change */
    2534           8 :             hashtable->spaceUsedSkew -= tupleSize;
    2535             :         }
    2536             :         else
    2537             :         {
    2538             :             /* Put the tuple into a temp file for later batches */
    2539             :             Assert(batchno > hashtable->curbatch);
    2540          84 :             ExecHashJoinSaveTuple(tuple, hashvalue,
    2541          84 :                                   &hashtable->innerBatchFile[batchno]);
    2542          84 :             pfree(hashTuple);
    2543          84 :             hashtable->spaceUsed -= tupleSize;
    2544          84 :             hashtable->spaceUsedSkew -= tupleSize;
    2545             :         }
    2546             : 
    2547          92 :         hashTuple = nextHashTuple;
    2548             : 
    2549             :         /* allow this loop to be cancellable */
    2550          92 :         CHECK_FOR_INTERRUPTS();
    2551             :     }
    2552             : 
    2553             :     /*
    2554             :      * Free the bucket struct itself and reset the hashtable entry to NULL.
    2555             :      *
    2556             :      * NOTE: this is not nearly as simple as it looks on the surface, because
    2557             :      * of the possibility of collisions in the hashtable.  Suppose that hash
    2558             :      * values A and B collide at a particular hashtable entry, and that A was
    2559             :      * entered first so B gets shifted to a different table entry.  If we were
    2560             :      * to remove A first then ExecHashGetSkewBucket would mistakenly start
    2561             :      * reporting that B is not in the hashtable, because it would hit the NULL
    2562             :      * before finding B.  However, we always remove entries in the reverse
    2563             :      * order of creation, so this failure cannot happen.
    2564             :      */
    2565          32 :     hashtable->skewBucket[bucketToRemove] = NULL;
    2566          32 :     hashtable->nSkewBuckets--;
    2567          32 :     pfree(bucket);
    2568          32 :     hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
    2569          32 :     hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
    2570             : 
    2571             :     /*
    2572             :      * If we have removed all skew buckets then give up on skew optimization.
    2573             :      * Release the arrays since they aren't useful any more.
    2574             :      */
    2575          32 :     if (hashtable->nSkewBuckets == 0)
    2576             :     {
    2577           0 :         hashtable->skewEnabled = false;
    2578           0 :         pfree(hashtable->skewBucket);
    2579           0 :         pfree(hashtable->skewBucketNums);
    2580           0 :         hashtable->skewBucket = NULL;
    2581           0 :         hashtable->skewBucketNums = NULL;
    2582           0 :         hashtable->spaceUsed -= hashtable->spaceUsedSkew;
    2583           0 :         hashtable->spaceUsedSkew = 0;
    2584             :     }
    2585          32 : }
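
The closing comment above explains why skew buckets must be removed in reverse order of creation.  The standalone sketch below reproduces the failure it guards against: with two colliding entries, emptying the older slot first leaves a hole that makes a later lookup give up too early.  All names and values here are hypothetical.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LEN 8

    /* 3 was inserted first (slot 3); 11 collided with it and went to slot 4 */
    static int64_t slots[LEN] = {-1, -1, -1, 3, 11, -1, -1, -1};

    static bool
    lookup(uint32_t hashvalue)
    {
        int         b = hashvalue & (LEN - 1);

        while (slots[b] != -1 && slots[b] != (int64_t) hashvalue)
            b = (b + 1) & (LEN - 1);
        return slots[b] != -1;
    }

    int
    main(void)
    {
        printf("lookup(11) before removal: %d\n", lookup(11));  /* 1: found */

        slots[3] = -1;          /* remove the *older* entry first: wrong order */

        /* the probe now stops at the hole in slot 3 and misses slot 4 */
        printf("lookup(11) after removing slot 3: %d\n", lookup(11));  /* 0 */
        return 0;
    }
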
    2586             : 
    2587             : /*
    2588             :  * Reserve space in the DSM segment for instrumentation data.
    2589             :  */
    2590             : void
    2591         112 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
    2592             : {
    2593             :     size_t      size;
    2594             : 
    2595             :     /* don't need this if not instrumenting or no workers */
    2596         112 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2597          56 :         return;
    2598             : 
    2599          56 :     size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
    2600          56 :     size = add_size(size, offsetof(SharedHashInfo, hinstrument));
    2601          56 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    2602          56 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    2603             : }
    2604             : 
    2605             : /*
    2606             :  * Set up a space in the DSM for all workers to record instrumentation data
    2607             :  * about their hash table.
    2608             :  */
    2609             : void
    2610         112 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
    2611             : {
    2612             :     size_t      size;
    2613             : 
    2614             :     /* don't need this if not instrumenting or no workers */
    2615         112 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2616          56 :         return;
    2617             : 
    2618          56 :     size = offsetof(SharedHashInfo, hinstrument) +
    2619          56 :         pcxt->nworkers * sizeof(HashInstrumentation);
    2620          56 :     node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
    2621             : 
    2622             :     /* Each per-worker area must start out as zeroes. */
    2623          56 :     memset(node->shared_info, 0, size);
    2624             : 
    2625          56 :     node->shared_info->num_workers = pcxt->nworkers;
    2626          56 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
    2627          56 :                    node->shared_info);
    2628             : }
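
ExecHashEstimate and ExecHashInitializeDSM both size the shared area as a fixed header plus one HashInstrumentation per worker, the usual offsetof-plus-array idiom for a struct ending in a flexible array member.  Here is a generic, self-contained sketch of that sizing with hypothetical SharedStats/WorkerStats types, using ordinary malloc rather than shm_toc_allocate.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct WorkerStats
    {
        int         nbuckets;
        int         nbatch;
    } WorkerStats;

    typedef struct SharedStats
    {
        int         num_workers;
        WorkerStats per_worker[];   /* flexible array member */
    } SharedStats;

    int
    main(void)
    {
        int         nworkers = 4;
        /* offsetof covers the fixed header; the array part is sized per worker */
        size_t      size = offsetof(SharedStats, per_worker) +
                           nworkers * sizeof(WorkerStats);
        SharedStats *shared = malloc(size);

        memset(shared, 0, size);        /* each per-worker area starts zeroed */
        shared->num_workers = nworkers;
        printf("allocated %zu bytes for %d workers\n", size, shared->num_workers);
        free(shared);
        return 0;
    }
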
    2629             : 
    2630             : /*
    2631             :  * Locate the DSM space for hash table instrumentation data that we'll write
    2632             :  * to at shutdown time.
    2633             :  */
    2634             : void
    2635         332 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
    2636             : {
    2637             :     SharedHashInfo *shared_info;
    2638             : 
    2639             :     /* don't need this if not instrumenting */
    2640         332 :     if (!node->ps.instrument)
    2641         164 :         return;
    2642             : 
    2643             :     /*
    2644             :      * Find our entry in the shared area, and set up a pointer to it so that
    2645             :      * we'll accumulate stats there when shutting down or rebuilding the hash
    2646             :      * table.
    2647             :      */
    2648             :     shared_info = (SharedHashInfo *)
    2649         168 :         shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    2650         168 :     node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
    2651             : }
    2652             : 
    2653             : /*
    2654             :  * Collect EXPLAIN stats if needed, saving them into DSM memory if
    2655             :  * ExecHashInitializeWorker was called, or local storage if not.  In the
    2656             :  * parallel case, this must be done in ExecShutdownHash() rather than
    2657             :  * ExecEndHash() because the latter runs after we've detached from the DSM
    2658             :  * segment.
    2659             :  */
    2660             : void
    2661       44152 : ExecShutdownHash(HashState *node)
    2662             : {
    2663             :     /* Allocate save space if EXPLAIN'ing and we didn't do so already */
    2664       44152 :     if (node->ps.instrument && !node->hinstrument)
    2665          72 :         node->hinstrument = (HashInstrumentation *)
    2666          72 :             palloc0(sizeof(HashInstrumentation));
    2667             :     /* Now accumulate data for the current (final) hash table */
    2668       44152 :     if (node->hinstrument && node->hashtable)
    2669         194 :         ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
    2670       44152 : }
    2671             : 
    2672             : /*
    2673             :  * Retrieve instrumentation data from workers before the DSM segment is
    2674             :  * detached, so that EXPLAIN can access it.
    2675             :  */
    2676             : void
    2677          56 : ExecHashRetrieveInstrumentation(HashState *node)
    2678             : {
    2679          56 :     SharedHashInfo *shared_info = node->shared_info;
    2680             :     size_t      size;
    2681             : 
    2682          56 :     if (shared_info == NULL)
    2683           0 :         return;
    2684             : 
    2685             :     /* Replace node->shared_info with a copy in backend-local memory. */
    2686          56 :     size = offsetof(SharedHashInfo, hinstrument) +
    2687          56 :         shared_info->num_workers * sizeof(HashInstrumentation);
    2688          56 :     node->shared_info = palloc(size);
    2689          56 :     memcpy(node->shared_info, shared_info, size);
    2690             : }
    2691             : 
    2692             : /*
    2693             :  * Accumulate instrumentation data from 'hashtable' into an
    2694             :  * initially-zeroed HashInstrumentation struct.
    2695             :  *
    2696             :  * This is used to merge information across successive hash table instances
    2697             :  * within a single plan node.  We take the maximum values of each interesting
    2698             :  * number.  The largest nbuckets and largest nbatch values might have occurred
    2699             :  * in different instances, so there's some risk of confusion from reporting
    2700             :  * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
    2701             :  * issue if we don't report the largest values.  Similarly, we want to report
    2702             :  * the largest spacePeak regardless of whether it happened in the same
    2703             :  * instance as the largest nbuckets or nbatch.  All the instances should have
    2704             :  * the same nbuckets_original and nbatch_original; but there's little value
    2705             :  * in depending on that here, so handle them the same way.
    2706             :  */
    2707             : void
    2708         194 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
    2709             :                              HashJoinTable hashtable)
    2710             : {
    2711         194 :     instrument->nbuckets = Max(instrument->nbuckets,
    2712             :                                hashtable->nbuckets);
    2713         194 :     instrument->nbuckets_original = Max(instrument->nbuckets_original,
    2714             :                                         hashtable->nbuckets_original);
    2715         194 :     instrument->nbatch = Max(instrument->nbatch,
    2716             :                              hashtable->nbatch);
    2717         194 :     instrument->nbatch_original = Max(instrument->nbatch_original,
    2718             :                                       hashtable->nbatch_original);
    2719         194 :     instrument->space_peak = Max(instrument->space_peak,
    2720             :                                  hashtable->spacePeak);
    2721         194 : }
    2722             : 
    2723             : /*
    2724             :  * Allocate 'size' bytes from the currently active HashMemoryChunk
    2725             :  */
    2726             : static void *
    2727    15073650 : dense_alloc(HashJoinTable hashtable, Size size)
    2728             : {
    2729             :     HashMemoryChunk newChunk;
    2730             :     char       *ptr;
    2731             : 
    2732             :     /* just in case the size is not already aligned properly */
    2733    15073650 :     size = MAXALIGN(size);
    2734             : 
    2735             :     /*
    2736             :      * If tuple size is larger than threshold, allocate a separate chunk.
    2737             :      */
    2738    15073650 :     if (size > HASH_CHUNK_THRESHOLD)
    2739             :     {
    2740             :         /* allocate new chunk and put it at the beginning of the list */
    2741           0 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2742             :                                                         HASH_CHUNK_HEADER_SIZE + size);
    2743           0 :         newChunk->maxlen = size;
    2744           0 :         newChunk->used = size;
    2745           0 :         newChunk->ntuples = 1;
    2746             : 
    2747             :         /*
    2748             :          * Add this chunk to the list after the first existing chunk, so that
    2749             :          * we don't lose the remaining space in the "current" chunk.
    2750             :          */
    2751           0 :         if (hashtable->chunks != NULL)
    2752             :         {
    2753           0 :             newChunk->next = hashtable->chunks->next;
    2754           0 :             hashtable->chunks->next.unshared = newChunk;
    2755             :         }
    2756             :         else
    2757             :         {
    2758           0 :             newChunk->next.unshared = hashtable->chunks;
    2759           0 :             hashtable->chunks = newChunk;
    2760             :         }
    2761             : 
    2762           0 :         return HASH_CHUNK_DATA(newChunk);
    2763             :     }
    2764             : 
    2765             :     /*
    2766             :      * See if we have enough space for it in the current chunk (if any). If
    2767             :      * not, allocate a fresh chunk.
    2768             :      */
    2769    15073650 :     if ((hashtable->chunks == NULL) ||
    2770    14684508 :         (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
    2771             :     {
    2772             :         /* allocate new chunk and put it at the beginning of the list */
    2773      400474 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2774             :                                                         HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
    2775             : 
    2776      400474 :         newChunk->maxlen = HASH_CHUNK_SIZE;
    2777      400474 :         newChunk->used = size;
    2778      400474 :         newChunk->ntuples = 1;
    2779             : 
    2780      400474 :         newChunk->next.unshared = hashtable->chunks;
    2781      400474 :         hashtable->chunks = newChunk;
    2782             : 
    2783      400474 :         return HASH_CHUNK_DATA(newChunk);
    2784             :     }
    2785             : 
    2786             :     /* There is enough space in the current chunk, let's add the tuple */
    2787    14673176 :     ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
    2788    14673176 :     hashtable->chunks->used += size;
    2789    14673176 :     hashtable->chunks->ntuples += 1;
    2790             : 
    2791             :     /* return pointer to the start of the tuple memory */
    2792    14673176 :     return ptr;
    2793             : }
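
dense_alloc packs tuples tightly by carving MAXALIGN'd pieces out of large chunks with a bump pointer, starting a fresh chunk when the current one cannot fit the request.  Below is a stripped-down, self-contained sketch of that bump allocation; the chunk size, alignment macro, and names are invented for the example, and the oversized-tuple path is omitted.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define CHUNK_SIZE  (32 * 1024)
    #define ALIGNOF_MAX 8
    #define ALIGN_UP(x) (((x) + (ALIGNOF_MAX - 1)) & ~(size_t) (ALIGNOF_MAX - 1))

    typedef struct Chunk
    {
        size_t      used;
        char        data[CHUNK_SIZE];
    } Chunk;

    static void *
    bump_alloc(Chunk *chunk, size_t size)
    {
        void       *ptr;

        size = ALIGN_UP(size);      /* keep every returned pointer max-aligned */
        if (chunk->used + size > CHUNK_SIZE)
            return NULL;            /* caller would switch to a fresh chunk */
        ptr = chunk->data + chunk->used;
        chunk->used += size;
        return ptr;
    }

    int
    main(void)
    {
        Chunk      *chunk = calloc(1, sizeof(Chunk));
        void       *t1 = bump_alloc(chunk, 61);     /* rounded up to 64 bytes */
        void       *t2 = bump_alloc(chunk, 100);    /* rounded up to 104 bytes */

        printf("t1=%p t2=%p used=%zu\n", t1, t2, chunk->used);
        free(chunk);
        return 0;
    }
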
    2794             : 
    2795             : /*
    2796             :  * Allocate space for a tuple in shared dense storage.  This is equivalent to
    2797             :  * dense_alloc but for Parallel Hash using shared memory.
    2798             :  *
    2799             :  * While loading a tuple into shared memory, we might run out of memory and
    2800             :  * decide to repartition, or determine that the load factor is too high and
    2801             :  * decide to expand the bucket array, or discover that another participant has
    2802             :  * commanded us to help do that.  Return NULL if the number of buckets or
    2803             :  * has changed, indicating that the caller must retry (considering the
    2804             :  * possibility that the tuple no longer belongs in the same batch).
    2805             :  */
    2806             : static HashJoinTuple
    2807     1351144 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
    2808             :                            dsa_pointer *shared)
    2809             : {
    2810     1351144 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2811             :     dsa_pointer chunk_shared;
    2812             :     HashMemoryChunk chunk;
    2813             :     Size        chunk_size;
    2814             :     HashJoinTuple result;
    2815     1351144 :     int         curbatch = hashtable->curbatch;
    2816             : 
    2817     1351144 :     size = MAXALIGN(size);
    2818             : 
    2819             :     /*
    2820             :      * Fast path: if there is enough space in this backend's current chunk,
    2821             :      * then we can allocate without any locking.
    2822             :      */
    2823     1351144 :     chunk = hashtable->current_chunk;
    2824     1351144 :     if (chunk != NULL &&
    2825     1350510 :         size <= HASH_CHUNK_THRESHOLD &&
    2826     1350510 :         chunk->maxlen - chunk->used >= size)
    2827             :     {
    2828             : 
    2829     1348970 :         chunk_shared = hashtable->current_chunk_shared;
    2830             :         Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
    2831     1348970 :         *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
    2832     1348970 :         result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
    2833     1348970 :         chunk->used += size;
    2834             : 
    2835             :         Assert(chunk->used <= chunk->maxlen);
    2836             :         Assert(result == dsa_get_address(hashtable->area, *shared));
    2837             : 
    2838     1348970 :         return result;
    2839             :     }
    2840             : 
    2841             :     /* Slow path: try to allocate a new chunk. */
    2842        2174 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    2843             : 
    2844             :     /*
    2845             :      * Check if we need to help increase the number of buckets or batches.
    2846             :      */
    2847        2174 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    2848        2150 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2849             :     {
    2850         120 :         ParallelHashGrowth growth = pstate->growth;
    2851             : 
    2852         120 :         hashtable->current_chunk = NULL;
    2853         120 :         LWLockRelease(&pstate->lock);
    2854             : 
    2855             :         /* Another participant has commanded us to help grow. */
    2856         120 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    2857          24 :             ExecParallelHashIncreaseNumBatches(hashtable);
    2858          96 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2859          96 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    2860             : 
    2861             :         /* The caller must retry. */
    2862         120 :         return NULL;
    2863             :     }
    2864             : 
    2865             :     /* Oversized tuples get their own chunk. */
    2866        2054 :     if (size > HASH_CHUNK_THRESHOLD)
    2867          32 :         chunk_size = size + HASH_CHUNK_HEADER_SIZE;
    2868             :     else
    2869        2022 :         chunk_size = HASH_CHUNK_SIZE;
    2870             : 
    2871             :     /* Check if it's time to grow batches or buckets. */
    2872        2054 :     if (pstate->growth != PHJ_GROWTH_DISABLED)
    2873             :     {
    2874             :         Assert(curbatch == 0);
    2875             :         Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER);
    2876             : 
    2877             :         /*
    2878             :          * Check if our space limit would be exceeded.  To avoid choking on
    2879             :          * very large tuples or a very low hash_mem setting, we'll always allow
    2880             :          * each backend to allocate at least one chunk.
    2881             :          */
    2882         996 :         if (hashtable->batches[0].at_least_one_chunk &&
    2883         756 :             hashtable->batches[0].shared->size +
    2884         756 :             chunk_size > pstate->space_allowed)
    2885             :         {
    2886          24 :             pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    2887          24 :             hashtable->batches[0].shared->space_exhausted = true;
    2888          24 :             LWLockRelease(&pstate->lock);
    2889             : 
    2890          24 :             return NULL;
    2891             :         }
    2892             : 
    2893             :         /* Check if our load factor limit would be exceeded. */
    2894         972 :         if (hashtable->nbatch == 1)
    2895             :         {
    2896         842 :             hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
    2897         842 :             hashtable->batches[0].ntuples = 0;
    2898             :             /* Guard against integer overflow and alloc size overflow */
    2899         842 :             if (hashtable->batches[0].shared->ntuples + 1 >
    2900         842 :                 hashtable->nbuckets * NTUP_PER_BUCKET &&
    2901          72 :                 hashtable->nbuckets < (INT_MAX / 2) &&
    2902          72 :                 hashtable->nbuckets * 2 <=
    2903             :                 MaxAllocSize / sizeof(dsa_pointer_atomic))
    2904             :             {
    2905          72 :                 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
    2906          72 :                 LWLockRelease(&pstate->lock);
    2907             : 
    2908          72 :                 return NULL;
    2909             :             }
    2910             :         }
    2911             :     }
    2912             : 
    2913             :     /* We are cleared to allocate a new chunk. */
    2914        1958 :     chunk_shared = dsa_allocate(hashtable->area, chunk_size);
    2915        1958 :     hashtable->batches[curbatch].shared->size += chunk_size;
    2916        1958 :     hashtable->batches[curbatch].at_least_one_chunk = true;
    2917             : 
    2918             :     /* Set up the chunk. */
    2919        1958 :     chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
    2920        1958 :     *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
    2921        1958 :     chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
    2922        1958 :     chunk->used = size;
    2923             : 
    2924             :     /*
    2925             :      * Push it onto the list of chunks, so that it can be found if we need to
    2926             :      * increase the number of buckets or batches (batch 0 only) and later for
    2927             :      * freeing the memory (all batches).
    2928             :      */
    2929        1958 :     chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
    2930        1958 :     hashtable->batches[curbatch].shared->chunks = chunk_shared;
    2931             : 
    2932        1958 :     if (size <= HASH_CHUNK_THRESHOLD)
    2933             :     {
    2934             :         /*
    2935             :          * Make this the current chunk so that we can use the fast path to
    2936             :          * fill the rest of it up in future calls.
    2937             :          */
    2938        1934 :         hashtable->current_chunk = chunk;
    2939        1934 :         hashtable->current_chunk_shared = chunk_shared;
    2940             :     }
    2941        1958 :     LWLockRelease(&pstate->lock);
    2942             : 
    2943             :     Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
    2944        1958 :     result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
    2945             : 
    2946        1958 :     return result;
    2947             : }
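                     : 
                     : /*
                     :  * A hedged sketch of the retry pattern a caller can use when the function
                     :  * above returns NULL: helping with growth may change a tuple's batch, so
                     :  * the caller re-derives bucket and batch and tries again.  Variable names
                     :  * are illustrative only, not the actual caller in this file:
                     :  *
                     :  *    retry:
                     :  *        ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
                     :  *        if (batchno == 0)
                     :  *        {
                     :  *            hashTuple = ExecParallelHashTupleAlloc(hashtable,
                     :  *                                                   HJTUPLE_OVERHEAD + tuple->t_len,
                     :  *                                                   &shared);
                     :  *            if (hashTuple == NULL)
                     :  *                goto retry;    /* nbatch or nbuckets changed; try again */
                     :  *        }
                     :  */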
    2948             : 
    2949             : /*
    2950             :  * One backend needs to set up the shared batch state including tuplestores.
    2951             :  * Other backends will ensure they have correctly configured accessors by
    2952             :  * calling ExecParallelHashEnsureBatchAccessors().
    2953             :  */
    2954             : static void
    2955         132 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
    2956             : {
    2957         132 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2958             :     ParallelHashJoinBatch *batches;
    2959             :     MemoryContext oldcxt;
    2960             :     int         i;
    2961             : 
    2962             :     Assert(hashtable->batches == NULL);
    2963             : 
    2964             :     /* Allocate space. */
    2965         132 :     pstate->batches =
    2966         132 :         dsa_allocate0(hashtable->area,
    2967             :                       EstimateParallelHashJoinBatch(hashtable) * nbatch);
    2968         132 :     pstate->nbatch = nbatch;
    2969         132 :     batches = dsa_get_address(hashtable->area, pstate->batches);
    2970             : 
    2971             :     /* Use hash join memory context. */
    2972         132 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
    2973             : 
    2974             :     /* Allocate this backend's accessor array. */
    2975         132 :     hashtable->nbatch = nbatch;
    2976         132 :     hashtable->batches = (ParallelHashJoinBatchAccessor *)
    2977         132 :         palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
    2978             : 
    2979             :     /* Set up the shared state, tuplestores and backend-local accessors. */
    2980         708 :     for (i = 0; i < hashtable->nbatch; ++i)
    2981             :     {
    2982         576 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    2983         576 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    2984             :         char        name[MAXPGPATH];
    2985             : 
    2986             :         /*
    2987             :          * All members of shared were zero-initialized.  We just need to set
    2988             :          * up the Barrier.
    2989             :          */
    2990         576 :         BarrierInit(&shared->batch_barrier, 0);
    2991         576 :         if (i == 0)
    2992             :         {
    2993             :             /* Batch 0 doesn't need to be loaded. */
    2994         132 :             BarrierAttach(&shared->batch_barrier);
    2995         528 :             while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBING)
    2996         396 :                 BarrierArriveAndWait(&shared->batch_barrier, 0);
    2997         132 :             BarrierDetach(&shared->batch_barrier);
    2998             :         }
    2999             : 
    3000             :         /* Initialize accessor state.  All members were zero-initialized. */
    3001         576 :         accessor->shared = shared;
    3002             : 
    3003             :         /* Initialize the shared tuplestores. */
    3004         576 :         snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
    3005         576 :         accessor->inner_tuples =
    3006         576 :             sts_initialize(ParallelHashJoinBatchInner(shared),
    3007             :                            pstate->nparticipants,
    3008             :                            ParallelWorkerNumber + 1,
    3009             :                            sizeof(uint32),
    3010             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3011             :                            &pstate->fileset,
    3012             :                            name);
    3013         576 :         snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
    3014         576 :         accessor->outer_tuples =
    3015         576 :             sts_initialize(ParallelHashJoinBatchOuter(shared,
    3016             :                                                       pstate->nparticipants),
    3017             :                            pstate->nparticipants,
    3018             :                            ParallelWorkerNumber + 1,
    3019             :                            sizeof(uint32),
    3020             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3021             :                            &pstate->fileset,
    3022             :                            name);
    3023             :     }
    3024             : 
    3025         132 :     MemoryContextSwitchTo(oldcxt);
    3026         132 : }
    3027             : 
    3028             : /*
    3029             :  * Free the current set of ParallelHashJoinBatchAccessor objects.
    3030             :  */
    3031             : static void
    3032          40 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
    3033             : {
    3034             :     int         i;
    3035             : 
    3036         160 :     for (i = 0; i < hashtable->nbatch; ++i)
    3037             :     {
    3038             :         /* Make sure no files are left open. */
    3039         120 :         sts_end_write(hashtable->batches[i].inner_tuples);
    3040         120 :         sts_end_write(hashtable->batches[i].outer_tuples);
    3041         120 :         sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3042         120 :         sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3043             :     }
    3044          40 :     pfree(hashtable->batches);
    3045          40 :     hashtable->batches = NULL;
    3046          40 : }
    3047             : 
    3048             : /*
    3049             :  * Make sure this backend has up-to-date accessors for the current set of
    3050             :  * batches.
    3051             :  */
    3052             : static void
    3053         532 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
    3054             : {
    3055         532 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3056             :     ParallelHashJoinBatch *batches;
    3057             :     MemoryContext oldcxt;
    3058             :     int         i;
    3059             : 
    3060         532 :     if (hashtable->batches != NULL)
    3061             :     {
    3062         410 :         if (hashtable->nbatch == pstate->nbatch)
    3063         408 :             return;
    3064           2 :         ExecParallelHashCloseBatchAccessors(hashtable);
    3065             :     }
    3066             : 
    3067             :     /*
    3068             :      * It's possible for a backend to start up very late so that the whole
    3069             :      * join is finished and the shm state for tracking batches has already
    3070             :      * been freed by ExecHashTableDetach().  In that case we'll just leave
    3071             :      * hashtable->batches as NULL so that ExecParallelHashJoinNewBatch() gives
    3072             :      * up early.
    3073             :      */
    3074         124 :     if (!DsaPointerIsValid(pstate->batches))
    3075           0 :         return;
    3076             : 
    3077             :     /* Use hash join memory context. */
    3078         124 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
    3079             : 
    3080             :     /* Allocate this backend's accessor array. */
    3081         124 :     hashtable->nbatch = pstate->nbatch;
    3082         124 :     hashtable->batches = (ParallelHashJoinBatchAccessor *)
    3083         124 :         palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
    3084             : 
    3085             :     /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
    3086             :     batches = (ParallelHashJoinBatch *)
    3087         124 :         dsa_get_address(hashtable->area, pstate->batches);
    3088             : 
    3089             :     /* Set up the accessor array and attach to the tuplestores. */
    3090         780 :     for (i = 0; i < hashtable->nbatch; ++i)
    3091             :     {
    3092         656 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3093         656 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3094             : 
    3095         656 :         accessor->shared = shared;
    3096         656 :         accessor->preallocated = 0;
    3097         656 :         accessor->done = false;
    3098         656 :         accessor->inner_tuples =
    3099         656 :             sts_attach(ParallelHashJoinBatchInner(shared),
    3100             :                        ParallelWorkerNumber + 1,
    3101             :                        &pstate->fileset);
    3102         656 :         accessor->outer_tuples =
    3103         656 :             sts_attach(ParallelHashJoinBatchOuter(shared,
    3104             :                                                   pstate->nparticipants),
    3105             :                        ParallelWorkerNumber + 1,
    3106             :                        &pstate->fileset);
    3107             :     }
    3108             : 
    3109         124 :     MemoryContextSwitchTo(oldcxt);
    3110             : }
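                     : 
                     : /*
                     :  * A hedged sketch of how the two routines above divide the work: one
                     :  * participant creates the shared batch array, and every participant then
                     :  * refreshes its backend-local accessors (the condition is illustrative):
                     :  *
                     :  *    if (chosen_to_allocate)
                     :  *        ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
                     :  *    ...
                     :  *    ExecParallelHashEnsureBatchAccessors(hashtable);
                     :  */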
    3111             : 
    3112             : /*
    3113             :  * Allocate an empty shared memory hash table for a given batch.
    3114             :  */
    3115             : void
    3116         488 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
    3117             : {
    3118         488 :     ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
    3119             :     dsa_pointer_atomic *buckets;
    3120         488 :     int         nbuckets = hashtable->parallel_state->nbuckets;
    3121             :     int         i;
    3122             : 
    3123         488 :     batch->buckets =
    3124         488 :         dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
    3125             :     buckets = (dsa_pointer_atomic *)
    3126         488 :         dsa_get_address(hashtable->area, batch->buckets);
    3127     1733096 :     for (i = 0; i < nbuckets; ++i)
    3128     1732608 :         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    3129         488 : }
    3130             : 
    3131             : /*
    3132             :  * If we are currently attached to a shared hash join batch, detach.  If we
    3133             :  * are last to detach, clean up.
    3134             :  */
    3135             : void
    3136       37912 : ExecHashTableDetachBatch(HashJoinTable hashtable)
    3137             : {
    3138       37912 :     if (hashtable->parallel_state != NULL &&
    3139         782 :         hashtable->curbatch >= 0)
    3140             :     {
    3141         566 :         int         curbatch = hashtable->curbatch;
    3142         566 :         ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    3143             : 
    3144             :         /* Make sure any temporary files are closed. */
    3145         566 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    3146         566 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    3147             : 
    3148             :         /* Detach from the batch we were last working on. */
    3149         566 :         if (BarrierArriveAndDetach(&batch->batch_barrier))
    3150             :         {
    3151             :             /*
    3152             :              * Technically we shouldn't access the barrier because we're no
    3153             :              * longer attached, but since there is no way it's moving after
    3154             :              * this point it seems safe to make the following assertion.
    3155             :              */
    3156             :             Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_DONE);
    3157             : 
    3158             :             /* Free shared chunks and buckets. */
    3159        2248 :             while (DsaPointerIsValid(batch->chunks))
    3160             :             {
    3161             :                 HashMemoryChunk chunk =
    3162        1760 :                 dsa_get_address(hashtable->area, batch->chunks);
    3163        1760 :                 dsa_pointer next = chunk->next.shared;
    3164             : 
    3165        1760 :                 dsa_free(hashtable->area, batch->chunks);
    3166        1760 :                 batch->chunks = next;
    3167             :             }
    3168         488 :             if (DsaPointerIsValid(batch->buckets))
    3169             :             {
    3170         488 :                 dsa_free(hashtable->area, batch->buckets);
    3171         488 :                 batch->buckets = InvalidDsaPointer;
    3172             :             }
    3173             :         }
    3174             : 
    3175             :         /*
    3176             :          * Track the largest batch we've been attached to.  Though each
    3177             :          * backend might see a different subset of batches, explain.c will
    3178             :          * scan the results from all backends to find the largest value.
    3179             :          */
    3180         566 :         hashtable->spacePeak =
    3181         566 :             Max(hashtable->spacePeak,
    3182             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    3183             : 
    3184             :         /* Remember that we are not attached to a batch. */
    3185         566 :         hashtable->curbatch = -1;
    3186             :     }
    3187       37912 : }
    3188             : 
    3189             : /*
    3190             :  * Detach from all shared resources.  If we are last to detach, clean up.
    3191             :  */
    3192             : void
    3193       37346 : ExecHashTableDetach(HashJoinTable hashtable)
    3194             : {
    3195       37346 :     if (hashtable->parallel_state)
    3196             :     {
    3197         216 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
    3198             :         int         i;
    3199             : 
    3200             :         /* Make sure any temporary files are closed. */
    3201         216 :         if (hashtable->batches)
    3202             :         {
    3203        1328 :             for (i = 0; i < hashtable->nbatch; ++i)
    3204             :             {
    3205        1112 :                 sts_end_write(hashtable->batches[i].inner_tuples);
    3206        1112 :                 sts_end_write(hashtable->batches[i].outer_tuples);
    3207        1112 :                 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3208        1112 :                 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3209             :             }
    3210             :         }
    3211             : 
    3212             :         /* If we're last to detach, clean up shared memory. */
    3213         216 :         if (BarrierDetach(&pstate->build_barrier))
    3214             :         {
    3215          96 :             if (DsaPointerIsValid(pstate->batches))
    3216             :             {
    3217          96 :                 dsa_free(hashtable->area, pstate->batches);
    3218          96 :                 pstate->batches = InvalidDsaPointer;
    3219             :             }
    3220             :         }
    3221             : 
    3222         216 :         hashtable->parallel_state = NULL;
    3223             :     }
    3224       37346 : }
    3225             : 
    3226             : /*
    3227             :  * Get the first tuple in a given bucket identified by number.
    3228             :  */
    3229             : static inline HashJoinTuple
    3230     1200016 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
    3231             : {
    3232             :     HashJoinTuple tuple;
    3233             :     dsa_pointer p;
    3234             : 
    3235             :     Assert(hashtable->parallel_state);
    3236     1200016 :     p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
    3237     1200016 :     tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
    3238             : 
    3239     1200016 :     return tuple;
    3240             : }
    3241             : 
    3242             : /*
    3243             :  * Get the next tuple in the same bucket as 'tuple'.
    3244             :  */
    3245             : static inline HashJoinTuple
    3246     1980424 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
    3247             : {
    3248             :     HashJoinTuple next;
    3249             : 
    3250             :     Assert(hashtable->parallel_state);
    3251     1980424 :     next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
    3252             : 
    3253     1980424 :     return next;
    3254             : }
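                     : 
                     : /*
                     :  * A hedged sketch of a bucket-chain walk built from the two helpers above
                     :  * (variable names illustrative; compare ExecParallelScanHashBucket()
                     :  * earlier in this file):
                     :  *
                     :  *    for (tuple = ExecParallelHashFirstTuple(hashtable, bucketno);
                     :  *         tuple != NULL;
                     :  *         tuple = ExecParallelHashNextTuple(hashtable, tuple))
                     :  *    {
                     :  *        if (tuple->hashvalue == hashvalue)
                     :  *            ... evaluate the join quals for this candidate ...
                     :  *    }
                     :  */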
    3255             : 
    3256             : /*
    3257             :  * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
    3258             :  */
    3259             : static inline void
    3260     1716226 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
    3261             :                           HashJoinTuple tuple,
    3262             :                           dsa_pointer tuple_shared)
    3263             : {
    3264             :     for (;;)
    3265             :     {
    3266     1716226 :         tuple->next.shared = dsa_pointer_atomic_read(head);
    3267     1716226 :         if (dsa_pointer_atomic_compare_exchange(head,
    3268     1716226 :                                                 &tuple->next.shared,
    3269             :                                                 tuple_shared))
    3270     1714120 :             break;
    3271             :     }
    3272     1714120 : }
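                     : 
                     : /*
                     :  * The compare-and-swap loop above is lock-free: if another backend pushes
                     :  * onto the same bucket between the read and the exchange, the exchange
                     :  * fails and we simply re-read the head and try again.  A hedged sketch of
                     :  * a call site, pairing the backend-local pointer with the dsa_pointer
                     :  * obtained from ExecParallelHashTupleAlloc():
                     :  *
                     :  *    ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
                     :  *                              hashTuple, shared);
                     :  */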
    3273             : 
    3274             : /*
    3275             :  * Prepare to work on a given batch.
    3276             :  */
    3277             : void
    3278        1278 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
    3279             : {
    3280             :     Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
    3281             : 
    3282        1278 :     hashtable->curbatch = batchno;
    3283        1278 :     hashtable->buckets.shared = (dsa_pointer_atomic *)
    3284        1278 :         dsa_get_address(hashtable->area,
    3285        1278 :                         hashtable->batches[batchno].shared->buckets);
    3286        1278 :     hashtable->nbuckets = hashtable->parallel_state->nbuckets;
    3287        1278 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
    3288        1278 :     hashtable->current_chunk = NULL;
    3289        1278 :     hashtable->current_chunk_shared = InvalidDsaPointer;
    3290        1278 :     hashtable->batches[batchno].at_least_one_chunk = false;
    3291        1278 : }
    3292             : 
    3293             : /*
    3294             :  * Take the next available chunk from the queue of chunks being worked on in
    3295             :  * parallel.  Return NULL if there are none left.  Otherwise return a pointer
    3296             :  * to the chunk, and set *shared to the DSA pointer to the chunk.
    3297             :  */
    3298             : static HashMemoryChunk
    3299         776 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
    3300             : {
    3301         776 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3302             :     HashMemoryChunk chunk;
    3303             : 
    3304         776 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3305         776 :     if (DsaPointerIsValid(pstate->chunk_work_queue))
    3306             :     {
    3307         642 :         *shared = pstate->chunk_work_queue;
    3308             :         chunk = (HashMemoryChunk)
    3309         642 :             dsa_get_address(hashtable->area, *shared);
    3310         642 :         pstate->chunk_work_queue = chunk->next.shared;
    3311             :     }
    3312             :     else
    3313         134 :         chunk = NULL;
    3314         776 :     LWLockRelease(&pstate->lock);
    3315             : 
    3316         776 :     return chunk;
    3317             : }
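                     : 
                     : /*
                     :  * A hedged sketch of draining the chunk queue, as the grow routines
                     :  * earlier in this file do (the loop body and the final free are
                     :  * illustrative; some callers keep the chunk instead of freeing it):
                     :  *
                     :  *    while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
                     :  *    {
                     :  *        size_t idx = 0;
                     :  *
                     :  *        while (idx < chunk->used)
                     :  *        {
                     :  *            HashJoinTuple tuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
                     :  *
                     :  *            ... re-bucket or repartition tuple ...
                     :  *            idx += MAXALIGN(HJTUPLE_OVERHEAD +
                     :  *                            HJTUPLE_MINTUPLE(tuple)->t_len);
                     :  *        }
                     :  *        dsa_free(hashtable->area, chunk_shared);
                     :  *    }
                     :  */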
    3318             : 
    3319             : /*
    3320             :  * Increase the space preallocated in this backend for a given inner batch by
    3321             :  * at least a given amount.  This allows us to track whether a given batch
    3322             :  * would fit in memory when loaded back in.  Also increase the number of
    3323             :  * batches or buckets if required.
    3324             :  *
    3325             :  * This maintains a running estimate of how much space will be taken when we
    3326             :  * load the batch back into memory by simulating the way chunks will be handed
    3327             :  * out to workers.  It's not perfectly accurate because the tuples will be
    3328             :  * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
    3329             :  * it should be pretty close.  It tends to overestimate by a fraction of a
    3330             :  * chunk per worker since all workers gang up to preallocate during hashing,
    3331             :  * but workers tend to reload batches alone if there are enough to go around,
    3332             :  * leaving fewer partially filled chunks.  This effect is bounded by
    3333             :  * nparticipants.
    3334             :  *
    3335             :  * Return false if the number of batches or buckets has changed, and the
    3336             :  * caller should reconsider which batch a given tuple now belongs in and call
    3337             :  * again.
    3338             :  */
    3339             : static bool
    3340        1060 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
    3341             : {
    3342        1060 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3343        1060 :     ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
    3344        1060 :     size_t      want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
    3345             : 
    3346             :     Assert(batchno > 0);
    3347             :     Assert(batchno < hashtable->nbatch);
    3348             :     Assert(size == MAXALIGN(size));
    3349             : 
    3350        1060 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3351             : 
    3352             :     /* Has another participant commanded us to help grow? */
    3353        1060 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3354        1046 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3355             :     {
    3356          14 :         ParallelHashGrowth growth = pstate->growth;
    3357             : 
    3358          14 :         LWLockRelease(&pstate->lock);
    3359          14 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3360          14 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3361           0 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3362           0 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3363             : 
    3364          14 :         return false;
    3365             :     }
    3366             : 
    3367        1046 :     if (pstate->growth != PHJ_GROWTH_DISABLED &&
    3368         894 :         batch->at_least_one_chunk &&
    3369         344 :         (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
    3370         344 :          > pstate->space_allowed))
    3371             :     {
    3372             :         /*
    3373             :          * We have determined that this batch would exceed the space budget if
    3374             :          * loaded into memory.  Command all participants to help repartition.
    3375             :          */
    3376          12 :         batch->shared->space_exhausted = true;
    3377          12 :         pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3378          12 :         LWLockRelease(&pstate->lock);
    3379             : 
    3380          12 :         return false;
    3381             :     }
    3382             : 
    3383        1034 :     batch->at_least_one_chunk = true;
    3384        1034 :     batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
    3385        1034 :     batch->preallocated = want;
    3386        1034 :     LWLockRelease(&pstate->lock);
    3387             : 
    3388        1034 :     return true;
    3389             : }
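                     : 
                     : /*
                     :  * A hedged sketch of the intended calling pattern for writing a tuple to a
                     :  * batch other than batch 0: reserve space first and restart from batch
                     :  * selection if the reservation fails (names are illustrative):
                     :  *
                     :  *    size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
                     :  *
                     :  *    if (hashtable->batches[batchno].preallocated < tuple_size &&
                     :  *        !ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
                     :  *        goto retry;        /* nbatch/nbuckets changed; recompute batchno */
                     :  *
                     :  *    hashtable->batches[batchno].preallocated -= tuple_size;
                     :  *    sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue, tuple);
                     :  */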
    3390             : 
    3391             : /*
    3392             :  * Calculate the limit on how much memory can be used by Hash and similar
    3393             :  * plan types.  This is work_mem times hash_mem_multiplier, and is
    3394             :  * expressed in bytes.
    3395             :  *
    3396             :  * Exported for use by the planner, as well as other hash-like executor
    3397             :  * nodes.  This is a rather random place for this, but there is no better
    3398             :  * place.
    3399             :  */
    3400             : size_t
    3401     2471114 : get_hash_memory_limit(void)
    3402             : {
    3403             :     double      mem_limit;
    3404             : 
    3405             :     /* Do initial calculation in double arithmetic */
    3406     2471114 :     mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
    3407             : 
    3408             :     /* Clamp in case it doesn't fit in size_t */
    3409     2471114 :     mem_limit = Min(mem_limit, (double) SIZE_MAX);
    3410             : 
    3411     2471114 :     return (size_t) mem_limit;
    3412             : }
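                     : 
                     : /*
                     :  * Worked example (illustrative settings, not asserted defaults): with
                     :  * work_mem = 4096 kB and hash_mem_multiplier = 2.0, the limit is
                     :  * 4096 * 2.0 * 1024.0 = 8388608 bytes (8 MB) available to the hash table
                     :  * before the executor must add batches and spill to disk.
                     :  */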

Generated by: LCOV version 1.14