LCOV - code coverage report
Current view: top level - src/backend/executor - nodeHash.c (source / functions)
Test:     PostgreSQL 16beta1
Date:     2023-06-06 10:12:12
Coverage: Lines: 1071 of 1125 hit (95.2 %)    Functions: 54 of 55 hit (98.2 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeHash.c
       4             :  *    Routines to hash relations for hashjoin
       5             :  *
       6             :  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeHash.c
      12             :  *
      13             :  * See note on parallelism in nodeHashjoin.c.
      14             :  *
      15             :  *-------------------------------------------------------------------------
      16             :  */
      17             : /*
      18             :  * INTERFACE ROUTINES
      19             :  *      MultiExecHash   - generate an in-memory hash table of the relation
      20             :  *      ExecInitHash    - initialize node and subnodes
      21             :  *      ExecEndHash     - shutdown node and subnodes
      22             :  */
      23             : 
      24             : #include "postgres.h"
      25             : 
      26             : #include <math.h>
      27             : #include <limits.h>
      28             : 
      29             : #include "access/htup_details.h"
      30             : #include "access/parallel.h"
      31             : #include "catalog/pg_statistic.h"
      32             : #include "commands/tablespace.h"
      33             : #include "executor/execdebug.h"
      34             : #include "executor/hashjoin.h"
      35             : #include "executor/nodeHash.h"
      36             : #include "executor/nodeHashjoin.h"
      37             : #include "miscadmin.h"
      38             : #include "pgstat.h"
      39             : #include "port/atomics.h"
      40             : #include "port/pg_bitutils.h"
      41             : #include "utils/dynahash.h"
      42             : #include "utils/guc.h"
      43             : #include "utils/lsyscache.h"
      44             : #include "utils/memutils.h"
      45             : #include "utils/syscache.h"
      46             : 
      47             : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
      48             : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
      49             : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
      50             : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
      51             : static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
      52             :                                   int mcvsToUse);
      53             : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
      54             :                                     TupleTableSlot *slot,
      55             :                                     uint32 hashvalue,
      56             :                                     int bucketNumber);
      57             : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
      58             : 
      59             : static void *dense_alloc(HashJoinTable hashtable, Size size);
      60             : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
      61             :                                                 size_t size,
      62             :                                                 dsa_pointer *shared);
      63             : static void MultiExecPrivateHash(HashState *node);
      64             : static void MultiExecParallelHash(HashState *node);
      65             : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
      66             :                                                        int bucketno);
      67             : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
      68             :                                                       HashJoinTuple tuple);
      69             : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
      70             :                                              HashJoinTuple tuple,
      71             :                                              dsa_pointer tuple_shared);
      72             : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
      73             : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
      74             : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
      75             : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
      76             : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
      77             :                                                      dsa_pointer *shared);
      78             : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
      79             :                                           int batchno,
      80             :                                           size_t size);
      81             : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
      82             : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
      83             : 
      84             : 
      85             : /* ----------------------------------------------------------------
      86             :  *      ExecHash
      87             :  *
      88             :  *      stub for pro forma compliance
      89             :  * ----------------------------------------------------------------
      90             :  */
      91             : static TupleTableSlot *
      92           0 : ExecHash(PlanState *pstate)
      93             : {
      94           0 :     elog(ERROR, "Hash node does not support ExecProcNode call convention");
      95             :     return NULL;
      96             : }
      97             : 
      98             : /* ----------------------------------------------------------------
      99             :  *      MultiExecHash
     100             :  *
     101             :  *      build hash table for hashjoin, doing partitioning if more
     102             :  *      than one batch is required.
     103             :  * ----------------------------------------------------------------
     104             :  */
     105             : Node *
     106       19374 : MultiExecHash(HashState *node)
     107             : {
     108             :     /* must provide our own instrumentation support */
     109       19374 :     if (node->ps.instrument)
     110         294 :         InstrStartNode(node->ps.instrument);
     111             : 
     112       19374 :     if (node->parallel_state != NULL)
     113         398 :         MultiExecParallelHash(node);
     114             :     else
     115       18976 :         MultiExecPrivateHash(node);
     116             : 
     117             :     /* must provide our own instrumentation support */
     118       19374 :     if (node->ps.instrument)
     119         294 :         InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
     120             : 
     121             :     /*
     122             :      * We do not return the hash table directly because it's not a subtype of
     123             :      * Node, and so would violate the MultiExecProcNode API.  Instead, our
     124             :      * parent Hashjoin node is expected to know how to fish it out of our node
     125             :      * state.  Ugly but not really worth cleaning up, since Hashjoin knows
     126             :      * quite a bit more about Hash besides that.
     127             :      */
     128       19374 :     return NULL;
     129             : }
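
As the comment above notes, the parent HashJoin fetches the finished table out of the Hash node's state rather than receiving it as a return value. A condensed sketch of the caller's side of that contract, paraphrased from the HJ_BUILD_HASHTABLE step in nodeHashjoin.c (simplified; the parallel and skew details are omitted, so treat this as illustrative rather than the verbatim source):

    /* In the parent HashJoin: create the table, then drive the Hash child. */
    hashtable = ExecHashTableCreate(hashNode,
                                    node->hj_HashOperators,
                                    node->hj_Collations,
                                    HJ_FILL_INNER(node));
    node->hj_HashTable = hashtable;

    /* Hand the table to the child and run it; the return value is unused. */
    hashNode->hashtable = hashtable;
    (void) MultiExecProcNode((PlanState *) hashNode);

    /* From here on, the join probes node->hj_HashTable directly. */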
     130             : 
     131             : /* ----------------------------------------------------------------
     132             :  *      MultiExecPrivateHash
     133             :  *
     134             :  *      parallel-oblivious version, building a backend-private
     135             :  *      hash table and (if necessary) batch files.
     136             :  * ----------------------------------------------------------------
     137             :  */
     138             : static void
     139       18976 : MultiExecPrivateHash(HashState *node)
     140             : {
     141             :     PlanState  *outerNode;
     142             :     List       *hashkeys;
     143             :     HashJoinTable hashtable;
     144             :     TupleTableSlot *slot;
     145             :     ExprContext *econtext;
     146             :     uint32      hashvalue;
     147             : 
     148             :     /*
     149             :      * get state info from node
     150             :      */
     151       18976 :     outerNode = outerPlanState(node);
     152       18976 :     hashtable = node->hashtable;
     153             : 
     154             :     /*
     155             :      * set expression context
     156             :      */
     157       18976 :     hashkeys = node->hashkeys;
     158       18976 :     econtext = node->ps.ps_ExprContext;
     159             : 
     160             :     /*
     161             :      * Get all tuples from the node below the Hash node and insert into the
     162             :      * hash table (or temp files).
     163             :      */
     164             :     for (;;)
     165             :     {
     166     9270196 :         slot = ExecProcNode(outerNode);
     167     9270196 :         if (TupIsNull(slot))
     168             :             break;
     169             :         /* We have to compute the hash value */
     170     9251220 :         econtext->ecxt_outertuple = slot;
     171     9251220 :         if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
     172     9251220 :                                  false, hashtable->keepNulls,
     173             :                                  &hashvalue))
     174             :         {
     175             :             int         bucketNumber;
     176             : 
     177     9251208 :             bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
     178     9251208 :             if (bucketNumber != INVALID_SKEW_BUCKET_NO)
     179             :             {
     180             :                 /* It's a skew tuple, so put it into that hash table */
     181         588 :                 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
     182             :                                         bucketNumber);
     183         588 :                 hashtable->skewTuples += 1;
     184             :             }
     185             :             else
     186             :             {
     187             :                 /* Not subject to skew optimization, so insert normally */
     188     9250620 :                 ExecHashTableInsert(hashtable, slot, hashvalue);
     189             :             }
     190     9251208 :             hashtable->totalTuples += 1;
     191             :         }
     192             :     }
     193             : 
     194             :     /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
     195       18976 :     if (hashtable->nbuckets != hashtable->nbuckets_optimal)
     196          72 :         ExecHashIncreaseNumBuckets(hashtable);
     197             : 
     198             :     /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
     199       18976 :     hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
     200       18976 :     if (hashtable->spaceUsed > hashtable->spacePeak)
     201       18950 :         hashtable->spacePeak = hashtable->spaceUsed;
     202             : 
     203       18976 :     hashtable->partialTuples = hashtable->totalTuples;
     204       18976 : }
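
The resize check just above depends on nbuckets_optimal having been kept up to date during the build. That bookkeeping happens in ExecHashTableInsert(), outside this excerpt; a paraphrased sketch of the doubling rule (simplified, with the integer-overflow and MaxAllocSize guards elided; ntuples stands for the number of tuples stored so far):

    /*
     * While everything still fits in one batch, double the optimal bucket
     * count whenever the load factor exceeds NTUP_PER_BUCKET.
     */
    if (hashtable->nbatch == 1 &&
        ntuples > hashtable->nbuckets_optimal * NTUP_PER_BUCKET)
    {
        hashtable->nbuckets_optimal *= 2;
        hashtable->log2_nbuckets_optimal += 1;
    }

The actual reallocation is deferred to the single ExecHashIncreaseNumBuckets() call above, so in the single-batch case the bucket array is rebuilt at most once per build.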
     205             : 
     206             : /* ----------------------------------------------------------------
     207             :  *      MultiExecParallelHash
     208             :  *
     209             :  *      parallel-aware version, building a shared hash table and
     210             :  *      (if necessary) batch files using the combined effort of
     211             :  *      a set of co-operating backends.
     212             :  * ----------------------------------------------------------------
     213             :  */
     214             : static void
     215         398 : MultiExecParallelHash(HashState *node)
     216             : {
     217             :     ParallelHashJoinState *pstate;
     218             :     PlanState  *outerNode;
     219             :     List       *hashkeys;
     220             :     HashJoinTable hashtable;
     221             :     TupleTableSlot *slot;
     222             :     ExprContext *econtext;
     223             :     uint32      hashvalue;
     224             :     Barrier    *build_barrier;
     225             :     int         i;
     226             : 
     227             :     /*
     228             :      * get state info from node
     229             :      */
     230         398 :     outerNode = outerPlanState(node);
     231         398 :     hashtable = node->hashtable;
     232             : 
     233             :     /*
     234             :      * set expression context
     235             :      */
     236         398 :     hashkeys = node->hashkeys;
     237         398 :     econtext = node->ps.ps_ExprContext;
     238             : 
     239             :     /*
     240             :      * Synchronize the parallel hash table build.  At this stage we know that
     241             :      * the shared hash table has been or is being set up by
     242             :      * ExecHashTableCreate(), but we don't know if our peers have returned
     243             :      * from there or are here in MultiExecParallelHash(), and if so how far
      244             :      * through they are.  To find out, we check the build_barrier phase and
      245             :      * then jump to the right step in the build algorithm.
     246             :      */
     247         398 :     pstate = hashtable->parallel_state;
     248         398 :     build_barrier = &pstate->build_barrier;
     249             :     Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
     250         398 :     switch (BarrierPhase(build_barrier))
     251             :     {
     252         168 :         case PHJ_BUILD_ALLOCATE:
     253             : 
     254             :             /*
     255             :              * Either I just allocated the initial hash table in
     256             :              * ExecHashTableCreate(), or someone else is doing that.  Either
     257             :              * way, wait for everyone to arrive here so we can proceed.
     258             :              */
     259         168 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
     260             :             /* Fall through. */
     261             : 
     262         242 :         case PHJ_BUILD_HASH_INNER:
     263             : 
     264             :             /*
     265             :              * It's time to begin hashing, or if we just arrived here then
     266             :              * hashing is already underway, so join in that effort.  While
     267             :              * hashing we have to be prepared to help increase the number of
     268             :              * batches or buckets at any time, and if we arrived here when
     269             :              * that was already underway we'll have to help complete that work
     270             :              * immediately so that it's safe to access batches and buckets
     271             :              * below.
     272             :              */
     273         242 :             if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
     274             :                 PHJ_GROW_BATCHES_ELECT)
     275           0 :                 ExecParallelHashIncreaseNumBatches(hashtable);
     276         242 :             if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
     277             :                 PHJ_GROW_BUCKETS_ELECT)
     278           0 :                 ExecParallelHashIncreaseNumBuckets(hashtable);
     279         242 :             ExecParallelHashEnsureBatchAccessors(hashtable);
     280         242 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
     281             :             for (;;)
     282             :             {
     283     2160336 :                 slot = ExecProcNode(outerNode);
     284     2160336 :                 if (TupIsNull(slot))
     285             :                     break;
     286     2160094 :                 econtext->ecxt_outertuple = slot;
     287     2160094 :                 if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
     288     2160094 :                                          false, hashtable->keepNulls,
     289             :                                          &hashvalue))
     290     2160094 :                     ExecParallelHashTableInsert(hashtable, slot, hashvalue);
     291     2160094 :                 hashtable->partialTuples++;
     292             :             }
     293             : 
     294             :             /*
     295             :              * Make sure that any tuples we wrote to disk are visible to
     296             :              * others before anyone tries to load them.
     297             :              */
     298        1350 :             for (i = 0; i < hashtable->nbatch; ++i)
     299        1108 :                 sts_end_write(hashtable->batches[i].inner_tuples);
     300             : 
     301             :             /*
     302             :              * Update shared counters.  We need an accurate total tuple count
     303             :              * to control the empty table optimization.
     304             :              */
     305         242 :             ExecParallelHashMergeCounters(hashtable);
     306             : 
     307         242 :             BarrierDetach(&pstate->grow_buckets_barrier);
     308         242 :             BarrierDetach(&pstate->grow_batches_barrier);
     309             : 
     310             :             /*
     311             :              * Wait for everyone to finish building and flushing files and
     312             :              * counters.
     313             :              */
     314         242 :             if (BarrierArriveAndWait(build_barrier,
     315             :                                      WAIT_EVENT_HASH_BUILD_HASH_INNER))
     316             :             {
     317             :                 /*
     318             :                  * Elect one backend to disable any further growth.  Batches
     319             :                  * are now fixed.  While building them we made sure they'd fit
     320             :                  * in our memory budget when we load them back in later (or we
     321             :                  * tried to do that and gave up because we detected extreme
     322             :                  * skew).
     323             :                  */
     324         168 :                 pstate->growth = PHJ_GROWTH_DISABLED;
     325             :             }
     326             :     }
     327             : 
     328             :     /*
     329             :      * We're not yet attached to a batch.  We all agree on the dimensions and
     330             :      * number of inner tuples (for the empty table optimization).
     331             :      */
     332         398 :     hashtable->curbatch = -1;
     333         398 :     hashtable->nbuckets = pstate->nbuckets;
     334         398 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
     335         398 :     hashtable->totalTuples = pstate->total_tuples;
     336             : 
     337             :     /*
     338             :      * Unless we're completely done and the batch state has been freed, make
     339             :      * sure we have accessors.
     340             :      */
     341         398 :     if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
     342         398 :         ExecParallelHashEnsureBatchAccessors(hashtable);
     343             : 
     344             :     /*
     345             :      * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
     346             :      * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
     347             :      * there already).
     348             :      */
     349             :     Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
     350             :            BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
     351             :            BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
     352         398 : }
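
The phase machine above relies on a key property of BarrierArriveAndWait(): it blocks until every attached participant has arrived, advances the barrier to its next phase, and returns true in exactly one arbitrarily chosen participant (false in all others). That is what makes each "elect one backend" step safe without extra locking. A minimal sketch of the idiom, detached from the hash join specifics (the phase constant, wait event, and helper below are illustrative names, not real ones):

    BarrierAttach(barrier);

    if (BarrierPhase(barrier) == EXAMPLE_PHASE_ELECT &&
        BarrierArriveAndWait(barrier, WAIT_EVENT_EXAMPLE))
    {
        /* Exactly one participant gets here to do the serial work. */
        do_serial_setup();
    }

    /* All participants now observe a phase past EXAMPLE_PHASE_ELECT. */
    BarrierDetach(barrier);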
     353             : 
     354             : /* ----------------------------------------------------------------
     355             :  *      ExecInitHash
     356             :  *
     357             :  *      Init routine for Hash node
     358             :  * ----------------------------------------------------------------
     359             :  */
     360             : HashState *
     361       27842 : ExecInitHash(Hash *node, EState *estate, int eflags)
     362             : {
     363             :     HashState  *hashstate;
     364             : 
     365             :     /* check for unsupported flags */
     366             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
     367             : 
     368             :     /*
     369             :      * create state structure
     370             :      */
     371       27842 :     hashstate = makeNode(HashState);
     372       27842 :     hashstate->ps.plan = (Plan *) node;
     373       27842 :     hashstate->ps.state = estate;
     374       27842 :     hashstate->ps.ExecProcNode = ExecHash;
     375       27842 :     hashstate->hashtable = NULL;
     376       27842 :     hashstate->hashkeys = NIL;   /* will be set by parent HashJoin */
     377             : 
     378             :     /*
     379             :      * Miscellaneous initialization
     380             :      *
     381             :      * create expression context for node
     382             :      */
     383       27842 :     ExecAssignExprContext(estate, &hashstate->ps);
     384             : 
     385             :     /*
     386             :      * initialize child nodes
     387             :      */
     388       27842 :     outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
     389             : 
     390             :     /*
     391             :      * initialize our result slot and type. No need to build projection
     392             :      * because this node doesn't do projections.
     393             :      */
     394       27842 :     ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
     395       27842 :     hashstate->ps.ps_ProjInfo = NULL;
     396             : 
     397             :     /*
     398             :      * initialize child expressions
     399             :      */
     400             :     Assert(node->plan.qual == NIL);
     401       27842 :     hashstate->hashkeys =
     402       27842 :         ExecInitExprList(node->hashkeys, (PlanState *) hashstate);
     403             : 
     404       27842 :     return hashstate;
     405             : }
     406             : 
      407             : /* ----------------------------------------------------------------
     408             :  *      ExecEndHash
     409             :  *
     410             :  *      clean up routine for Hash node
     411             :  * ----------------------------------------------------------------
     412             :  */
     413             : void
     414       27758 : ExecEndHash(HashState *node)
     415             : {
     416             :     PlanState  *outerPlan;
     417             : 
     418             :     /*
     419             :      * free exprcontext
     420             :      */
     421       27758 :     ExecFreeExprContext(&node->ps);
     422             : 
     423             :     /*
     424             :      * shut down the subplan
     425             :      */
     426       27758 :     outerPlan = outerPlanState(node);
     427       27758 :     ExecEndNode(outerPlan);
     428       27758 : }
     429             : 
     430             : 
     431             : /* ----------------------------------------------------------------
     432             :  *      ExecHashTableCreate
     433             :  *
     434             :  *      create an empty hashtable data structure for hashjoin.
     435             :  * ----------------------------------------------------------------
     436             :  */
     437             : HashJoinTable
     438       19374 : ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, bool keepNulls)
     439             : {
     440             :     Hash       *node;
     441             :     HashJoinTable hashtable;
     442             :     Plan       *outerNode;
     443             :     size_t      space_allowed;
     444             :     int         nbuckets;
     445             :     int         nbatch;
     446             :     double      rows;
     447             :     int         num_skew_mcvs;
     448             :     int         log2_nbuckets;
     449             :     int         nkeys;
     450             :     int         i;
     451             :     ListCell   *ho;
     452             :     ListCell   *hc;
     453             :     MemoryContext oldcxt;
     454             : 
     455             :     /*
     456             :      * Get information about the size of the relation to be hashed (it's the
     457             :      * "outer" subtree of this node, but the inner relation of the hashjoin).
     458             :      * Compute the appropriate size of the hash table.
     459             :      */
     460       19374 :     node = (Hash *) state->ps.plan;
     461       19374 :     outerNode = outerPlan(node);
     462             : 
     463             :     /*
      464             :      * If this is a shared hash table with a partial plan, then we can't use
     465             :      * outerNode->plan_rows to estimate its size.  We need an estimate of the
     466             :      * total number of rows across all copies of the partial plan.
     467             :      */
     468       19374 :     rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
     469             : 
     470       18976 :     ExecChooseHashTableSize(rows, outerNode->plan_width,
     471       19374 :                             OidIsValid(node->skewTable),
     472       19374 :                             state->parallel_state != NULL,
     473       19374 :                             state->parallel_state != NULL ?
     474         398 :                             state->parallel_state->nparticipants - 1 : 0,
     475             :                             &space_allowed,
     476             :                             &nbuckets, &nbatch, &num_skew_mcvs);
     477             : 
     478             :     /* nbuckets must be a power of 2 */
     479       19374 :     log2_nbuckets = my_log2(nbuckets);
     480             :     Assert(nbuckets == (1 << log2_nbuckets));
     481             : 
     482             :     /*
     483             :      * Initialize the hash table control block.
     484             :      *
     485             :      * The hashtable control block is just palloc'd from the executor's
     486             :      * per-query memory context.  Everything else should be kept inside the
     487             :      * subsidiary hashCxt, batchCxt or spillCxt.
     488             :      */
     489       19374 :     hashtable = palloc_object(HashJoinTableData);
     490       19374 :     hashtable->nbuckets = nbuckets;
     491       19374 :     hashtable->nbuckets_original = nbuckets;
     492       19374 :     hashtable->nbuckets_optimal = nbuckets;
     493       19374 :     hashtable->log2_nbuckets = log2_nbuckets;
     494       19374 :     hashtable->log2_nbuckets_optimal = log2_nbuckets;
     495       19374 :     hashtable->buckets.unshared = NULL;
     496       19374 :     hashtable->keepNulls = keepNulls;
     497       19374 :     hashtable->skewEnabled = false;
     498       19374 :     hashtable->skewBucket = NULL;
     499       19374 :     hashtable->skewBucketLen = 0;
     500       19374 :     hashtable->nSkewBuckets = 0;
     501       19374 :     hashtable->skewBucketNums = NULL;
     502       19374 :     hashtable->nbatch = nbatch;
     503       19374 :     hashtable->curbatch = 0;
     504       19374 :     hashtable->nbatch_original = nbatch;
     505       19374 :     hashtable->nbatch_outstart = nbatch;
     506       19374 :     hashtable->growEnabled = true;
     507       19374 :     hashtable->totalTuples = 0;
     508       19374 :     hashtable->partialTuples = 0;
     509       19374 :     hashtable->skewTuples = 0;
     510       19374 :     hashtable->innerBatchFile = NULL;
     511       19374 :     hashtable->outerBatchFile = NULL;
     512       19374 :     hashtable->spaceUsed = 0;
     513       19374 :     hashtable->spacePeak = 0;
     514       19374 :     hashtable->spaceAllowed = space_allowed;
     515       19374 :     hashtable->spaceUsedSkew = 0;
     516       19374 :     hashtable->spaceAllowedSkew =
     517       19374 :         hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
     518       19374 :     hashtable->chunks = NULL;
     519       19374 :     hashtable->current_chunk = NULL;
     520       19374 :     hashtable->parallel_state = state->parallel_state;
     521       19374 :     hashtable->area = state->ps.state->es_query_dsa;
     522       19374 :     hashtable->batches = NULL;
     523             : 
     524             : #ifdef HJDEBUG
     525             :     printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
     526             :            hashtable, nbatch, nbuckets);
     527             : #endif
     528             : 
     529             :     /*
     530             :      * Create temporary memory contexts in which to keep the hashtable working
     531             :      * storage.  See notes in executor/hashjoin.h.
     532             :      */
     533       19374 :     hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
     534             :                                                "HashTableContext",
     535             :                                                ALLOCSET_DEFAULT_SIZES);
     536             : 
     537       19374 :     hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
     538             :                                                 "HashBatchContext",
     539             :                                                 ALLOCSET_DEFAULT_SIZES);
     540             : 
     541       19374 :     hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
     542             :                                                 "HashSpillContext",
     543             :                                                 ALLOCSET_DEFAULT_SIZES);
     544             : 
     545             :     /* Allocate data that will live for the life of the hashjoin */
     546             : 
     547       19374 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     548             : 
     549             :     /*
     550             :      * Get info about the hash functions to be used for each hash key. Also
     551             :      * remember whether the join operators are strict.
     552             :      */
     553       19374 :     nkeys = list_length(hashOperators);
     554       19374 :     hashtable->outer_hashfunctions = palloc_array(FmgrInfo, nkeys);
     555       19374 :     hashtable->inner_hashfunctions = palloc_array(FmgrInfo, nkeys);
     556       19374 :     hashtable->hashStrict = palloc_array(bool, nkeys);
     557       19374 :     hashtable->collations = palloc_array(Oid, nkeys);
     558       19374 :     i = 0;
     559       39976 :     forboth(ho, hashOperators, hc, hashCollations)
     560             :     {
     561       20602 :         Oid         hashop = lfirst_oid(ho);
     562             :         Oid         left_hashfn;
     563             :         Oid         right_hashfn;
     564             : 
     565       20602 :         if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
     566           0 :             elog(ERROR, "could not find hash function for hash operator %u",
     567             :                  hashop);
     568       20602 :         fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
     569       20602 :         fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
     570       20602 :         hashtable->hashStrict[i] = op_strict(hashop);
     571       20602 :         hashtable->collations[i] = lfirst_oid(hc);
     572       20602 :         i++;
     573             :     }
     574             : 
     575       19374 :     if (nbatch > 1 && hashtable->parallel_state == NULL)
     576             :     {
     577             :         MemoryContext oldctx;
     578             : 
     579             :         /*
      580             :          * allocate and initialize the file arrays in spillCxt (not needed for
     581             :          * parallel case which uses shared tuplestores instead of raw files)
     582             :          */
     583         110 :         oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
     584             : 
     585         110 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
     586         110 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     587             : 
     588         110 :         MemoryContextSwitchTo(oldctx);
     589             : 
     590             :         /* The files will not be opened until needed... */
     591             :         /* ... but make sure we have temp tablespaces established for them */
     592         110 :         PrepareTempTablespaces();
     593             :     }
     594             : 
     595       19374 :     MemoryContextSwitchTo(oldcxt);
     596             : 
     597       19374 :     if (hashtable->parallel_state)
     598             :     {
     599         398 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
     600             :         Barrier    *build_barrier;
     601             : 
     602             :         /*
     603             :          * Attach to the build barrier.  The corresponding detach operation is
     604             :          * in ExecHashTableDetach.  Note that we won't attach to the
     605             :          * batch_barrier for batch 0 yet.  We'll attach later and start it out
     606             :          * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
     607             :          * then loaded while hashing (the standard hybrid hash join
     608             :          * algorithm), and we'll coordinate that using build_barrier.
     609             :          */
     610         398 :         build_barrier = &pstate->build_barrier;
     611         398 :         BarrierAttach(build_barrier);
     612             : 
     613             :         /*
     614             :          * So far we have no idea whether there are any other participants,
     615             :          * and if so, what phase they are working on.  The only thing we care
     616             :          * about at this point is whether someone has already created the
     617             :          * SharedHashJoinBatch objects and the hash table for batch 0.  One
     618             :          * backend will be elected to do that now if necessary.
     619             :          */
     620         566 :         if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
     621         168 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
     622             :         {
     623         168 :             pstate->nbatch = nbatch;
     624         168 :             pstate->space_allowed = space_allowed;
     625         168 :             pstate->growth = PHJ_GROWTH_OK;
     626             : 
     627             :             /* Set up the shared state for coordinating batches. */
     628         168 :             ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
     629             : 
     630             :             /*
     631             :              * Allocate batch 0's hash table up front so we can load it
     632             :              * directly while hashing.
     633             :              */
     634         168 :             pstate->nbuckets = nbuckets;
     635         168 :             ExecParallelHashTableAlloc(hashtable, 0);
     636             :         }
     637             : 
     638             :         /*
     639             :          * The next Parallel Hash synchronization point is in
     640             :          * MultiExecParallelHash(), which will progress it all the way to
     641             :          * PHJ_BUILD_RUN.  The caller must not return control from this
     642             :          * executor node between now and then.
     643             :          */
     644             :     }
     645             :     else
     646             :     {
     647             :         /*
     648             :          * Prepare context for the first-scan space allocations; allocate the
     649             :          * hashbucket array therein, and set each bucket "empty".
     650             :          */
     651       18976 :         MemoryContextSwitchTo(hashtable->batchCxt);
     652             : 
     653       18976 :         hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
     654             : 
     655             :         /*
     656             :          * Set up for skew optimization, if possible and there's a need for
     657             :          * more than one batch.  (In a one-batch join, there's no point in
     658             :          * it.)
     659             :          */
     660       18976 :         if (nbatch > 1)
     661         110 :             ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
     662             : 
     663       18976 :         MemoryContextSwitchTo(oldcxt);
     664             :     }
     665             : 
     666       19374 :     return hashtable;
     667             : }
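
For intuition about the sizing done above, a worked example of the budget that ExecChooseHashTableSize() (below) works against. get_hash_memory_limit() is work_mem scaled by hash_mem_multiplier, so under the stock defaults (work_mem = 4MB, hash_mem_multiplier = 2.0) the budget is 4MB * 2.0 = 8MB. Assuming a hypothetical tupsize of 64 bytes (HJTUPLE_OVERHEAD plus the MAXALIGN'd minimal-tuple header plus a MAXALIGN'd row; the real figure depends on platform and row width), a single batch can hold about 8MB / 64 bytes = 131,072 tuples before multiple batches are needed, less the space set aside for the bucket array and the skew table.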
     668             : 
     669             : 
     670             : /*
     671             :  * Compute appropriate size for hashtable given the estimated size of the
     672             :  * relation to be hashed (number of rows and average row width).
     673             :  *
     674             :  * This is exported so that the planner's costsize.c can use it.
     675             :  */
     676             : 
     677             : /* Target bucket loading (tuples per bucket) */
     678             : #define NTUP_PER_BUCKET         1
     679             : 
     680             : void
     681      456540 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
     682             :                         bool try_combined_hash_mem,
     683             :                         int parallel_workers,
     684             :                         size_t *space_allowed,
     685             :                         int *numbuckets,
     686             :                         int *numbatches,
     687             :                         int *num_skew_mcvs)
     688             : {
     689             :     int         tupsize;
     690             :     double      inner_rel_bytes;
     691             :     size_t      hash_table_bytes;
     692             :     size_t      bucket_bytes;
     693             :     size_t      max_pointers;
     694      456540 :     int         nbatch = 1;
     695             :     int         nbuckets;
     696             :     double      dbuckets;
     697             : 
     698             :     /* Force a plausible relation size if no info */
     699      456540 :     if (ntuples <= 0.0)
     700         150 :         ntuples = 1000.0;
     701             : 
     702             :     /*
     703             :      * Estimate tupsize based on footprint of tuple in hashtable... note this
     704             :      * does not allow for any palloc overhead.  The manipulations of spaceUsed
     705             :      * don't count palloc overhead either.
     706             :      */
     707      456540 :     tupsize = HJTUPLE_OVERHEAD +
     708      456540 :         MAXALIGN(SizeofMinimalTupleHeader) +
     709      456540 :         MAXALIGN(tupwidth);
     710      456540 :     inner_rel_bytes = ntuples * tupsize;
     711             : 
     712             :     /*
     713             :      * Compute in-memory hashtable size limit from GUCs.
     714             :      */
     715      456540 :     hash_table_bytes = get_hash_memory_limit();
     716             : 
     717             :     /*
     718             :      * Parallel Hash tries to use the combined hash_mem of all workers to
     719             :      * avoid the need to batch.  If that won't work, it falls back to hash_mem
     720             :      * per worker and tries to process batches in parallel.
     721             :      */
     722      456540 :     if (try_combined_hash_mem)
     723             :     {
     724             :         /* Careful, this could overflow size_t */
     725             :         double      newlimit;
     726             : 
     727       11588 :         newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
     728       11588 :         newlimit = Min(newlimit, (double) SIZE_MAX);
     729       11588 :         hash_table_bytes = (size_t) newlimit;
     730             :     }
     731             : 
     732      456540 :     *space_allowed = hash_table_bytes;
     733             : 
     734             :     /*
     735             :      * If skew optimization is possible, estimate the number of skew buckets
     736             :      * that will fit in the memory allowed, and decrement the assumed space
     737             :      * available for the main hash table accordingly.
     738             :      *
     739             :      * We make the optimistic assumption that each skew bucket will contain
     740             :      * one inner-relation tuple.  If that turns out to be low, we will recover
     741             :      * at runtime by reducing the number of skew buckets.
     742             :      *
     743             :      * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
     744             :      * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
     745             :      * will round up to the next power of 2 and then multiply by 4 to reduce
     746             :      * collisions.
     747             :      */
     748      456540 :     if (useskew)
     749             :     {
     750             :         size_t      bytes_per_mcv;
     751             :         size_t      skew_mcvs;
     752             : 
     753             :         /*----------
     754             :          * Compute number of MCVs we could hold in hash_table_bytes
     755             :          *
     756             :          * Divisor is:
     757             :          * size of a hash tuple +
     758             :          * worst-case size of skewBucket[] per MCV +
     759             :          * size of skewBucketNums[] entry +
     760             :          * size of skew bucket struct itself
     761             :          *----------
     762             :          */
     763      453070 :         bytes_per_mcv = tupsize +
     764             :             (8 * sizeof(HashSkewBucket *)) +
     765      453070 :             sizeof(int) +
     766             :             SKEW_BUCKET_OVERHEAD;
     767      453070 :         skew_mcvs = hash_table_bytes / bytes_per_mcv;
     768             : 
     769             :         /*
     770             :          * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
     771             :          * not to worry about size_t overflow in the multiplication)
     772             :          */
     773      453070 :         skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
     774             : 
     775             :         /* Now clamp to integer range */
     776      453070 :         skew_mcvs = Min(skew_mcvs, INT_MAX);
     777             : 
     778      453070 :         *num_skew_mcvs = (int) skew_mcvs;
     779             : 
     780             :         /* Reduce hash_table_bytes by the amount needed for the skew table */
     781      453070 :         if (skew_mcvs > 0)
     782      453070 :             hash_table_bytes -= skew_mcvs * bytes_per_mcv;
     783             :     }
     784             :     else
     785        3470 :         *num_skew_mcvs = 0;
     786             : 
     787             :     /*
     788             :      * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
     789             :      * memory is filled, assuming a single batch; but limit the value so that
     790             :      * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
     791             :      * nor MaxAllocSize.
     792             :      *
     793             :      * Note that both nbuckets and nbatch must be powers of 2 to make
     794             :      * ExecHashGetBucketAndBatch fast.
     795             :      */
     796      456540 :     max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
     797      456540 :     max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
     798             :     /* If max_pointers isn't a power of 2, must round it down to one */
     799      456540 :     max_pointers = pg_prevpower2_size_t(max_pointers);
     800             : 
     801             :     /* Also ensure we avoid integer overflow in nbatch and nbuckets */
     802             :     /* (this step is redundant given the current value of MaxAllocSize) */
     803      456540 :     max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
     804             : 
     805      456540 :     dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
     806      456540 :     dbuckets = Min(dbuckets, max_pointers);
     807      456540 :     nbuckets = (int) dbuckets;
     808             :     /* don't let nbuckets be really small, though ... */
     809      456540 :     nbuckets = Max(nbuckets, 1024);
     810             :     /* ... and force it to be a power of 2. */
     811      456540 :     nbuckets = pg_nextpower2_32(nbuckets);
     812             : 
     813             :     /*
     814             :      * If there's not enough space to store the projected number of tuples and
     815             :      * the required bucket headers, we will need multiple batches.
     816             :      */
     817      456540 :     bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
     818      456540 :     if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
     819             :     {
     820             :         /* We'll need multiple batches */
     821             :         size_t      sbuckets;
     822             :         double      dbatch;
     823             :         int         minbatch;
     824             :         size_t      bucket_size;
     825             : 
     826             :         /*
     827             :          * If Parallel Hash with combined hash_mem would still need multiple
     828             :          * batches, we'll have to fall back to regular hash_mem budget.
     829             :          */
     830        4386 :         if (try_combined_hash_mem)
     831             :         {
     832         246 :             ExecChooseHashTableSize(ntuples, tupwidth, useskew,
     833             :                                     false, parallel_workers,
     834             :                                     space_allowed,
     835             :                                     numbuckets,
     836             :                                     numbatches,
     837             :                                     num_skew_mcvs);
     838         246 :             return;
     839             :         }
     840             : 
     841             :         /*
     842             :          * Estimate the number of buckets we'll want to have when hash_mem is
     843             :          * entirely full.  Each bucket will contain a bucket pointer plus
     844             :          * NTUP_PER_BUCKET tuples, whose projected size already includes
     845             :          * overhead for the hash code, pointer to the next tuple, etc.
     846             :          */
     847        4140 :         bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
     848        4140 :         if (hash_table_bytes <= bucket_size)
     849           0 :             sbuckets = 1;       /* avoid pg_nextpower2_size_t(0) */
     850             :         else
     851        4140 :             sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
     852        4140 :         sbuckets = Min(sbuckets, max_pointers);
     853        4140 :         nbuckets = (int) sbuckets;
     854        4140 :         nbuckets = pg_nextpower2_32(nbuckets);
     855        4140 :         bucket_bytes = nbuckets * sizeof(HashJoinTuple);
     856             : 
     857             :         /*
     858             :          * Buckets are simple pointers to hashjoin tuples, while tupsize
     859             :          * includes the pointer, hash code, and MinimalTupleData.  So buckets
     860             :          * should never really exceed 25% of hash_mem (even for
     861             :          * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
     862             :          * 2^N bytes, where we might get more because of doubling. So let's
     863             :          * look for 50% here.
     864             :          */
     865             :         Assert(bucket_bytes <= hash_table_bytes / 2);
     866             : 
     867             :         /* Calculate required number of batches. */
     868        4140 :         dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
     869        4140 :         dbatch = Min(dbatch, max_pointers);
     870        4140 :         minbatch = (int) dbatch;
     871        4140 :         nbatch = pg_nextpower2_32(Max(2, minbatch));
     872             :     }
     873             : 
     874             :     Assert(nbuckets > 0);
     875             :     Assert(nbatch > 0);
     876             : 
     877      456294 :     *numbuckets = nbuckets;
     878      456294 :     *numbatches = nbatch;
     879             : }
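
The power-of-2 requirement called out above lets ExecHashGetBucketAndBatch() split a hash value with bit masks instead of division: the low log2_nbuckets bits pick the bucket and higher bits pick the batch. A simplified sketch of the idea (this file's actual implementation differs in detail, e.g. in how it derives the batch bits):

    /* nbuckets and nbatch are powers of 2, so "& (n - 1)" acts as "% n" */
    *bucketno = hashvalue & (nbuckets - 1);
    if (nbatch > 1)
        *batchno = (hashvalue >> log2_nbuckets) & (nbatch - 1);
    else
        *batchno = 0;

Because doubling nbatch merely exposes one more hash bit, a tuple either keeps its batch number or moves to batchno + oldnbatch, never backwards; ExecHashIncreaseNumBatches() (below) depends on tuples only ever moving to later batches.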
     880             : 
     881             : 
     882             : /* ----------------------------------------------------------------
     883             :  *      ExecHashTableDestroy
     884             :  *
     885             :  *      destroy a hash table
     886             :  * ----------------------------------------------------------------
     887             :  */
     888             : void
     889       19290 : ExecHashTableDestroy(HashJoinTable hashtable)
     890             : {
     891             :     int         i;
     892             : 
     893             :     /*
     894             :      * Make sure all the temp files are closed.  We skip batch 0, since it
     895             :      * can't have any temp files (and the arrays might not even exist if
     896             :      * nbatch is only 1).  Parallel hash joins don't use these files.
     897             :      */
     898       19290 :     if (hashtable->innerBatchFile != NULL)
     899             :     {
     900        1388 :         for (i = 1; i < hashtable->nbatch; i++)
     901             :         {
     902        1224 :             if (hashtable->innerBatchFile[i])
     903           0 :                 BufFileClose(hashtable->innerBatchFile[i]);
     904        1224 :             if (hashtable->outerBatchFile[i])
     905           0 :                 BufFileClose(hashtable->outerBatchFile[i]);
     906             :         }
     907             :     }
     908             : 
     909             :     /* Release working memory (batchCxt is a child, so it goes away too) */
     910       19290 :     MemoryContextDelete(hashtable->hashCxt);
     911             : 
     912             :     /* And drop the control block */
     913       19290 :     pfree(hashtable);
     914       19290 : }
     915             : 
     916             : /*
     917             :  * ExecHashIncreaseNumBatches
     918             :  *      increase the original number of batches in order to reduce
     919             :  *      current memory consumption
     920             :  */
     921             : static void
     922      449136 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
     923             : {
     924      449136 :     int         oldnbatch = hashtable->nbatch;
     925      449136 :     int         curbatch = hashtable->curbatch;
     926             :     int         nbatch;
     927             :     long        ninmemory;
     928             :     long        nfreed;
     929             :     HashMemoryChunk oldchunks;
     930             : 
     931             :     /* do nothing if we've decided to shut off growth */
     932      449136 :     if (!hashtable->growEnabled)
     933      449020 :         return;
     934             : 
     935             :     /* safety check to avoid overflow */
     936         116 :     if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
     937           0 :         return;
     938             : 
     939         116 :     nbatch = oldnbatch * 2;
     940             :     Assert(nbatch > 1);
     941             : 
     942             : #ifdef HJDEBUG
     943             :     printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
     944             :            hashtable, nbatch, hashtable->spaceUsed);
     945             : #endif
     946             : 
     947         116 :     if (hashtable->innerBatchFile == NULL)
     948             :     {
     949          54 :         MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
     950             : 
     951             :         /* we had no file arrays before */
     952          54 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
     953          54 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     954             : 
     955          54 :         MemoryContextSwitchTo(oldcxt);
     956             : 
     957             :         /* time to establish the temp tablespaces, too */
     958          54 :         PrepareTempTablespaces();
     959             :     }
     960             :     else
     961             :     {
     962             :         /* enlarge arrays and zero out added entries */
     963          62 :         hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
     964          62 :         hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
     965             :     }
     966             : 
     967         116 :     hashtable->nbatch = nbatch;
     968             : 
     969             :     /*
      970             :      * Scan through the existing hash table entries and dump out any that no
      971             :      * longer belong to the current batch.
     972             :      */
     973         116 :     ninmemory = nfreed = 0;
     974             : 
      975             :     /* If we know we need to resize nbuckets, we can do it while rebatching. */
     976         116 :     if (hashtable->nbuckets_optimal != hashtable->nbuckets)
     977             :     {
     978             :         /* we never decrease the number of buckets */
     979             :         Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
     980             : 
     981          54 :         hashtable->nbuckets = hashtable->nbuckets_optimal;
     982          54 :         hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
     983             : 
     984          54 :         hashtable->buckets.unshared =
     985          54 :             repalloc_array(hashtable->buckets.unshared,
     986             :                            HashJoinTuple, hashtable->nbuckets);
     987             :     }
     988             : 
     989             :     /*
     990             :      * We will scan through the chunks directly, so that we can reset the
      991             :      * buckets now and not have to keep track of which tuples in the buckets
      992             :      * have already been processed. We will free the old chunks as we go.
     993             :      */
     994         116 :     memset(hashtable->buckets.unshared, 0,
     995         116 :            sizeof(HashJoinTuple) * hashtable->nbuckets);
     996         116 :     oldchunks = hashtable->chunks;
     997         116 :     hashtable->chunks = NULL;
     998             : 
     999             :     /* so, let's scan through the old chunks, and all tuples in each chunk */
    1000         580 :     while (oldchunks != NULL)
    1001             :     {
    1002         464 :         HashMemoryChunk nextchunk = oldchunks->next.unshared;
    1003             : 
    1004             :         /* position within the buffer (up to oldchunks->used) */
    1005         464 :         size_t      idx = 0;
    1006             : 
    1007             :         /* process all tuples stored in this chunk (and then free it) */
    1008      316828 :         while (idx < oldchunks->used)
    1009             :         {
    1010      316364 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
    1011      316364 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1012      316364 :             int         hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
    1013             :             int         bucketno;
    1014             :             int         batchno;
    1015             : 
    1016      316364 :             ninmemory++;
    1017      316364 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1018             :                                       &bucketno, &batchno);
    1019             : 
    1020      316364 :             if (batchno == curbatch)
    1021             :             {
    1022             :                 /* keep tuple in memory - copy it into the new chunk */
    1023             :                 HashJoinTuple copyTuple;
    1024             : 
    1025      121786 :                 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1026      121786 :                 memcpy(copyTuple, hashTuple, hashTupleSize);
    1027             : 
    1028             :                 /* and add it back to the appropriate bucket */
    1029      121786 :                 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1030      121786 :                 hashtable->buckets.unshared[bucketno] = copyTuple;
    1031             :             }
    1032             :             else
    1033             :             {
    1034             :                 /* dump it out */
    1035             :                 Assert(batchno > curbatch);
    1036      194578 :                 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
    1037             :                                       hashTuple->hashvalue,
    1038      194578 :                                       &hashtable->innerBatchFile[batchno],
    1039             :                                       hashtable);
    1040             : 
    1041      194578 :                 hashtable->spaceUsed -= hashTupleSize;
    1042      194578 :                 nfreed++;
    1043             :             }
    1044             : 
    1045             :             /* next tuple in this chunk */
    1046      316364 :             idx += MAXALIGN(hashTupleSize);
    1047             : 
    1048             :             /* allow this loop to be cancellable */
    1049      316364 :             CHECK_FOR_INTERRUPTS();
    1050             :         }
    1051             : 
    1052             :         /* we're done with this chunk - free it and proceed to the next one */
    1053         464 :         pfree(oldchunks);
    1054         464 :         oldchunks = nextchunk;
    1055             :     }
    1056             : 
    1057             : #ifdef HJDEBUG
    1058             :     printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
    1059             :            hashtable, nfreed, ninmemory, hashtable->spaceUsed);
    1060             : #endif
    1061             : 
    1062             :     /*
    1063             :      * If we dumped out either all or none of the tuples in the table, disable
    1064             :      * further expansion of nbatch.  This situation implies that we have
    1065             :      * enough tuples of identical hashvalues to overflow spaceAllowed.
    1066             :      * Increasing nbatch will not fix it since there's no way to subdivide the
    1067             :      * group any more finely. We have to just gut it out and hope the server
    1068             :      * has enough RAM.
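                      :      * (For example, if the join key has millions of duplicates, every
                      :      * duplicate carries the same hash value and lands in the same batch,
                      :      * so doubling nbatch frees nothing and nfreed stays 0.)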
    1069             :      */
    1070         116 :     if (nfreed == 0 || nfreed == ninmemory)
    1071             :     {
    1072          26 :         hashtable->growEnabled = false;
    1073             : #ifdef HJDEBUG
    1074             :         printf("Hashjoin %p: disabling further increase of nbatch\n",
    1075             :                hashtable);
    1076             : #endif
    1077             :     }
    1078             : }
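                      : 
                      : /*
                      :  * A minimal standalone sketch (an editor's illustration, not part of
                      :  * nodeHash.c) of the invariant the rebatching loop above relies on: under
                      :  * the bucketno/batchno scheme described at ExecHashGetBucketAndBatch,
                      :  * doubling nbatch either keeps a tuple in its current batch or moves it to
                      :  * a later one, never an earlier one; hence the Assert(batchno > curbatch)
                      :  * above.
                      :  */
                      : #include <stdint.h>
                      : #include <inttypes.h>
                      : #include <stdio.h>
                      : 
                      : static uint32_t ror32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }
                      : 
                      : int main(void)
                      : {
                      :     const int log2_nbuckets = 10;   /* 1024 buckets, an assumed value */
                      : 
                      :     for (uint32_t i = 0; i < 8; i++)
                      :     {
                      :         uint32_t hash = i * 2654435761u;    /* arbitrary stand-in hashes */
                      :         int old_batch = ror32(hash, log2_nbuckets) & (4 - 1);  /* nbatch = 4 */
                      :         int new_batch = ror32(hash, log2_nbuckets) & (8 - 1);  /* nbatch = 8 */
                      : 
                      :         /* new_batch is always old_batch or old_batch + 4, never smaller */
                      :         printf("hash %08" PRIx32 ": batch %d -> %d\n", hash, old_batch, new_batch);
                      :     }
                      :     return 0;
                      : }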
    1079             : 
    1080             : /*
    1081             :  * ExecParallelHashIncreaseNumBatches
    1082             :  *      Every participant attached to grow_batches_barrier must run this
    1083             :  *      function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
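                      :  *
                      :  *      The grow_batches_barrier advances through the ELECT, REALLOCATE,
                      :  *      REPARTITION, DECIDE and FINISH phases handled below; a participant
                      :  *      can attach in any phase, which is why every case falls through to
                      :  *      the next.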
    1084             :  */
    1085             : static void
    1086          52 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
    1087             : {
    1088          52 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1089             : 
    1090             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1091             : 
    1092             :     /*
     1093             :      * It's unlikely, but we need to be prepared for new participants to show
     1094             :      * up while we're in the middle of this operation, so we switch on the
     1095             :      * barrier phase here.
    1096             :      */
    1097          52 :     switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
    1098             :     {
    1099          52 :         case PHJ_GROW_BATCHES_ELECT:
    1100             : 
    1101             :             /*
    1102             :              * Elect one participant to prepare to grow the number of batches.
    1103             :              * This involves reallocating or resetting the buckets of batch 0
    1104             :              * in preparation for all participants to begin repartitioning the
    1105             :              * tuples.
    1106             :              */
    1107          52 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1108             :                                      WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
    1109             :             {
    1110             :                 dsa_pointer_atomic *buckets;
    1111             :                 ParallelHashJoinBatch *old_batch0;
    1112             :                 int         new_nbatch;
    1113             :                 int         i;
    1114             : 
    1115             :                 /* Move the old batch out of the way. */
    1116          50 :                 old_batch0 = hashtable->batches[0].shared;
    1117          50 :                 pstate->old_batches = pstate->batches;
    1118          50 :                 pstate->old_nbatch = hashtable->nbatch;
    1119          50 :                 pstate->batches = InvalidDsaPointer;
    1120             : 
    1121             :                 /* Free this backend's old accessors. */
    1122          50 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1123             : 
    1124             :                 /* Figure out how many batches to use. */
    1125          50 :                 if (hashtable->nbatch == 1)
    1126             :                 {
    1127             :                     /*
    1128             :                      * We are going from single-batch to multi-batch.  We need
    1129             :                      * to switch from one large combined memory budget to the
    1130             :                      * regular hash_mem budget.
    1131             :                      */
    1132          36 :                     pstate->space_allowed = get_hash_memory_limit();
    1133             : 
    1134             :                     /*
    1135             :                      * The combined hash_mem of all participants wasn't
    1136             :                      * enough. Therefore one batch per participant would be
    1137             :                      * approximately equivalent and would probably also be
    1138             :                      * insufficient.  So try two batches per participant,
    1139             :                      * rounded up to a power of two.
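                      :                      * For example, with 3 participants we would try
                      :                      * pg_nextpower2_32(3 * 2) == 8 batches.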
    1140             :                      */
    1141          36 :                     new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
    1142             :                 }
    1143             :                 else
    1144             :                 {
    1145             :                     /*
    1146             :                      * We were already multi-batched.  Try doubling the number
    1147             :                      * of batches.
    1148             :                      */
    1149          14 :                     new_nbatch = hashtable->nbatch * 2;
    1150             :                 }
    1151             : 
    1152             :                 /* Allocate new larger generation of batches. */
    1153             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1154          50 :                 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
    1155             :                 Assert(hashtable->nbatch == pstate->nbatch);
    1156             : 
    1157             :                 /* Replace or recycle batch 0's bucket array. */
    1158          50 :                 if (pstate->old_nbatch == 1)
    1159             :                 {
    1160             :                     double      dtuples;
    1161             :                     double      dbuckets;
    1162             :                     int         new_nbuckets;
    1163             : 
    1164             :                     /*
    1165             :                      * We probably also need a smaller bucket array.  How many
    1166             :                      * tuples do we expect per batch, assuming we have only
    1167             :                      * half of them so far?  Normally we don't need to change
    1168             :                      * the bucket array's size, because the size of each batch
    1169             :                      * stays the same as we add more batches, but in this
    1170             :                      * special case we move from a large batch to many smaller
    1171             :                      * batches and it would be wasteful to keep the large
    1172             :                      * array.
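                      :                      *
                      :                      * For example, if batch 0 absorbed 1 million tuples
                      :                      * and new_nbatch is 8, we plan for (1000000 * 2) / 8
                      :                      * = 250000 tuples per batch, divide by
                      :                      * NTUP_PER_BUCKET for a bucket count, clamp it
                      :                      * between 1024 and the MaxAllocSize limit, and round
                      :                      * up to a power of two.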
    1173             :                      */
    1174          36 :                     dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
    1175          36 :                     dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
    1176          36 :                     dbuckets = Min(dbuckets,
    1177             :                                    MaxAllocSize / sizeof(dsa_pointer_atomic));
    1178          36 :                     new_nbuckets = (int) dbuckets;
    1179          36 :                     new_nbuckets = Max(new_nbuckets, 1024);
    1180          36 :                     new_nbuckets = pg_nextpower2_32(new_nbuckets);
    1181          36 :                     dsa_free(hashtable->area, old_batch0->buckets);
    1182          72 :                     hashtable->batches[0].shared->buckets =
    1183          36 :                         dsa_allocate(hashtable->area,
    1184             :                                      sizeof(dsa_pointer_atomic) * new_nbuckets);
    1185             :                     buckets = (dsa_pointer_atomic *)
    1186          36 :                         dsa_get_address(hashtable->area,
    1187          36 :                                         hashtable->batches[0].shared->buckets);
    1188      110628 :                     for (i = 0; i < new_nbuckets; ++i)
    1189      110592 :                         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1190          36 :                     pstate->nbuckets = new_nbuckets;
    1191             :                 }
    1192             :                 else
    1193             :                 {
    1194             :                     /* Recycle the existing bucket array. */
    1195          14 :                     hashtable->batches[0].shared->buckets = old_batch0->buckets;
    1196             :                     buckets = (dsa_pointer_atomic *)
    1197          14 :                         dsa_get_address(hashtable->area, old_batch0->buckets);
    1198       53262 :                     for (i = 0; i < hashtable->nbuckets; ++i)
    1199       53248 :                         dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
    1200             :                 }
    1201             : 
    1202             :                 /* Move all chunks to the work queue for parallel processing. */
    1203          50 :                 pstate->chunk_work_queue = old_batch0->chunks;
    1204             : 
    1205             :                 /* Disable further growth temporarily while we're growing. */
    1206          50 :                 pstate->growth = PHJ_GROWTH_DISABLED;
    1207             :             }
    1208             :             else
    1209             :             {
    1210             :                 /* All other participants just flush their tuples to disk. */
    1211           2 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1212             :             }
    1213             :             /* Fall through. */
    1214             : 
    1215             :         case PHJ_GROW_BATCHES_REALLOCATE:
    1216             :             /* Wait for the above to be finished. */
    1217          52 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1218             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
    1219             :             /* Fall through. */
    1220             : 
    1221          52 :         case PHJ_GROW_BATCHES_REPARTITION:
    1222             :             /* Make sure that we have the current dimensions and buckets. */
    1223          52 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1224          52 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
     1225             :             /* Then repartition, and merge the per-backend counters into shared totals. */
    1226          52 :             ExecParallelHashRepartitionFirst(hashtable);
    1227          52 :             ExecParallelHashRepartitionRest(hashtable);
    1228          52 :             ExecParallelHashMergeCounters(hashtable);
    1229             :             /* Wait for the above to be finished. */
    1230          52 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1231             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
    1232             :             /* Fall through. */
    1233             : 
    1234          52 :         case PHJ_GROW_BATCHES_DECIDE:
    1235             : 
    1236             :             /*
    1237             :              * Elect one participant to clean up and decide whether further
    1238             :              * repartitioning is needed, or should be disabled because it's
    1239             :              * not helping.
    1240             :              */
    1241          52 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1242             :                                      WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
    1243             :             {
    1244          50 :                 bool        space_exhausted = false;
    1245          50 :                 bool        extreme_skew_detected = false;
    1246             : 
    1247             :                 /* Make sure that we have the current dimensions and buckets. */
    1248          50 :                 ExecParallelHashEnsureBatchAccessors(hashtable);
    1249          50 :                 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1250             : 
    1251             :                 /* Are any of the new generation of batches exhausted? */
    1252         370 :                 for (int i = 0; i < hashtable->nbatch; ++i)
    1253             :                 {
    1254         320 :                     ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
    1255             : 
    1256         320 :                     if (batch->space_exhausted ||
    1257         320 :                         batch->estimated_size > pstate->space_allowed)
    1258             :                     {
    1259             :                         int         parent;
    1260             : 
    1261          24 :                         space_exhausted = true;
    1262             : 
    1263             :                         /*
    1264             :                          * Did this batch receive ALL of the tuples from its
    1265             :                          * parent batch?  That would indicate that further
    1266             :                          * repartitioning isn't going to help (the hash values
    1267             :                          * are probably all the same).
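                      :                          * (Since batchno is the rotated hash value
                      :                          * masked to nbatch - 1, and nbatch grows by
                      :                          * doubling, a new batch's parent is simply
                      :                          * i % old_nbatch.)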
    1268             :                          */
    1269          24 :                         parent = i % pstate->old_nbatch;
    1270          24 :                         if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
    1271          24 :                             extreme_skew_detected = true;
    1272             :                     }
    1273             :                 }
    1274             : 
    1275             :                 /* Don't keep growing if it's not helping or we'd overflow. */
    1276          50 :                 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
    1277          24 :                     pstate->growth = PHJ_GROWTH_DISABLED;
    1278          26 :                 else if (space_exhausted)
    1279           0 :                     pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    1280             :                 else
    1281          26 :                     pstate->growth = PHJ_GROWTH_OK;
    1282             : 
    1283             :                 /* Free the old batches in shared memory. */
    1284          50 :                 dsa_free(hashtable->area, pstate->old_batches);
    1285          50 :                 pstate->old_batches = InvalidDsaPointer;
    1286             :             }
    1287             :             /* Fall through. */
    1288             : 
    1289             :         case PHJ_GROW_BATCHES_FINISH:
    1290             :             /* Wait for the above to complete. */
    1291          52 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1292             :                                  WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
    1293             :     }
    1294          52 : }
    1295             : 
    1296             : /*
    1297             :  * Repartition the tuples currently loaded into memory for inner batch 0
    1298             :  * because the number of batches has been increased.  Some tuples are retained
    1299             :  * in memory and some are written out to a later batch.
    1300             :  */
    1301             : static void
    1302          52 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
    1303             : {
    1304             :     dsa_pointer chunk_shared;
    1305             :     HashMemoryChunk chunk;
    1306             : 
    1307             :     Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
    1308             : 
    1309         342 :     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
    1310             :     {
    1311         290 :         size_t      idx = 0;
    1312             : 
    1313             :         /* Repartition all tuples in this chunk. */
    1314      222320 :         while (idx < chunk->used)
    1315             :         {
    1316      222030 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1317      222030 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1318             :             HashJoinTuple copyTuple;
    1319             :             dsa_pointer shared;
    1320             :             int         bucketno;
    1321             :             int         batchno;
    1322             : 
    1323      222030 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1324             :                                       &bucketno, &batchno);
    1325             : 
    1326             :             Assert(batchno < hashtable->nbatch);
    1327      222030 :             if (batchno == 0)
    1328             :             {
    1329             :                 /* It still belongs in batch 0.  Copy to a new chunk. */
    1330             :                 copyTuple =
    1331       51416 :                     ExecParallelHashTupleAlloc(hashtable,
    1332       51416 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1333             :                                                &shared);
    1334       51416 :                 copyTuple->hashvalue = hashTuple->hashvalue;
    1335       51416 :                 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
    1336       51416 :                 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1337             :                                           copyTuple, shared);
    1338             :             }
    1339             :             else
    1340             :             {
    1341      170614 :                 size_t      tuple_size =
    1342      170614 :                     MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1343             : 
    1344             :                 /* It belongs in a later batch. */
    1345      170614 :                 hashtable->batches[batchno].estimated_size += tuple_size;
    1346      170614 :                 sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1347      170614 :                              &hashTuple->hashvalue, tuple);
    1348             :             }
    1349             : 
    1350             :             /* Count this tuple. */
    1351      222030 :             ++hashtable->batches[0].old_ntuples;
    1352      222030 :             ++hashtable->batches[batchno].ntuples;
    1353             : 
    1354      222030 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1355             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1356             :         }
    1357             : 
    1358             :         /* Free this chunk. */
    1359         290 :         dsa_free(hashtable->area, chunk_shared);
    1360             : 
    1361         290 :         CHECK_FOR_INTERRUPTS();
    1362             :     }
    1363          52 : }
    1364             : 
    1365             : /*
    1366             :  * Help repartition inner batches 1..n.
    1367             :  */
    1368             : static void
    1369          52 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
    1370             : {
    1371          52 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1372          52 :     int         old_nbatch = pstate->old_nbatch;
    1373             :     SharedTuplestoreAccessor **old_inner_tuples;
    1374             :     ParallelHashJoinBatch *old_batches;
    1375             :     int         i;
    1376             : 
    1377             :     /* Get our hands on the previous generation of batches. */
    1378             :     old_batches = (ParallelHashJoinBatch *)
    1379          52 :         dsa_get_address(hashtable->area, pstate->old_batches);
    1380          52 :     old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
    1381         116 :     for (i = 1; i < old_nbatch; ++i)
    1382             :     {
    1383          64 :         ParallelHashJoinBatch *shared =
    1384          64 :             NthParallelHashJoinBatch(old_batches, i);
    1385             : 
    1386          64 :         old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
    1387             :                                          ParallelWorkerNumber + 1,
    1388             :                                          &pstate->fileset);
    1389             :     }
    1390             : 
    1391             :     /* Join in the effort to repartition them. */
    1392         116 :     for (i = 1; i < old_nbatch; ++i)
    1393             :     {
    1394             :         MinimalTuple tuple;
    1395             :         uint32      hashvalue;
    1396             : 
    1397             :         /* Scan one partition from the previous generation. */
    1398          64 :         sts_begin_parallel_scan(old_inner_tuples[i]);
    1399      172650 :         while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
    1400             :         {
    1401      172586 :             size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1402             :             int         bucketno;
    1403             :             int         batchno;
    1404             : 
    1405             :             /* Decide which partition it goes to in the new generation. */
    1406      172586 :             ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
    1407             :                                       &batchno);
    1408             : 
    1409      172586 :             hashtable->batches[batchno].estimated_size += tuple_size;
    1410      172586 :             ++hashtable->batches[batchno].ntuples;
    1411      172586 :             ++hashtable->batches[i].old_ntuples;
    1412             : 
     1413             :             /* Store the tuple in its new batch. */
    1414      172586 :             sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1415             :                          &hashvalue, tuple);
    1416             : 
    1417      172586 :             CHECK_FOR_INTERRUPTS();
    1418             :         }
    1419          64 :         sts_end_parallel_scan(old_inner_tuples[i]);
    1420             :     }
    1421             : 
    1422          52 :     pfree(old_inner_tuples);
    1423          52 : }
    1424             : 
    1425             : /*
    1426             :  * Transfer the backend-local per-batch counters to the shared totals.
    1427             :  */
    1428             : static void
    1429         294 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
    1430             : {
    1431         294 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1432             :     int         i;
    1433             : 
    1434         294 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    1435         294 :     pstate->total_tuples = 0;
    1436        1754 :     for (i = 0; i < hashtable->nbatch; ++i)
    1437             :     {
    1438        1460 :         ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
    1439             : 
    1440        1460 :         batch->shared->size += batch->size;
    1441        1460 :         batch->shared->estimated_size += batch->estimated_size;
    1442        1460 :         batch->shared->ntuples += batch->ntuples;
    1443        1460 :         batch->shared->old_ntuples += batch->old_ntuples;
    1444        1460 :         batch->size = 0;
    1445        1460 :         batch->estimated_size = 0;
    1446        1460 :         batch->ntuples = 0;
    1447        1460 :         batch->old_ntuples = 0;
    1448        1460 :         pstate->total_tuples += batch->shared->ntuples;
    1449             :     }
    1450         294 :     LWLockRelease(&pstate->lock);
    1451         294 : }
    1452             : 
    1453             : /*
    1454             :  * ExecHashIncreaseNumBuckets
    1455             :  *      increase the original number of buckets in order to reduce
     1456             :  *      the number of tuples per bucket
    1457             :  */
    1458             : static void
    1459          72 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
    1460             : {
    1461             :     HashMemoryChunk chunk;
    1462             : 
    1463             :     /* do nothing if not an increase (it's called increase for a reason) */
    1464          72 :     if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
    1465           0 :         return;
    1466             : 
    1467             : #ifdef HJDEBUG
    1468             :     printf("Hashjoin %p: increasing nbuckets %d => %d\n",
    1469             :            hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
    1470             : #endif
    1471             : 
    1472          72 :     hashtable->nbuckets = hashtable->nbuckets_optimal;
    1473          72 :     hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1474             : 
    1475             :     Assert(hashtable->nbuckets > 1);
    1476             :     Assert(hashtable->nbuckets <= (INT_MAX / 2));
    1477             :     Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
    1478             : 
    1479             :     /*
    1480             :      * Just reallocate the proper number of buckets - we don't need to walk
    1481             :      * through them - we can walk the dense-allocated chunks (just like in
    1482             :      * ExecHashIncreaseNumBatches, but without all the copying into new
    1483             :      * chunks)
    1484             :      */
    1485          72 :     hashtable->buckets.unshared =
    1486          72 :         repalloc_array(hashtable->buckets.unshared,
    1487             :                        HashJoinTuple, hashtable->nbuckets);
    1488             : 
    1489          72 :     memset(hashtable->buckets.unshared, 0,
    1490          72 :            hashtable->nbuckets * sizeof(HashJoinTuple));
    1491             : 
    1492             :     /* scan through all tuples in all chunks to rebuild the hash table */
    1493        1008 :     for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
    1494             :     {
    1495             :         /* process all tuples stored in this chunk */
    1496         936 :         size_t      idx = 0;
    1497             : 
    1498      720936 :         while (idx < chunk->used)
    1499             :         {
    1500      720000 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1501             :             int         bucketno;
    1502             :             int         batchno;
    1503             : 
    1504      720000 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1505             :                                       &bucketno, &batchno);
    1506             : 
    1507             :             /* add the tuple to the proper bucket */
    1508      720000 :             hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1509      720000 :             hashtable->buckets.unshared[bucketno] = hashTuple;
    1510             : 
    1511             :             /* advance index past the tuple */
    1512      720000 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1513             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1514             :         }
    1515             : 
    1516             :         /* allow this loop to be cancellable */
    1517         936 :         CHECK_FOR_INTERRUPTS();
    1518             :     }
    1519             : }
    1520             : 
    1521             : static void
    1522         144 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
    1523             : {
    1524         144 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1525             :     int         i;
    1526             :     HashMemoryChunk chunk;
    1527             :     dsa_pointer chunk_s;
    1528             : 
    1529             :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1530             : 
    1531             :     /*
     1532             :      * It's unlikely, but we need to be prepared for new participants to show
     1533             :      * up while we're in the middle of this operation, so we switch on the
     1534             :      * barrier phase here.
    1535             :      */
    1536         144 :     switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
    1537             :     {
    1538         144 :         case PHJ_GROW_BUCKETS_ELECT:
    1539             :             /* Elect one participant to prepare to increase nbuckets. */
    1540         144 :             if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1541             :                                      WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
    1542             :             {
    1543             :                 size_t      size;
    1544             :                 dsa_pointer_atomic *buckets;
    1545             : 
    1546             :                 /* Double the size of the bucket array. */
    1547         108 :                 pstate->nbuckets *= 2;
    1548         108 :                 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
    1549         108 :                 hashtable->batches[0].shared->size += size / 2;
    1550         108 :                 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
    1551         216 :                 hashtable->batches[0].shared->buckets =
    1552         108 :                     dsa_allocate(hashtable->area, size);
    1553             :                 buckets = (dsa_pointer_atomic *)
    1554         108 :                     dsa_get_address(hashtable->area,
    1555         108 :                                     hashtable->batches[0].shared->buckets);
    1556      933996 :                 for (i = 0; i < pstate->nbuckets; ++i)
    1557      933888 :                     dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1558             : 
    1559             :                 /* Put the chunk list onto the work queue. */
    1560         108 :                 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
    1561             : 
    1562             :                 /* Clear the flag. */
    1563         108 :                 pstate->growth = PHJ_GROWTH_OK;
    1564             :             }
    1565             :             /* Fall through. */
    1566             : 
    1567             :         case PHJ_GROW_BUCKETS_REALLOCATE:
    1568             :             /* Wait for the above to complete. */
    1569         144 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1570             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
    1571             :             /* Fall through. */
    1572             : 
    1573         144 :         case PHJ_GROW_BUCKETS_REINSERT:
    1574             :             /* Reinsert all tuples into the hash table. */
    1575         144 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1576         144 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1577         808 :             while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
    1578             :             {
    1579         664 :                 size_t      idx = 0;
    1580             : 
    1581      543816 :                 while (idx < chunk->used)
    1582             :                 {
    1583      543152 :                     HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1584      543152 :                     dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
    1585             :                     int         bucketno;
    1586             :                     int         batchno;
    1587             : 
    1588      543152 :                     ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1589             :                                               &bucketno, &batchno);
    1590             :                     Assert(batchno == 0);
    1591             : 
    1592             :                     /* add the tuple to the proper bucket */
    1593      543152 :                     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1594             :                                               hashTuple, shared);
    1595             : 
    1596             :                     /* advance index past the tuple */
    1597      543152 :                     idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1598             :                                     HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1599             :                 }
    1600             : 
    1601             :                 /* allow this loop to be cancellable */
    1602         664 :                 CHECK_FOR_INTERRUPTS();
    1603             :             }
    1604         144 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1605             :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
    1606             :     }
    1607         144 : }
    1608             : 
    1609             : /*
    1610             :  * ExecHashTableInsert
     1611             :  *      insert a tuple into the hash table depending on the hash value;
     1612             :  *      it may just go to a temp file for later batches
    1613             :  *
    1614             :  * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
    1615             :  * tuple; the minimal case in particular is certain to happen while reloading
    1616             :  * tuples from batch files.  We could save some cycles in the regular-tuple
    1617             :  * case by not forcing the slot contents into minimal form; not clear if it's
    1618             :  * worth the messiness required.
    1619             :  */
    1620             : void
    1621    11682946 : ExecHashTableInsert(HashJoinTable hashtable,
    1622             :                     TupleTableSlot *slot,
    1623             :                     uint32 hashvalue)
    1624             : {
    1625             :     bool        shouldFree;
    1626    11682946 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1627             :     int         bucketno;
    1628             :     int         batchno;
    1629             : 
    1630    11682946 :     ExecHashGetBucketAndBatch(hashtable, hashvalue,
    1631             :                               &bucketno, &batchno);
    1632             : 
    1633             :     /*
    1634             :      * decide whether to put the tuple in the hash table or a temp file
    1635             :      */
    1636    11682946 :     if (batchno == hashtable->curbatch)
    1637             :     {
    1638             :         /*
    1639             :          * put the tuple in hash table
    1640             :          */
    1641             :         HashJoinTuple hashTuple;
    1642             :         int         hashTupleSize;
    1643     9445408 :         double      ntuples = (hashtable->totalTuples - hashtable->skewTuples);
    1644             : 
    1645             :         /* Create the HashJoinTuple */
    1646     9445408 :         hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    1647     9445408 :         hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1648             : 
    1649     9445408 :         hashTuple->hashvalue = hashvalue;
    1650     9445408 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1651             : 
    1652             :         /*
    1653             :          * We always reset the tuple-matched flag on insertion.  This is okay
    1654             :          * even when reloading a tuple from a batch file, since the tuple
    1655             :          * could not possibly have been matched to an outer tuple before it
    1656             :          * went into the batch file.
    1657             :          */
    1658     9445408 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1659             : 
    1660             :         /* Push it onto the front of the bucket's list */
    1661     9445408 :         hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1662     9445408 :         hashtable->buckets.unshared[bucketno] = hashTuple;
    1663             : 
    1664             :         /*
    1665             :          * Increase the (optimal) number of buckets if we just exceeded the
    1666             :          * NTUP_PER_BUCKET threshold, but only when there's still a single
    1667             :          * batch.
    1668             :          */
    1669     9445408 :         if (hashtable->nbatch == 1 &&
    1670     6898682 :             ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
    1671             :         {
    1672             :             /* Guard against integer overflow and alloc size overflow */
    1673         180 :             if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
    1674         180 :                 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
    1675             :             {
    1676         180 :                 hashtable->nbuckets_optimal *= 2;
    1677         180 :                 hashtable->log2_nbuckets_optimal += 1;
    1678             :             }
    1679             :         }
    1680             : 
    1681             :         /* Account for space used, and back off if we've used too much */
    1682     9445408 :         hashtable->spaceUsed += hashTupleSize;
    1683     9445408 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    1684     7489330 :             hashtable->spacePeak = hashtable->spaceUsed;
    1685     9445408 :         if (hashtable->spaceUsed +
    1686     9445408 :             hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
    1687     9445408 :             > hashtable->spaceAllowed)
    1688      449136 :             ExecHashIncreaseNumBatches(hashtable);
    1689             :     }
    1690             :     else
    1691             :     {
    1692             :         /*
    1693             :          * put the tuple into a temp file for later batches
    1694             :          */
    1695             :         Assert(batchno > hashtable->curbatch);
    1696     2237538 :         ExecHashJoinSaveTuple(tuple,
    1697             :                               hashvalue,
    1698     2237538 :                               &hashtable->innerBatchFile[batchno],
    1699             :                               hashtable);
    1700             :     }
    1701             : 
    1702    11682946 :     if (shouldFree)
    1703     9220264 :         heap_free_minimal_tuple(tuple);
    1704    11682946 : }
    1705             : 
    1706             : /*
    1707             :  * ExecParallelHashTableInsert
    1708             :  *      insert a tuple into a shared hash table or shared batch tuplestore
    1709             :  */
    1710             : void
    1711     2160094 : ExecParallelHashTableInsert(HashJoinTable hashtable,
    1712             :                             TupleTableSlot *slot,
    1713             :                             uint32 hashvalue)
    1714             : {
    1715             :     bool        shouldFree;
    1716     2160094 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1717             :     dsa_pointer shared;
    1718             :     int         bucketno;
    1719             :     int         batchno;
    1720             : 
    1721     2160448 : retry:
    1722     2160448 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1723             : 
    1724     2160448 :     if (batchno == 0)
    1725             :     {
    1726             :         HashJoinTuple hashTuple;
    1727             : 
    1728             :         /* Try to load it into memory. */
    1729             :         Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
    1730             :                PHJ_BUILD_HASH_INNER);
    1731     1246858 :         hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1732     1246858 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1733             :                                                &shared);
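                      : 
                      :         /*
                      :          * A NULL result means the number of buckets or batches changed
                      :          * while we were allocating, so this tuple may no longer belong in
                      :          * the same batch; recompute bucketno/batchno and try again.
                      :          */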
    1734     1246858 :         if (hashTuple == NULL)
    1735         324 :             goto retry;
    1736             : 
    1737             :         /* Store the hash value in the HashJoinTuple header. */
    1738     1246534 :         hashTuple->hashvalue = hashvalue;
    1739     1246534 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1740     1246534 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1741             : 
    1742             :         /* Push it onto the front of the bucket's list */
    1743     1246534 :         ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1744             :                                   hashTuple, shared);
    1745             :     }
    1746             :     else
    1747             :     {
    1748      913590 :         size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1749             : 
    1750             :         Assert(batchno > 0);
    1751             : 
    1752             :         /* Try to preallocate space in the batch if necessary. */
    1753      913590 :         if (hashtable->batches[batchno].preallocated < tuple_size)
    1754             :         {
    1755        1750 :             if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
    1756          30 :                 goto retry;
    1757             :         }
    1758             : 
    1759             :         Assert(hashtable->batches[batchno].preallocated >= tuple_size);
    1760      913560 :         hashtable->batches[batchno].preallocated -= tuple_size;
    1761      913560 :         sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
    1762             :                      tuple);
    1763             :     }
    1764     2160094 :     ++hashtable->batches[batchno].ntuples;
    1765             : 
    1766     2160094 :     if (shouldFree)
    1767     2160094 :         heap_free_minimal_tuple(tuple);
    1768     2160094 : }
    1769             : 
    1770             : /*
    1771             :  * Insert a tuple into the current hash table.  Unlike
    1772             :  * ExecParallelHashTableInsert, this version is not prepared to send the tuple
    1773             :  * to other batches or to run out of memory, and should only be called with
    1774             :  * tuples that belong in the current batch once growth has been disabled.
    1775             :  */
    1776             : void
    1777     1084174 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
    1778             :                                         TupleTableSlot *slot,
    1779             :                                         uint32 hashvalue)
    1780             : {
    1781             :     bool        shouldFree;
    1782     1084174 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1783             :     HashJoinTuple hashTuple;
    1784             :     dsa_pointer shared;
    1785             :     int         batchno;
    1786             :     int         bucketno;
    1787             : 
    1788     1084174 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1789             :     Assert(batchno == hashtable->curbatch);
    1790     1084174 :     hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1791     1084174 :                                            HJTUPLE_OVERHEAD + tuple->t_len,
    1792             :                                            &shared);
    1793     1084174 :     hashTuple->hashvalue = hashvalue;
    1794     1084174 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1795     1084174 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1796     1084174 :     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1797             :                               hashTuple, shared);
    1798             : 
    1799     1084174 :     if (shouldFree)
    1800           0 :         heap_free_minimal_tuple(tuple);
    1801     1084174 : }
    1802             : 
    1803             : /*
    1804             :  * ExecHashGetHashValue
    1805             :  *      Compute the hash value for a tuple
    1806             :  *
    1807             :  * The tuple to be tested must be in econtext->ecxt_outertuple (thus Vars in
    1808             :  * the hashkeys expressions need to have OUTER_VAR as varno). If outer_tuple
    1809             :  * is false (meaning it's the HashJoin's inner node, Hash), econtext,
    1810             :  * hashkeys, and slot need to be from Hash, with hashkeys/slot referencing and
    1811             :  * being suitable for tuples from the node below the Hash. Conversely, if
    1812             :  * outer_tuple is true, econtext is from HashJoin, and hashkeys/slot need to
    1813             :  * be appropriate for tuples from HashJoin's outer node.
    1814             :  *
    1815             :  * A true result means the tuple's hash value has been successfully computed
    1816             :  * and stored at *hashvalue.  A false result means the tuple cannot match
    1817             :  * because it contains a null attribute, and hence it should be discarded
    1818             :  * immediately.  (If keep_nulls is true then false is never returned.)
    1819             :  */
    1820             : bool
    1821    26599514 : ExecHashGetHashValue(HashJoinTable hashtable,
    1822             :                      ExprContext *econtext,
    1823             :                      List *hashkeys,
    1824             :                      bool outer_tuple,
    1825             :                      bool keep_nulls,
    1826             :                      uint32 *hashvalue)
    1827             : {
    1828    26599514 :     uint32      hashkey = 0;
    1829             :     FmgrInfo   *hashfunctions;
    1830             :     ListCell   *hk;
    1831    26599514 :     int         i = 0;
    1832             :     MemoryContext oldContext;
    1833             : 
    1834             :     /*
    1835             :      * We reset the eval context each time to reclaim any memory leaked in the
    1836             :      * hashkey expressions.
    1837             :      */
    1838    26599514 :     ResetExprContext(econtext);
    1839             : 
    1840    26599514 :     oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
    1841             : 
    1842    26599514 :     if (outer_tuple)
    1843    15188200 :         hashfunctions = hashtable->outer_hashfunctions;
    1844             :     else
    1845    11411314 :         hashfunctions = hashtable->inner_hashfunctions;
    1846             : 
    1847    54780286 :     foreach(hk, hashkeys)
    1848             :     {
    1849    28181598 :         ExprState  *keyexpr = (ExprState *) lfirst(hk);
    1850             :         Datum       keyval;
    1851             :         bool        isNull;
    1852             : 
    1853             :         /* combine successive hashkeys by rotating */
    1854    28181598 :         hashkey = pg_rotate_left32(hashkey, 1);
    1855             : 
    1856             :         /*
    1857             :          * Get the join attribute value of the tuple
    1858             :          */
    1859    28181598 :         keyval = ExecEvalExpr(keyexpr, econtext, &isNull);
    1860             : 
    1861             :         /*
    1862             :          * If the attribute is NULL, and the join operator is strict, then
    1863             :          * this tuple cannot pass the join qual so we can reject it
    1864             :          * immediately (unless we're scanning the outside of an outer join, in
    1865             :          * which case we must not reject it).  Otherwise we act like the
    1866             :          * hashcode of NULL is zero (this will support operators that act like
    1867             :          * IS NOT DISTINCT, though not any more-random behavior).  We treat
    1868             :          * the hash support function as strict even if the operator is not.
    1869             :          *
    1870             :          * Note: currently, all hashjoinable operators must be strict since
    1871             :          * the hash index AM assumes that.  However, it takes so little extra
    1872             :          * code here to allow non-strict that we may as well do it.
    1873             :          */
    1874    28181598 :         if (isNull)
    1875             :         {
    1876        1058 :             if (hashtable->hashStrict[i] && !keep_nulls)
    1877             :             {
    1878         826 :                 MemoryContextSwitchTo(oldContext);
    1879         826 :                 return false;   /* cannot match */
    1880             :             }
    1881             :             /* else, leave hashkey unmodified, equivalent to hashcode 0 */
    1882             :         }
    1883             :         else
    1884             :         {
    1885             :             /* Compute the hash function */
    1886             :             uint32      hkey;
    1887             : 
    1888    28180540 :             hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->collations[i], keyval));
    1889    28180540 :             hashkey ^= hkey;
    1890             :         }
    1891             : 
    1892    28180772 :         i++;
    1893             :     }
    1894             : 
    1895    26598688 :     MemoryContextSwitchTo(oldContext);
    1896             : 
    1897    26598688 :     *hashvalue = hashkey;
    1898    26598688 :     return true;
    1899             : }
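
The loop above combines the per-key hashes by rotating the accumulated value left one bit and XORing in each key's hash, with a NULL key contributing zero.  A minimal standalone sketch of that combining scheme follows; rotate_left32 and the key_hashes values are illustrative stand-ins, not PostgreSQL APIs.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for pg_rotate_left32(); only called with n = 1 here. */
    static inline uint32_t
    rotate_left32(uint32_t word, int n)
    {
        return (word << n) | (word >> (32 - n));
    }

    int
    main(void)
    {
        /* Hypothetical per-key hashes, as the hash support functions
         * would return them; 0 plays the role of a NULL key. */
        uint32_t    key_hashes[] = {0xA5A5A5A5u, 0u, 0x12345678u};
        uint32_t    hashkey = 0;

        for (int i = 0; i < 3; i++)
        {
            hashkey = rotate_left32(hashkey, 1);    /* rotate, then... */
            hashkey ^= key_hashes[i];       /* ...XOR in the key's hash */
        }
        printf("combined hash = 0x%08X\n", (unsigned) hashkey);
        return 0;
    }

Rotating before each XOR makes the combination order-sensitive, so key lists (a, b) and (b, a) hash differently even though XOR alone is commutative.
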
    1900             : 
    1901             : /*
    1902             :  * ExecHashGetBucketAndBatch
    1903             :  *      Determine the bucket number and batch number for a hash value
    1904             :  *
    1905             :  * Note: on-the-fly increases of nbatch must not change the bucket number
    1906             :  * for a given hash code (since we don't move tuples to different hash
    1907             :  * chains), and must only cause the batch number to remain the same or
    1908             :  * increase.  Our algorithm is
    1909             :  *      bucketno = hashvalue MOD nbuckets
    1910             :  *      batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
    1911             :  * where nbuckets and nbatch are both expected to be powers of 2, so we can
    1912             :  * do the computations by shifting and masking.  (This assumes that all hash
    1913             :  * functions are good about randomizing all their output bits, else we are
    1914             :  * likely to have very skewed bucket or batch occupancy.)
    1915             :  *
    1916             :  * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
    1917             :  * bucket count growth.  Once we start batching, the value is fixed and does
    1918             :  * not change over the course of the join (making it possible to compute batch
    1919             :  * number the way we do here).
    1920             :  *
    1921             :  * nbatch is always a power of 2; we increase it only by doubling it.  This
    1922             :  * effectively adds one more bit to the top of the batchno.  In very large
    1923             :  * joins, we might run out of bits to add, so we do this by rotating the hash
    1924             :  * value.  This causes batchno to steal bits from bucketno when the number of
    1925             :  * virtual buckets exceeds 2^32.  It's better to have longer bucket chains
    1926             :  * than to lose the ability to divide batches.
    1927             :  */
    1928             : void
    1929    34759404 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
    1930             :                           uint32 hashvalue,
    1931             :                           int *bucketno,
    1932             :                           int *batchno)
    1933             : {
    1934    34759404 :     uint32      nbuckets = (uint32) hashtable->nbuckets;
    1935    34759404 :     uint32      nbatch = (uint32) hashtable->nbatch;
    1936             : 
    1937    34759404 :     if (nbatch > 1)
    1938             :     {
    1939    13342930 :         *bucketno = hashvalue & (nbuckets - 1);
    1940    13342930 :         *batchno = pg_rotate_right32(hashvalue,
    1941    13342930 :                                      hashtable->log2_nbuckets) & (nbatch - 1);
    1942             :     }
    1943             :     else
    1944             :     {
    1945    21416474 :         *bucketno = hashvalue & (nbuckets - 1);
    1946    21416474 :         *batchno = 0;
    1947             :     }
    1948    34759404 : }
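
Because nbuckets and nbatch are powers of two, the MOD and ROR in the formula above reduce to a mask and a rotate, exactly as in the nbatch > 1 branch.  A self-contained sketch of the computation; the hash value and sizes are made-up inputs, and rotate_right32 stands in for pg_rotate_right32.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for pg_rotate_right32(). */
    static inline uint32_t
    rotate_right32(uint32_t word, int n)
    {
        return (word >> n) | (word << (32 - n));
    }

    int
    main(void)
    {
        uint32_t    hashvalue = 0xDEADBEEFu;
        uint32_t    nbuckets = 1 << 10;     /* 1024 buckets */
        int         log2_nbuckets = 10;
        uint32_t    nbatch = 1 << 2;        /* 4 batches */

        uint32_t    bucketno = hashvalue & (nbuckets - 1);
        uint32_t    batchno = rotate_right32(hashvalue, log2_nbuckets) &
            (nbatch - 1);

        /* Doubling nbatch widens the batch mask by one bit, so batchno
         * can only stay the same or grow -- never shrink. */
        printf("bucketno = %u, batchno = %u\n",
               (unsigned) bucketno, (unsigned) batchno);
        return 0;
    }
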
    1949             : 
    1950             : /*
    1951             :  * ExecScanHashBucket
    1952             :  *      scan a hash bucket for matches to the current outer tuple
    1953             :  *
    1954             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1955             :  *
    1956             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1957             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1958             :  * for the latter.
    1959             :  */
    1960             : bool
    1961    17154314 : ExecScanHashBucket(HashJoinState *hjstate,
    1962             :                    ExprContext *econtext)
    1963             : {
    1964    17154314 :     ExprState  *hjclauses = hjstate->hashclauses;
    1965    17154314 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1966    17154314 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1967    17154314 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1968             : 
    1969             :     /*
    1970             :      * hj_CurTuple is the address of the tuple last returned from the current
    1971             :      * bucket, or NULL if it's time to start scanning a new bucket.
    1972             :      *
    1973             :      * If the tuple hashed to a skew bucket then scan the skew bucket
    1974             :      * otherwise scan the standard hashtable bucket.
    1975             :      */
    1976    17154314 :     if (hashTuple != NULL)
    1977     4126958 :         hashTuple = hashTuple->next.unshared;
    1978    13027356 :     else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
    1979        2400 :         hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
    1980             :     else
    1981    13024956 :         hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    1982             : 
    1983    22232846 :     while (hashTuple != NULL)
    1984             :     {
    1985    12885998 :         if (hashTuple->hashvalue == hashvalue)
    1986             :         {
    1987             :             TupleTableSlot *inntuple;
    1988             : 
    1989             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    1990     7807472 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    1991             :                                              hjstate->hj_HashTupleSlot,
    1992             :                                              false);    /* do not pfree */
    1993     7807472 :             econtext->ecxt_innertuple = inntuple;
    1994             : 
    1995     7807472 :             if (ExecQualAndReset(hjclauses, econtext))
    1996             :             {
    1997     7807466 :                 hjstate->hj_CurTuple = hashTuple;
    1998     7807466 :                 return true;
    1999             :             }
    2000             :         }
    2001             : 
    2002     5078532 :         hashTuple = hashTuple->next.unshared;
    2003             :     }
    2004             : 
    2005             :     /*
    2006             :      * no match
    2007             :      */
    2008     9346848 :     return false;
    2009             : }
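
The scan compares the stored full hash value before evaluating the join clauses, so most non-matching entries in a bucket chain are rejected by a single integer comparison.  A simplified sketch of that resumable chain-scan pattern; ChainTuple and the integer key are stand-ins for the real tuple and qual machinery.

    #include <stddef.h>
    #include <stdint.h>

    typedef struct ChainTuple
    {
        struct ChainTuple *next;
        uint32_t    hashvalue;
        int         key;        /* stand-in for the tuple payload */
    } ChainTuple;

    /* Return the next match in the chain, resuming after 'last' (NULL
     * means start at the head), or NULL if no match remains. */
    static ChainTuple *
    scan_chain(ChainTuple *head, ChainTuple *last,
               uint32_t hashvalue, int key)
    {
        ChainTuple *t = (last != NULL) ? last->next : head;

        for (; t != NULL; t = t->next)
        {
            /* Cheap hash comparison first, "qual" check second. */
            if (t->hashvalue == hashvalue && t->key == key)
                return t;
        }
        return NULL;
    }

    int
    main(void)
    {
        ChainTuple  a = {NULL, 42u, 7};
        ChainTuple  b = {&a, 42u, 7};   /* chain: b -> a */
        ChainTuple *hit = scan_chain(&b, NULL, 42u, 7);     /* finds b */
        ChainTuple *nxt = scan_chain(&b, hit, 42u, 7);      /* finds a */

        return (hit == &b && nxt == &a) ? 0 : 1;
    }
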
    2010             : 
    2011             : /*
    2012             :  * ExecParallelScanHashBucket
    2013             :  *      scan a hash bucket for matches to the current outer tuple
    2014             :  *
    2015             :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    2016             :  *
    2017             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2018             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2019             :  * for the latter.
    2020             :  */
    2021             : bool
    2022     4200054 : ExecParallelScanHashBucket(HashJoinState *hjstate,
    2023             :                            ExprContext *econtext)
    2024             : {
    2025     4200054 :     ExprState  *hjclauses = hjstate->hashclauses;
    2026     4200054 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2027     4200054 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2028     4200054 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    2029             : 
    2030             :     /*
    2031             :      * hj_CurTuple is the address of the tuple last returned from the current
    2032             :      * bucket, or NULL if it's time to start scanning a new bucket.
    2033             :      */
    2034     4200054 :     if (hashTuple != NULL)
    2035     2040024 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2036             :     else
    2037     2160030 :         hashTuple = ExecParallelHashFirstTuple(hashtable,
    2038             :                                                hjstate->hj_CurBucketNo);
    2039             : 
    2040     5600842 :     while (hashTuple != NULL)
    2041             :     {
    2042     3440812 :         if (hashTuple->hashvalue == hashvalue)
    2043             :         {
    2044             :             TupleTableSlot *inntuple;
    2045             : 
    2046             :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2047     2040024 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2048             :                                              hjstate->hj_HashTupleSlot,
    2049             :                                              false);    /* do not pfree */
    2050     2040024 :             econtext->ecxt_innertuple = inntuple;
    2051             : 
    2052     2040024 :             if (ExecQualAndReset(hjclauses, econtext))
    2053             :             {
    2054     2040024 :                 hjstate->hj_CurTuple = hashTuple;
    2055     2040024 :                 return true;
    2056             :             }
    2057             :         }
    2058             : 
    2059     1400788 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2060             :     }
    2061             : 
    2062             :     /*
    2063             :      * no match
    2064             :      */
    2065     2160030 :     return false;
    2066             : }
    2067             : 
    2068             : /*
    2069             :  * ExecPrepHashTableForUnmatched
    2070             :  *      set up for a series of ExecScanHashTableForUnmatched calls
    2071             :  */
    2072             : void
    2073        4834 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
    2074             : {
    2075             :     /*----------
    2076             :      * During this scan we use the HashJoinState fields as follows:
    2077             :      *
    2078             :      * hj_CurBucketNo: next regular bucket to scan
    2079             :      * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
    2080             :      * hj_CurTuple: last tuple returned, or NULL to start next bucket
    2081             :      *----------
    2082             :      */
    2083        4834 :     hjstate->hj_CurBucketNo = 0;
    2084        4834 :     hjstate->hj_CurSkewBucketNo = 0;
    2085        4834 :     hjstate->hj_CurTuple = NULL;
    2086        4834 : }
    2087             : 
    2088             : /*
    2089             :  * Decide if this process is allowed to run the unmatched scan.  If so, the
    2090             :  * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
    2091             :  * Otherwise the batch is detached and false is returned.
    2092             :  */
    2093             : bool
    2094          72 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
    2095             : {
    2096          72 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2097          72 :     int         curbatch = hashtable->curbatch;
    2098          72 :     ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    2099             : 
    2100             :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
    2101             : 
    2102             :     /*
    2103             :      * It would not be deadlock-free to wait on the batch barrier, because it
    2104             :      * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
    2105             :      * already emitted tuples.  Therefore, we'll hold a wait-free election:
    2106             :      * only one process can continue to the next phase, and all others detach
    2107             :      * from this batch.  They can still do work on other batches, if there
    2108             :      * are any.
    2109             :      */
    2110          72 :     if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
    2111             :     {
    2112             :         /* This process considers the batch to be done. */
    2113           6 :         hashtable->batches[hashtable->curbatch].done = true;
    2114             : 
    2115             :         /* Make sure any temporary files are closed. */
    2116           6 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    2117           6 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    2118             : 
    2119             :         /*
    2120             :          * Track largest batch we've seen, which would normally happen in
    2121             :          * ExecHashTableDetachBatch().
    2122             :          */
    2123           6 :         hashtable->spacePeak =
    2124           6 :             Max(hashtable->spacePeak,
    2125             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    2126           6 :         hashtable->curbatch = -1;
    2127           6 :         return false;
    2128             :     }
    2129             : 
    2130             :     /* Now we are alone with this batch. */
    2131             :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    2132             : 
    2133             :     /*
    2134             :      * Has another process decided to give up early and command all processes
    2135             :      * to skip the unmatched scan?
    2136             :      */
    2137          66 :     if (batch->skip_unmatched)
    2138             :     {
    2139           0 :         hashtable->batches[hashtable->curbatch].done = true;
    2140           0 :         ExecHashTableDetachBatch(hashtable);
    2141           0 :         return false;
    2142             :     }
    2143             : 
    2144             :     /* Now prepare the process local state, just as for non-parallel join. */
    2145          66 :     ExecPrepHashTableForUnmatched(hjstate);
    2146             : 
    2147          66 :     return true;
    2148             : }
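
The wait-free election above hinges on one primitive: every participant leaves the barrier atomically, and only the last one standing proceeds.  A rough sketch of that shape using a plain atomic counter; this illustrates the idea only and is not the actual barrier implementation (the participant count of 4 is assumed).

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Processes still attached to the batch. */
    static atomic_int attached = 4;

    /* Each participant calls this once after probing.  Exactly one
     * caller -- the last to arrive -- gets true and may run the
     * unmatched scan; the rest detach and move on to other batches. */
    static bool
    arrive_and_detach_except_last(void)
    {
        return atomic_fetch_sub(&attached, 1) == 1;
    }

    int
    main(void)
    {
        /* Simulate four arrivals; only the fourth is elected. */
        for (int i = 0; i < 4; i++)
            printf("participant %d elected: %d\n", i,
                   (int) arrive_and_detach_except_last());
        return 0;
    }

Because no caller ever blocks, a process that loses the election can immediately pick up work elsewhere, which is what keeps the scheme deadlock-free.
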
    2149             : 
    2150             : /*
    2151             :  * ExecScanHashTableForUnmatched
    2152             :  *      scan the hash table for unmatched inner tuples
    2153             :  *
    2154             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2155             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2156             :  * for the latter.
    2157             :  */
    2158             : bool
    2159      748538 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
    2160             : {
    2161      748538 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2162      748538 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2163             : 
    2164             :     for (;;)
    2165             :     {
    2166             :         /*
    2167             :          * hj_CurTuple is the address of the tuple last returned from the
    2168             :          * current bucket, or NULL if it's time to start scanning a new
    2169             :          * bucket.
    2170             :          */
    2171     6571002 :         if (hashTuple != NULL)
    2172      743770 :             hashTuple = hashTuple->next.unshared;
    2173     5827232 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2174             :         {
    2175     5822470 :             hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2176     5822470 :             hjstate->hj_CurBucketNo++;
    2177             :         }
    2178        4762 :         else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
    2179             :         {
    2180           0 :             int         j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
    2181             : 
    2182           0 :             hashTuple = hashtable->skewBucket[j]->tuples;
    2183           0 :             hjstate->hj_CurSkewBucketNo++;
    2184             :         }
    2185             :         else
    2186        4762 :             break;              /* finished all buckets */
    2187             : 
    2188     6933378 :         while (hashTuple != NULL)
    2189             :         {
    2190     1110914 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2191             :             {
    2192             :                 TupleTableSlot *inntuple;
    2193             : 
    2194             :                 /* insert hashtable's tuple into exec slot */
    2195      743776 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2196             :                                                  hjstate->hj_HashTupleSlot,
    2197             :                                                  false);    /* do not pfree */
    2198      743776 :                 econtext->ecxt_innertuple = inntuple;
    2199             : 
    2200             :                 /*
    2201             :                  * Reset temp memory each time; although this function doesn't
    2202             :                  * do any qual eval, the caller will, so let's keep it
    2203             :                  * parallel to ExecScanHashBucket.
    2204             :                  */
    2205      743776 :                 ResetExprContext(econtext);
    2206             : 
    2207      743776 :                 hjstate->hj_CurTuple = hashTuple;
    2208      743776 :                 return true;
    2209             :             }
    2210             : 
    2211      367138 :             hashTuple = hashTuple->next.unshared;
    2212             :         }
    2213             : 
    2214             :         /* allow this loop to be cancellable */
    2215     5822464 :         CHECK_FOR_INTERRUPTS();
    2216             :     }
    2217             : 
    2218             :     /*
    2219             :      * no more unmatched tuples
    2220             :      */
    2221        4762 :     return false;
    2222             : }
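
The function is a resumable cursor: hj_CurBucketNo remembers which bucket to open next and hj_CurTuple where to resume within a chain, so each call emits exactly one never-matched tuple.  A minimal sketch of that cursor over stand-in types; UTuple's matched flag plays the role of the HeapTupleHeaderHasMatch bit.

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct UTuple
    {
        struct UTuple *next;
        bool        matched;    /* stand-in for the HasMatch header bit */
    } UTuple;

    typedef struct UnmatchedScan
    {
        int         curbucket;  /* next bucket to visit */
        UTuple     *curtuple;   /* last tuple returned, or NULL */
    } UnmatchedScan;

    /* Return the next never-matched tuple, or NULL when all buckets
     * have been visited.  State persists across calls. */
    static UTuple *
    next_unmatched(UTuple **buckets, int nbuckets, UnmatchedScan *scan)
    {
        UTuple     *t = scan->curtuple;

        for (;;)
        {
            if (t != NULL)
                t = t->next;                    /* resume within chain */
            else if (scan->curbucket < nbuckets)
                t = buckets[scan->curbucket++]; /* open next bucket */
            else
                return NULL;                    /* finished all buckets */

            for (; t != NULL; t = t->next)
            {
                if (!t->matched)
                {
                    scan->curtuple = t;
                    return t;
                }
            }
        }
    }

    int
    main(void)
    {
        UTuple      t2 = {NULL, true};
        UTuple      t1 = {&t2, false};      /* bucket 0: t1 -> t2 */
        UTuple     *buckets[1] = {&t1};
        UnmatchedScan scan = {0, NULL};

        UTuple     *u = next_unmatched(buckets, 1, &scan);  /* t1 */
        UTuple     *v = next_unmatched(buckets, 1, &scan);  /* NULL */

        return (u == &t1 && v == NULL) ? 0 : 1;
    }
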
    2223             : 
    2224             : /*
    2225             :  * ExecParallelScanHashTableForUnmatched
    2226             :  *      scan the hash table for unmatched inner tuples, in parallel join
    2227             :  *
    2228             :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2229             :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2230             :  * for the latter.
    2231             :  */
    2232             : bool
    2233      120072 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
    2234             :                                       ExprContext *econtext)
    2235             : {
    2236      120072 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2237      120072 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2238             : 
    2239             :     for (;;)
    2240             :     {
    2241             :         /*
    2242             :          * hj_CurTuple is the address of the tuple last returned from the
    2243             :          * current bucket, or NULL if it's time to start scanning a new
    2244             :          * bucket.
    2245             :          */
    2246      734472 :         if (hashTuple != NULL)
    2247      120006 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2248      614466 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2249      614400 :             hashTuple = ExecParallelHashFirstTuple(hashtable,
    2250      614400 :                                                    hjstate->hj_CurBucketNo++);
    2251             :         else
    2252          66 :             break;              /* finished all buckets */
    2253             : 
    2254      974406 :         while (hashTuple != NULL)
    2255             :         {
    2256      360006 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2257             :             {
    2258             :                 TupleTableSlot *inntuple;
    2259             : 
    2260             :                 /* insert hashtable's tuple into exec slot */
    2261      120006 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2262             :                                                  hjstate->hj_HashTupleSlot,
    2263             :                                                  false);    /* do not pfree */
    2264      120006 :                 econtext->ecxt_innertuple = inntuple;
    2265             : 
    2266             :                 /*
    2267             :                  * Reset temp memory each time; although this function doesn't
    2268             :                  * do any qual eval, the caller will, so let's keep it
    2269             :                  * parallel to ExecScanHashBucket.
    2270             :                  */
    2271      120006 :                 ResetExprContext(econtext);
    2272             : 
    2273      120006 :                 hjstate->hj_CurTuple = hashTuple;
    2274      120006 :                 return true;
    2275             :             }
    2276             : 
    2277      240000 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2278             :         }
    2279             : 
    2280             :         /* allow this loop to be cancellable */
    2281      614400 :         CHECK_FOR_INTERRUPTS();
    2282             :     }
    2283             : 
    2284             :     /*
    2285             :      * no more unmatched tuples
    2286             :      */
    2287          66 :     return false;
    2288             : }
    2289             : 
    2290             : /*
    2291             :  * ExecHashTableReset
    2292             :  *
    2293             :  *      reset hash table header for new batch
    2294             :  */
    2295             : void
    2296        1224 : ExecHashTableReset(HashJoinTable hashtable)
    2297             : {
    2298             :     MemoryContext oldcxt;
    2299        1224 :     int         nbuckets = hashtable->nbuckets;
    2300             : 
    2301             :     /*
    2302             :      * Release all the hash buckets and tuples acquired in the prior pass, and
    2303             :      * reinitialize the context for a new pass.
    2304             :      */
    2305        1224 :     MemoryContextReset(hashtable->batchCxt);
    2306        1224 :     oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
    2307             : 
    2308             :     /* Reallocate and reinitialize the hash bucket headers. */
    2309        1224 :     hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
    2310             : 
    2311        1224 :     hashtable->spaceUsed = 0;
    2312             : 
    2313        1224 :     MemoryContextSwitchTo(oldcxt);
    2314             : 
    2315             :     /* Forget the chunks (the memory was freed by the context reset above). */
    2316        1224 :     hashtable->chunks = NULL;
    2317        1224 : }
    2318             : 
    2319             : /*
    2320             :  * ExecHashTableResetMatchFlags
    2321             :  *      Clear all the HeapTupleHeaderHasMatch flags in the table
    2322             :  */
    2323             : void
    2324           6 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
    2325             : {
    2326             :     HashJoinTuple tuple;
    2327             :     int         i;
    2328             : 
    2329             :     /* Reset all flags in the main table ... */
    2330        6150 :     for (i = 0; i < hashtable->nbuckets; i++)
    2331             :     {
    2332        6156 :         for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
    2333          12 :              tuple = tuple->next.unshared)
    2334          12 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2335             :     }
    2336             : 
    2337             :     /* ... and the same for the skew buckets, if any */
    2338           6 :     for (i = 0; i < hashtable->nSkewBuckets; i++)
    2339             :     {
    2340           0 :         int         j = hashtable->skewBucketNums[i];
    2341           0 :         HashSkewBucket *skewBucket = hashtable->skewBucket[j];
    2342             : 
    2343           0 :         for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
    2344           0 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2345             :     }
    2346           6 : }
    2347             : 
    2348             : 
    2349             : void
    2350         998 : ExecReScanHash(HashState *node)
    2351             : {
    2352         998 :     PlanState  *outerPlan = outerPlanState(node);
    2353             : 
    2354             :     /*
    2355             :      * if chgParam of subnode is not null then plan will be re-scanned by
    2356             :      * first ExecProcNode.
    2357             :      */
    2358         998 :     if (outerPlan->chgParam == NULL)
    2359           0 :         ExecReScan(outerPlan);
    2360         998 : }
    2361             : 
    2362             : 
    2363             : /*
    2364             :  * ExecHashBuildSkewHash
    2365             :  *
    2366             :  *      Set up for skew optimization if we can identify the most common values
    2367             :  *      (MCVs) of the outer relation's join key.  We make a skew hash bucket
    2368             :  *      for the hash value of each MCV, up to the number of slots allowed
    2369             :  *      based on available memory.
    2370             :  */
    2371             : static void
    2372         110 : ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
    2373             : {
    2374             :     HeapTupleData *statsTuple;
    2375             :     AttStatsSlot sslot;
    2376             : 
    2377             :     /* Do nothing if planner didn't identify the outer relation's join key */
    2378         110 :     if (!OidIsValid(node->skewTable))
    2379          72 :         return;
    2380             :     /* Also, do nothing if we don't have room for at least one skew bucket */
    2381         110 :     if (mcvsToUse <= 0)
    2382           0 :         return;
    2383             : 
    2384             :     /*
    2385             :      * Try to find the MCV statistics for the outer relation's join key.
    2386             :      */
    2387         110 :     statsTuple = SearchSysCache3(STATRELATTINH,
    2388             :                                  ObjectIdGetDatum(node->skewTable),
    2389         110 :                                  Int16GetDatum(node->skewColumn),
    2390         110 :                                  BoolGetDatum(node->skewInherit));
    2391         110 :     if (!HeapTupleIsValid(statsTuple))
    2392          72 :         return;
    2393             : 
    2394          38 :     if (get_attstatsslot(&sslot, statsTuple,
    2395             :                          STATISTIC_KIND_MCV, InvalidOid,
    2396             :                          ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    2397             :     {
    2398             :         double      frac;
    2399             :         int         nbuckets;
    2400             :         FmgrInfo   *hashfunctions;
    2401             :         int         i;
    2402             : 
    2403           6 :         if (mcvsToUse > sslot.nvalues)
    2404           0 :             mcvsToUse = sslot.nvalues;
    2405             : 
    2406             :         /*
    2407             :          * Calculate the expected fraction of outer relation that will
    2408             :          * participate in the skew optimization.  If this isn't at least
    2409             :          * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
    2410             :          */
    2411           6 :         frac = 0;
    2412         132 :         for (i = 0; i < mcvsToUse; i++)
    2413         126 :             frac += sslot.numbers[i];
    2414           6 :         if (frac < SKEW_MIN_OUTER_FRACTION)
    2415             :         {
    2416           0 :             free_attstatsslot(&sslot);
    2417           0 :             ReleaseSysCache(statsTuple);
    2418           0 :             return;
    2419             :         }
    2420             : 
    2421             :         /*
    2422             :          * Okay, set up the skew hashtable.
    2423             :          *
    2424             :          * skewBucket[] is an open addressing hashtable with a power of 2 size
    2425             :          * that is greater than the number of MCV values.  (This ensures there
    2426             :          * will be at least one null entry, so searches will always
    2427             :          * terminate.)
    2428             :          *
    2429             :          * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
    2430             :          * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
    2431             :          * since we limit pg_statistic entries to much less than that.
    2432             :          */
    2433           6 :         nbuckets = pg_nextpower2_32(mcvsToUse + 1);
    2434             :         /* use two more bits just to help avoid collisions */
    2435           6 :         nbuckets <<= 2;
    2436             : 
    2437           6 :         hashtable->skewEnabled = true;
    2438           6 :         hashtable->skewBucketLen = nbuckets;
    2439             : 
    2440             :         /*
    2441             :          * We allocate the bucket memory in the hashtable's batch context. It
    2442             :          * is only needed during the first batch, and this ensures it will be
    2443             :          * automatically removed once the first batch is done.
    2444             :          */
    2445           6 :         hashtable->skewBucket = (HashSkewBucket **)
    2446           6 :             MemoryContextAllocZero(hashtable->batchCxt,
    2447             :                                    nbuckets * sizeof(HashSkewBucket *));
    2448           6 :         hashtable->skewBucketNums = (int *)
    2449           6 :             MemoryContextAllocZero(hashtable->batchCxt,
    2450             :                                    mcvsToUse * sizeof(int));
    2451             : 
    2452           6 :         hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
    2453           6 :             + mcvsToUse * sizeof(int);
    2454           6 :         hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
    2455           6 :             + mcvsToUse * sizeof(int);
    2456           6 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    2457           6 :             hashtable->spacePeak = hashtable->spaceUsed;
    2458             : 
    2459             :         /*
    2460             :          * Create a skew bucket for each MCV hash value.
    2461             :          *
    2462             :          * Note: it is very important that we create the buckets in order of
    2463             :          * decreasing MCV frequency.  If we have to remove some buckets, they
    2464             :          * must be removed in reverse order of creation (see notes in
    2465             :          * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
    2466             :          * be removed first.
    2467             :          */
    2468           6 :         hashfunctions = hashtable->outer_hashfunctions;
    2469             : 
    2470         132 :         for (i = 0; i < mcvsToUse; i++)
    2471             :         {
    2472             :             uint32      hashvalue;
    2473             :             int         bucket;
    2474             : 
    2475         126 :             hashvalue = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[0],
    2476         126 :                                                          hashtable->collations[0],
    2477         126 :                                                          sslot.values[i]));
    2478             : 
    2479             :             /*
    2480             :              * While we have not hit a hole in the hashtable and have not hit
    2481             :              * the desired bucket, we have collided with some previous hash
    2482             :              * value, so try the next bucket location.  NB: this code must
    2483             :              * match ExecHashGetSkewBucket.
    2484             :              */
    2485         126 :             bucket = hashvalue & (nbuckets - 1);
    2486         126 :             while (hashtable->skewBucket[bucket] != NULL &&
    2487           0 :                    hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2488           0 :                 bucket = (bucket + 1) & (nbuckets - 1);
    2489             : 
    2490             :             /*
    2491             :              * If we found an existing bucket with the same hashvalue, leave
    2492             :              * it alone.  It's okay for two MCVs to share a hashvalue.
    2493             :              */
    2494         126 :             if (hashtable->skewBucket[bucket] != NULL)
    2495           0 :                 continue;
    2496             : 
    2497             :             /* Okay, create a new skew bucket for this hashvalue. */
    2498         252 :             hashtable->skewBucket[bucket] = (HashSkewBucket *)
    2499         126 :                 MemoryContextAlloc(hashtable->batchCxt,
    2500             :                                    sizeof(HashSkewBucket));
    2501         126 :             hashtable->skewBucket[bucket]->hashvalue = hashvalue;
    2502         126 :             hashtable->skewBucket[bucket]->tuples = NULL;
    2503         126 :             hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
    2504         126 :             hashtable->nSkewBuckets++;
    2505         126 :             hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
    2506         126 :             hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
    2507         126 :             if (hashtable->spaceUsed > hashtable->spacePeak)
    2508         126 :                 hashtable->spacePeak = hashtable->spaceUsed;
    2509             :         }
    2510             : 
    2511           6 :         free_attstatsslot(&sslot);
    2512             :     }
    2513             : 
    2514          38 :     ReleaseSysCache(statsTuple);
    2515             : }
    2516             : 
    2517             : /*
    2518             :  * ExecHashGetSkewBucket
    2519             :  *
    2520             :  *      Returns the index of the skew bucket for this hashvalue,
    2521             :  *      or INVALID_SKEW_BUCKET_NO if the hashvalue is not
    2522             :  *      associated with any active skew bucket.
    2523             :  */
    2524             : int
    2525    25908786 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
    2526             : {
    2527             :     int         bucket;
    2528             : 
    2529             :     /*
    2530             :      * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
    2531             :      * particular, this happens after the initial batch is done).
    2532             :      */
    2533    25908786 :     if (!hashtable->skewEnabled)
    2534    25788786 :         return INVALID_SKEW_BUCKET_NO;
    2535             : 
    2536             :     /*
    2537             :      * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
    2538             :      */
    2539      120000 :     bucket = hashvalue & (hashtable->skewBucketLen - 1);
    2540             : 
    2541             :     /*
    2542             :      * While we have not hit a hole in the hashtable and have not hit the
    2543             :      * desired bucket, we have collided with some other hash value, so try the
    2544             :      * next bucket location.
    2545             :      */
    2546      127830 :     while (hashtable->skewBucket[bucket] != NULL &&
    2547       10818 :            hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2548        7830 :         bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
    2549             : 
    2550             :     /*
    2551             :      * Found the desired bucket?
    2552             :      */
    2553      120000 :     if (hashtable->skewBucket[bucket] != NULL)
    2554        2988 :         return bucket;
    2555             : 
    2556             :     /*
    2557             :      * There must not be any hashtable entry for this hash value.
    2558             :      */
    2559      117012 :     return INVALID_SKEW_BUCKET_NO;
    2560             : }
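
Build and lookup must probe identically: because the table is a power of 2 strictly larger than the number of entries, at least one NULL hole always exists and the linear-probe loop must terminate.  A standalone sketch of the sizing rule and the shared probe loop; next_pow2 (standing in for pg_nextpower2_32), SkewSlot, and the sample hash value are illustrative assumptions.

    #include <stdint.h>

    static uint32_t
    next_pow2(uint32_t n)       /* stand-in for pg_nextpower2_32() */
    {
        uint32_t    p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    /* Sizing as in ExecHashBuildSkewHash: smallest power of 2 above
     * mcvsToUse, then two extra bits to keep the load factor low. */
    static uint32_t
    skew_table_len(int mcvsToUse)
    {
        return next_pow2((uint32_t) mcvsToUse + 1) << 2;
    }

    typedef struct SkewSlot
    {
        int         used;       /* 0 = hole */
        uint32_t    hashvalue;
    } SkewSlot;

    /* Return the slot index for 'hashvalue', or -1 if absent. */
    static int
    skew_lookup(const SkewSlot *table, uint32_t len, uint32_t hashvalue)
    {
        uint32_t    bucket = hashvalue & (len - 1);

        /* Probe until we hit the matching entry or a hole. */
        while (table[bucket].used && table[bucket].hashvalue != hashvalue)
            bucket = (bucket + 1) & (len - 1);

        return table[bucket].used ? (int) bucket : -1;
    }

    int
    main(void)
    {
        SkewSlot    table[8] = {{0, 0}};
        uint32_t    len = skew_table_len(1);    /* == 8 */

        table[0x2Bu & (len - 1)] = (SkewSlot) {1, 0x2Bu};
        return skew_lookup(table, len, 0x2Bu) == 3 ? 0 : 1;
    }
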
    2561             : 
    2562             : /*
    2563             :  * ExecHashSkewTableInsert
    2564             :  *
    2565             :  *      Insert a tuple into the skew hashtable.
    2566             :  *
    2567             :  * This should generally match up with the current-batch case in
    2568             :  * ExecHashTableInsert.
    2569             :  */
    2570             : static void
    2571         588 : ExecHashSkewTableInsert(HashJoinTable hashtable,
    2572             :                         TupleTableSlot *slot,
    2573             :                         uint32 hashvalue,
    2574             :                         int bucketNumber)
    2575             : {
    2576             :     bool        shouldFree;
    2577         588 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    2578             :     HashJoinTuple hashTuple;
    2579             :     int         hashTupleSize;
    2580             : 
    2581             :     /* Create the HashJoinTuple */
    2582         588 :     hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2583         588 :     hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
    2584             :                                                    hashTupleSize);
    2585         588 :     hashTuple->hashvalue = hashvalue;
    2586         588 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    2587         588 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    2588             : 
    2589             :     /* Push it onto the front of the skew bucket's list */
    2590         588 :     hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
    2591         588 :     hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
    2592             :     Assert(hashTuple != hashTuple->next.unshared);
    2593             : 
    2594             :     /* Account for space used, and back off if we've used too much */
    2595         588 :     hashtable->spaceUsed += hashTupleSize;
    2596         588 :     hashtable->spaceUsedSkew += hashTupleSize;
    2597         588 :     if (hashtable->spaceUsed > hashtable->spacePeak)
    2598         432 :         hashtable->spacePeak = hashtable->spaceUsed;
    2599         690 :     while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
    2600         102 :         ExecHashRemoveNextSkewBucket(hashtable);
    2601             : 
    2602             :     /* Check we are not over the total spaceAllowed, either */
    2603         588 :     if (hashtable->spaceUsed > hashtable->spaceAllowed)
    2604           0 :         ExecHashIncreaseNumBatches(hashtable);
    2605             : 
    2606         588 :     if (shouldFree)
    2607         588 :         heap_free_minimal_tuple(tuple);
    2608         588 : }
    2609             : 
    2610             : /*
    2611             :  *      ExecHashRemoveNextSkewBucket
    2612             :  *
    2613             :  *      Remove the least valuable skew bucket by pushing its tuples into
    2614             :  *      the main hash table.
    2615             :  */
    2616             : static void
    2617         102 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
    2618             : {
    2619             :     int         bucketToRemove;
    2620             :     HashSkewBucket *bucket;
    2621             :     uint32      hashvalue;
    2622             :     int         bucketno;
    2623             :     int         batchno;
    2624             :     HashJoinTuple hashTuple;
    2625             : 
    2626             :     /* Locate the bucket to remove */
    2627         102 :     bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
    2628         102 :     bucket = hashtable->skewBucket[bucketToRemove];
    2629             : 
    2630             :     /*
    2631             :      * Calculate which bucket and batch the tuples belong to in the main
    2632             :      * hashtable.  They all have the same hash value, so it's the same for all
    2633             :      * of them.  Also note that it's not possible for nbatch to increase while
    2634             :      * we are processing the tuples.
    2635             :      */
    2636         102 :     hashvalue = bucket->hashvalue;
    2637         102 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    2638             : 
    2639             :     /* Process all tuples in the bucket */
    2640         102 :     hashTuple = bucket->tuples;
    2641         450 :     while (hashTuple != NULL)
    2642             :     {
    2643         348 :         HashJoinTuple nextHashTuple = hashTuple->next.unshared;
    2644             :         MinimalTuple tuple;
    2645             :         Size        tupleSize;
    2646             : 
    2647             :         /*
    2648             :          * This code must agree with ExecHashTableInsert.  We do not use
    2649             :          * ExecHashTableInsert directly as ExecHashTableInsert expects a
    2650             :          * TupleTableSlot while we already have HashJoinTuples.
    2651             :          */
    2652         348 :         tuple = HJTUPLE_MINTUPLE(hashTuple);
    2653         348 :         tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2654             : 
    2655             :         /* Decide whether to put the tuple in the hash table or a temp file */
    2656         348 :         if (batchno == hashtable->curbatch)
    2657             :         {
    2658             :             /* Move the tuple to the main hash table */
    2659             :             HashJoinTuple copyTuple;
    2660             : 
    2661             :             /*
    2662             :              * We must copy the tuple into the dense storage, else it will not
    2663             :              * be found by, eg, ExecHashIncreaseNumBatches.
    2664             :              */
    2665         138 :             copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
    2666         138 :             memcpy(copyTuple, hashTuple, tupleSize);
    2667         138 :             pfree(hashTuple);
    2668             : 
    2669         138 :             copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    2670         138 :             hashtable->buckets.unshared[bucketno] = copyTuple;
    2671             : 
    2672             :             /* We have reduced skew space, but overall space doesn't change */
    2673         138 :             hashtable->spaceUsedSkew -= tupleSize;
    2674             :         }
    2675             :         else
    2676             :         {
    2677             :             /* Put the tuple into a temp file for later batches */
    2678             :             Assert(batchno > hashtable->curbatch);
    2679         210 :             ExecHashJoinSaveTuple(tuple, hashvalue,
    2680         210 :                                   &hashtable->innerBatchFile[batchno],
    2681             :                                   hashtable);
    2682         210 :             pfree(hashTuple);
    2683         210 :             hashtable->spaceUsed -= tupleSize;
    2684         210 :             hashtable->spaceUsedSkew -= tupleSize;
    2685             :         }
    2686             : 
    2687         348 :         hashTuple = nextHashTuple;
    2688             : 
    2689             :         /* allow this loop to be cancellable */
    2690         348 :         CHECK_FOR_INTERRUPTS();
    2691             :     }
    2692             : 
    2693             :     /*
    2694             :      * Free the bucket struct itself and reset the hashtable entry to NULL.
    2695             :      *
    2696             :      * NOTE: this is not nearly as simple as it looks on the surface, because
    2697             :      * of the possibility of collisions in the hashtable.  Suppose that hash
    2698             :      * values A and B collide at a particular hashtable entry, and that A was
    2699             :      * entered first so B gets shifted to a different table entry.  If we were
    2700             :      * to remove A first then ExecHashGetSkewBucket would mistakenly start
    2701             :      * reporting that B is not in the hashtable, because it would hit the NULL
    2702             :      * before finding B.  However, we always remove entries in the reverse
    2703             :      * order of creation, so this failure cannot happen.
    2704             :      */
    2705         102 :     hashtable->skewBucket[bucketToRemove] = NULL;
    2706         102 :     hashtable->nSkewBuckets--;
    2707         102 :     pfree(bucket);
    2708         102 :     hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
    2709         102 :     hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
    2710             : 
    2711             :     /*
    2712             :      * If we have removed all skew buckets then give up on skew optimization.
    2713             :      * Release the arrays since they aren't useful any more.
    2714             :      */
    2715         102 :     if (hashtable->nSkewBuckets == 0)
    2716             :     {
    2717           0 :         hashtable->skewEnabled = false;
    2718           0 :         pfree(hashtable->skewBucket);
    2719           0 :         pfree(hashtable->skewBucketNums);
    2720           0 :         hashtable->skewBucket = NULL;
    2721           0 :         hashtable->skewBucketNums = NULL;
    2722           0 :         hashtable->spaceUsed -= hashtable->spaceUsedSkew;
    2723           0 :         hashtable->spaceUsedSkew = 0;
    2724             :     }
    2725         102 : }
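
The reverse-removal rule can be seen with two values that collide under linear probing: removing the later (displaced) entry first leaves the probe chain intact, while removing the earlier entry first creates a hole that later probes stop at.  A small demonstration under the same probing scheme; the table length and hash values are chosen only so that 0x11 and 0x19 collide at slot 1.

    #include <stdint.h>
    #include <stdio.h>

    #define LEN 8               /* power of 2 */

    static uint32_t table[LEN]; /* 0 = hole; nonzero = stored hash */

    static int
    probe(uint32_t hashvalue)   /* same probe rule as the code above */
    {
        uint32_t    b = hashvalue & (LEN - 1);

        while (table[b] != 0 && table[b] != hashvalue)
            b = (b + 1) & (LEN - 1);
        return table[b] == hashvalue ? (int) b : -1;
    }

    int
    main(void)
    {
        table[1] = 0x11;        /* A, at its home slot */
        table[2] = 0x19;        /* B, displaced one slot by A */

        /* Correct order: remove B (created last) first; A still found. */
        table[2] = 0;
        printf("A found at slot %d\n", probe(0x11));    /* prints 1 */

        /* Wrong order: restore B, then remove A first; the probe for B
         * now stops at the hole in slot 1 and falsely reports a miss. */
        table[2] = 0x19;
        table[1] = 0;
        printf("B found at slot %d\n", probe(0x19));    /* prints -1 */
        return 0;
    }
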
    2726             : 
    2727             : /*
    2728             :  * Reserve space in the DSM segment for instrumentation data.
    2729             :  */
    2730             : void
    2731         192 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
    2732             : {
    2733             :     size_t      size;
    2734             : 
    2735             :     /* don't need this if not instrumenting or no workers */
    2736         192 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2737         108 :         return;
    2738             : 
    2739          84 :     size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
    2740          84 :     size = add_size(size, offsetof(SharedHashInfo, hinstrument));
    2741          84 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    2742          84 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    2743             : }
    2744             : 
    2745             : /*
    2746             :  * Set up a space in the DSM for all workers to record instrumentation data
    2747             :  * about their hash table.
    2748             :  */
    2749             : void
    2750         192 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
    2751             : {
    2752             :     size_t      size;
    2753             : 
    2754             :     /* don't need this if not instrumenting or no workers */
    2755         192 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2756         108 :         return;
    2757             : 
    2758          84 :     size = offsetof(SharedHashInfo, hinstrument) +
    2759          84 :         pcxt->nworkers * sizeof(HashInstrumentation);
    2760          84 :     node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
    2761             : 
    2762             :     /* Each per-worker area must start out as zeroes. */
    2763          84 :     memset(node->shared_info, 0, size);
    2764             : 
    2765          84 :     node->shared_info->num_workers = pcxt->nworkers;
    2766          84 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
    2767          84 :                    node->shared_info);
    2768             : }
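
The size arithmetic above is the usual flexible-array-member pattern: header size via offsetof plus one fixed-size slot per worker, with the whole area zeroed before use.  A minimal sketch with hypothetical stand-in types; DemoSharedInfo and DemoInstrumentation mimic the shape of SharedHashInfo and HashInstrumentation, whose real definitions live in the executor headers.

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct DemoInstrumentation
    {
        int         nbuckets;
        int         nbatch;
        size_t      space_peak;
    } DemoInstrumentation;

    typedef struct DemoSharedInfo
    {
        int         num_workers;
        DemoInstrumentation hinstrument[];  /* flexible array member */
    } DemoSharedInfo;

    int
    main(void)
    {
        int         nworkers = 4;
        size_t      size = offsetof(DemoSharedInfo, hinstrument) +
            nworkers * sizeof(DemoInstrumentation);
        DemoSharedInfo *info = malloc(size);

        if (info == NULL)
            return 1;
        /* Each per-worker area must start out as zeroes, as above. */
        memset(info, 0, size);
        info->num_workers = nworkers;
        free(info);
        return 0;
    }
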
    2769             : 
    2770             : /*
    2771             :  * Locate the DSM space for hash table instrumentation data that we'll write
    2772             :  * to at shutdown time.
    2773             :  */
    2774             : void
    2775         548 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
    2776             : {
    2777             :     SharedHashInfo *shared_info;
    2778             : 
    2779             :     /* don't need this if not instrumenting */
    2780         548 :     if (!node->ps.instrument)
    2781         296 :         return;
    2782             : 
    2783             :     /*
    2784             :      * Find our entry in the shared area, and set up a pointer to it so that
    2785             :      * we'll accumulate stats there when shutting down or rebuilding the hash
    2786             :      * table.
    2787             :      */
    2788             :     shared_info = (SharedHashInfo *)
    2789         252 :         shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    2790         252 :     node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
    2791             : }
    2792             : 
    2793             : /*
    2794             :  * Collect EXPLAIN stats if needed, saving them into DSM memory if
    2795             :  * ExecHashInitializeWorker was called, or local storage if not.  In the
    2796             :  * parallel case, this must be done in ExecShutdownHash() rather than
    2797             :  * ExecEndHash() because the latter runs after we've detached from the DSM
    2798             :  * segment.
    2799             :  */
    2800             : void
    2801       24862 : ExecShutdownHash(HashState *node)
    2802             : {
    2803             :     /* Allocate save space if EXPLAIN'ing and we didn't do so already */
    2804       24862 :     if (node->ps.instrument && !node->hinstrument)
    2805         108 :         node->hinstrument = palloc0_object(HashInstrumentation);
    2806             :     /* Now accumulate data for the current (final) hash table */
    2807       24862 :     if (node->hinstrument && node->hashtable)
    2808         294 :         ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
    2809       24862 : }
    2810             : 
    2811             : /*
    2812             :  * Retrieve instrumentation data from workers before the DSM segment is
    2813             :  * detached, so that EXPLAIN can access it.
    2814             :  */
    2815             : void
    2816          84 : ExecHashRetrieveInstrumentation(HashState *node)
    2817             : {
    2818          84 :     SharedHashInfo *shared_info = node->shared_info;
    2819             :     size_t      size;
    2820             : 
    2821          84 :     if (shared_info == NULL)
    2822           0 :         return;
    2823             : 
    2824             :     /* Replace node->shared_info with a copy in backend-local memory. */
    2825          84 :     size = offsetof(SharedHashInfo, hinstrument) +
    2826          84 :         shared_info->num_workers * sizeof(HashInstrumentation);
    2827          84 :     node->shared_info = palloc(size);
    2828          84 :     memcpy(node->shared_info, shared_info, size);
    2829             : }
    2830             : 
    2831             : /*
    2832             :  * Accumulate instrumentation data from 'hashtable' into an
    2833             :  * initially-zeroed HashInstrumentation struct.
    2834             :  *
    2835             :  * This is used to merge information across successive hash table instances
    2836             :  * within a single plan node.  We take the maximum values of each interesting
    2837             :  * number.  The largest nbuckets and largest nbatch values might have occurred
    2838             :  * in different instances, so there's some risk of confusion from reporting
    2839             :  * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
    2840             :  * issue if we don't report the largest values.  Similarly, we want to report
    2841             :  * the largest spacePeak regardless of whether it happened in the same
    2842             :  * instance as the largest nbuckets or nbatch.  All the instances should have
    2843             :  * the same nbuckets_original and nbatch_original; but there's little value
    2844             :  * in depending on that here, so handle them the same way.
    2845             :  */
    2846             : void
    2847         294 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
    2848             :                              HashJoinTable hashtable)
    2849             : {
    2850         294 :     instrument->nbuckets = Max(instrument->nbuckets,
    2851             :                                hashtable->nbuckets);
    2852         294 :     instrument->nbuckets_original = Max(instrument->nbuckets_original,
    2853             :                                         hashtable->nbuckets_original);
    2854         294 :     instrument->nbatch = Max(instrument->nbatch,
    2855             :                              hashtable->nbatch);
    2856         294 :     instrument->nbatch_original = Max(instrument->nbatch_original,
    2857             :                                       hashtable->nbatch_original);
    2858         294 :     instrument->space_peak = Max(instrument->space_peak,
    2859             :                                  hashtable->spacePeak);
    2860         294 : }
    2861             : 
    2862             : /*
    2863             :  * Allocate 'size' bytes from the currently active HashMemoryChunk
    2864             :  */
    2865             : static void *
    2866     9567332 : dense_alloc(HashJoinTable hashtable, Size size)
    2867             : {
    2868             :     HashMemoryChunk newChunk;
    2869             :     char       *ptr;
    2870             : 
    2871             :     /* just in case the size is not already aligned properly */
    2872     9567332 :     size = MAXALIGN(size);
    2873             : 
    2874             :     /*
    2875             :      * If tuple size is larger than threshold, allocate a separate chunk.
    2876             :      */
    2877     9567332 :     if (size > HASH_CHUNK_THRESHOLD)
    2878             :     {
    2879             :         /* allocate a chunk dedicated to this oversized tuple */
    2880           0 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2881             :                                                         HASH_CHUNK_HEADER_SIZE + size);
    2882           0 :         newChunk->maxlen = size;
    2883           0 :         newChunk->used = size;
    2884           0 :         newChunk->ntuples = 1;
    2885             : 
    2886             :         /*
    2887             :          * Add this chunk to the list after the first existing chunk, so that
    2888             :          * we don't lose the remaining space in the "current" chunk.
    2889             :          */
    2890           0 :         if (hashtable->chunks != NULL)
    2891             :         {
    2892           0 :             newChunk->next = hashtable->chunks->next;
    2893           0 :             hashtable->chunks->next.unshared = newChunk;
    2894             :         }
    2895             :         else
    2896             :         {
    2897           0 :             newChunk->next.unshared = hashtable->chunks;
    2898           0 :             hashtable->chunks = newChunk;
    2899             :         }
    2900             : 
    2901           0 :         return HASH_CHUNK_DATA(newChunk);
    2902             :     }
    2903             : 
    2904             :     /*
    2905             :      * See if we have enough space for it in the current chunk (if any). If
    2906             :      * not, allocate a fresh chunk.
    2907             :      */
    2908     9567332 :     if ((hashtable->chunks == NULL) ||
    2909     9548190 :         (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
    2910             :     {
    2911             :         /* allocate new chunk and put it at the beginning of the list */
    2912       31660 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2913             :                                                         HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
    2914             : 
    2915       31660 :         newChunk->maxlen = HASH_CHUNK_SIZE;
    2916       31660 :         newChunk->used = size;
    2917       31660 :         newChunk->ntuples = 1;
    2918             : 
    2919       31660 :         newChunk->next.unshared = hashtable->chunks;
    2920       31660 :         hashtable->chunks = newChunk;
    2921             : 
    2922       31660 :         return HASH_CHUNK_DATA(newChunk);
    2923             :     }
    2924             : 
    2925             :     /* There is enough space in the current chunk, so add the tuple there */
    2926     9535672 :     ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
    2927     9535672 :     hashtable->chunks->used += size;
    2928     9535672 :     hashtable->chunks->ntuples += 1;
    2929             : 
    2930             :     /* return pointer to the start of the tuple memory */
    2931     9535672 :     return ptr;
    2932             : }
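
dense_alloc() is a bump allocator: it carves MAXALIGNed slices out of a large chunk, starts a fresh chunk when the current one cannot fit the request, and gives tuples above HASH_CHUNK_THRESHOLD a dedicated chunk linked behind the current one so the current chunk's free space keeps being used. Freeing is wholesale: resetting batchCxt releases every chunk at once, which is why chunks only need a forward link. A standalone sketch of the core pattern, omitting the oversized-tuple placement refinement (sizes and names are illustrative):

    #include <stdlib.h>

    #define CHUNK_SIZE  (32 * 1024)
    #define ALIGN8(x)   (((x) + 7) & ~(size_t) 7)

    struct chunk
    {
        struct chunk *next;
        size_t      maxlen;
        size_t      used;
        char        data[];
    };

    static struct chunk *chunks;    /* list head doubles as "current" chunk */

    static void *
    bump_alloc(size_t size)
    {
        size = ALIGN8(size);
        if (chunks == NULL || chunks->maxlen - chunks->used < size)
        {
            /* No room in the current chunk: push a fresh one. */
            size_t      maxlen = size > CHUNK_SIZE ? size : CHUNK_SIZE;
            struct chunk *c = malloc(sizeof(struct chunk) + maxlen);

            if (c == NULL)
                return NULL;
            c->maxlen = maxlen;
            c->used = 0;
            c->next = chunks;
            chunks = c;
        }
        chunks->used += size;
        return chunks->data + chunks->used - size;
    }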
    2933             : 
    2934             : /*
    2935             :  * Allocate space for a tuple in shared dense storage.  This is equivalent to
    2936             :  * dense_alloc but for Parallel Hash using shared memory.
    2937             :  *
    2938             :  * While loading a tuple into shared memory, we might run out of memory and
    2939             :  * decide to repartition, or determine that the load factor is too high and
    2940             :  * decide to expand the bucket array, or discover that another participant has
    2941             :  * commanded us to help do that.  Return NULL if the number of buckets or batches
    2942             :  * has changed, indicating that the caller must retry (considering the
    2943             :  * possibility that the tuple no longer belongs in the same batch).
    2944             :  */
    2945             : static HashJoinTuple
    2946     2382448 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
    2947             :                            dsa_pointer *shared)
    2948             : {
    2949     2382448 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2950             :     dsa_pointer chunk_shared;
    2951             :     HashMemoryChunk chunk;
    2952             :     Size        chunk_size;
    2953             :     HashJoinTuple result;
    2954     2382448 :     int         curbatch = hashtable->curbatch;
    2955             : 
    2956     2382448 :     size = MAXALIGN(size);
    2957             : 
    2958             :     /*
    2959             :      * Fast path: if there is enough space in this backend's current chunk,
    2960             :      * then we can allocate without any locking.
    2961             :      */
    2962     2382448 :     chunk = hashtable->current_chunk;
    2963     2382448 :     if (chunk != NULL &&
    2964     2381468 :         size <= HASH_CHUNK_THRESHOLD &&
    2965     2381468 :         chunk->maxlen - chunk->used >= size)
    2966             :     {
    2967             : 
    2968     2378700 :         chunk_shared = hashtable->current_chunk_shared;
    2969             :         Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
    2970     2378700 :         *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
    2971     2378700 :         result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
    2972     2378700 :         chunk->used += size;
    2973             : 
    2974             :         Assert(chunk->used <= chunk->maxlen);
    2975             :         Assert(result == dsa_get_address(hashtable->area, *shared));
    2976             : 
    2977     2378700 :         return result;
    2978             :     }
    2979             : 
    2980             :     /* Slow path: try to allocate a new chunk. */
    2981        3748 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    2982             : 
    2983             :     /*
    2984             :      * Check if we need to help increase the number of buckets or batches.
    2985             :      */
    2986        3748 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    2987        3712 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2988             :     {
    2989         180 :         ParallelHashGrowth growth = pstate->growth;
    2990             : 
    2991         180 :         hashtable->current_chunk = NULL;
    2992         180 :         LWLockRelease(&pstate->lock);
    2993             : 
    2994             :         /* Another participant has commanded us to help grow. */
    2995         180 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    2996          36 :             ExecParallelHashIncreaseNumBatches(hashtable);
    2997         144 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2998         144 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    2999             : 
    3000             :         /* The caller must retry. */
    3001         180 :         return NULL;
    3002             :     }
    3003             : 
    3004             :     /* Oversized tuples get their own chunk. */
    3005        3568 :     if (size > HASH_CHUNK_THRESHOLD)
    3006          48 :         chunk_size = size + HASH_CHUNK_HEADER_SIZE;
    3007             :     else
    3008        3520 :         chunk_size = HASH_CHUNK_SIZE;
    3009             : 
    3010             :     /* Check if it's time to grow batches or buckets. */
    3011        3568 :     if (pstate->growth != PHJ_GROWTH_DISABLED)
    3012             :     {
    3013             :         Assert(curbatch == 0);
    3014             :         Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    3015             : 
    3016             :         /*
    3017             :          * Check if our space limit would be exceeded.  To avoid choking on
    3018             :          * very large tuples or a very low hash_mem setting, we'll always allow
    3019             :          * each backend to allocate at least one chunk.
    3020             :          */
    3021        1820 :         if (hashtable->batches[0].at_least_one_chunk &&
    3022        1442 :             hashtable->batches[0].shared->size +
    3023        1442 :             chunk_size > pstate->space_allowed)
    3024             :         {
    3025          36 :             pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3026          36 :             hashtable->batches[0].shared->space_exhausted = true;
    3027          36 :             LWLockRelease(&pstate->lock);
    3028             : 
    3029          36 :             return NULL;
    3030             :         }
    3031             : 
    3032             :         /* Check if our load factor limit would be exceeded. */
    3033        1784 :         if (hashtable->nbatch == 1)
    3034             :         {
    3035        1556 :             hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
    3036        1556 :             hashtable->batches[0].ntuples = 0;
    3037             :             /* Guard against integer overflow and alloc size overflow */
    3038        1556 :             if (hashtable->batches[0].shared->ntuples + 1 >
    3039        1556 :                 hashtable->nbuckets * NTUP_PER_BUCKET &&
    3040         108 :                 hashtable->nbuckets < (INT_MAX / 2) &&
    3041         108 :                 hashtable->nbuckets * 2 <=
    3042             :                 MaxAllocSize / sizeof(dsa_pointer_atomic))
    3043             :             {
    3044         108 :                 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
    3045         108 :                 LWLockRelease(&pstate->lock);
    3046             : 
    3047         108 :                 return NULL;
    3048             :             }
    3049             :         }
    3050             :     }
    3051             : 
    3052             :     /* We are cleared to allocate a new chunk. */
    3053        3424 :     chunk_shared = dsa_allocate(hashtable->area, chunk_size);
    3054        3424 :     hashtable->batches[curbatch].shared->size += chunk_size;
    3055        3424 :     hashtable->batches[curbatch].at_least_one_chunk = true;
    3056             : 
    3057             :     /* Set up the chunk. */
    3058        3424 :     chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
    3059        3424 :     *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
    3060        3424 :     chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
    3061        3424 :     chunk->used = size;
    3062             : 
    3063             :     /*
    3064             :      * Push it onto the list of chunks, so that it can be found if we need to
    3065             :      * increase the number of buckets or batches (batch 0 only) and later for
    3066             :      * freeing the memory (all batches).
    3067             :      */
    3068        3424 :     chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
    3069        3424 :     hashtable->batches[curbatch].shared->chunks = chunk_shared;
    3070             : 
    3071        3424 :     if (size <= HASH_CHUNK_THRESHOLD)
    3072             :     {
    3073             :         /*
    3074             :          * Make this the current chunk so that we can use the fast path to
    3075             :          * fill the rest of it up in future calls.
    3076             :          */
    3077        3388 :         hashtable->current_chunk = chunk;
    3078        3388 :         hashtable->current_chunk_shared = chunk_shared;
    3079             :     }
    3080        3424 :     LWLockRelease(&pstate->lock);
    3081             : 
    3082             :     Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
    3083        3424 :     result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
    3084             : 
    3085        3424 :     return result;
    3086             : }
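
A NULL return here is not an error; it is the retry signal described in the header comment. A condensed caller-side sketch of that contract, modeled on ExecParallelHashTableInsert() earlier in this file (tuple copying, preallocation, and the spill-to-tuplestore path are elided):

    for (;;)
    {
        /* Recompute the target: nbatch or nbuckets may have changed. */
        ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
        if (batchno != 0)
            break;                  /* write to that batch's tuplestore */

        hashTuple = ExecParallelHashTupleAlloc(hashtable,
                                               HJTUPLE_OVERHEAD + tuple->t_len,
                                               &shared);
        if (hashTuple != NULL)
            break;                  /* copy the tuple and push it into its bucket */
        /* NULL: we helped grow buckets or batches; loop and recompute. */
    }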
    3087             : 
    3088             : /*
    3089             :  * One backend needs to set up the shared batch state, including tuplestores.
    3090             :  * Other backends will ensure they have correctly configured accessors by
    3091             :  * calling ExecParallelHashEnsureBatchAccessors().
    3092             :  */
    3093             : static void
    3094         218 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
    3095             : {
    3096         218 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3097             :     ParallelHashJoinBatch *batches;
    3098             :     MemoryContext oldcxt;
    3099             :     int         i;
    3100             : 
    3101             :     Assert(hashtable->batches == NULL);
    3102             : 
    3103             :     /* Allocate space. */
    3104         218 :     pstate->batches =
    3105         218 :         dsa_allocate0(hashtable->area,
    3106             :                       EstimateParallelHashJoinBatch(hashtable) * nbatch);
    3107         218 :     pstate->nbatch = nbatch;
    3108         218 :     batches = dsa_get_address(hashtable->area, pstate->batches);
    3109             : 
    3110             :     /*
    3111             :      * Use hash join spill memory context to allocate accessors, including
    3112             :      * buffers for the temporary files.
    3113             :      */
    3114         218 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3115             : 
    3116             :     /* Allocate this backend's accessor array. */
    3117         218 :     hashtable->nbatch = nbatch;
    3118         218 :     hashtable->batches =
    3119         218 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3120             : 
    3121             :     /* Set up the shared state, tuplestores and backend-local accessors. */
    3122        1084 :     for (i = 0; i < hashtable->nbatch; ++i)
    3123             :     {
    3124         866 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3125         866 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3126             :         char        name[MAXPGPATH];
    3127             : 
    3128             :         /*
    3129             :          * All members of shared were zero-initialized.  We just need to set
    3130             :          * up the Barrier.
    3131             :          */
    3132         866 :         BarrierInit(&shared->batch_barrier, 0);
    3133         866 :         if (i == 0)
    3134             :         {
    3135             :             /* Batch 0 doesn't need to be loaded. */
    3136         218 :             BarrierAttach(&shared->batch_barrier);
    3137         872 :             while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
    3138         654 :                 BarrierArriveAndWait(&shared->batch_barrier, 0);
    3139         218 :             BarrierDetach(&shared->batch_barrier);
    3140             :         }
    3141             : 
    3142             :         /* Initialize accessor state.  All members were zero-initialized. */
    3143         866 :         accessor->shared = shared;
    3144             : 
    3145             :         /* Initialize the shared tuplestores. */
    3146         866 :         snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
    3147         866 :         accessor->inner_tuples =
    3148         866 :             sts_initialize(ParallelHashJoinBatchInner(shared),
    3149             :                            pstate->nparticipants,
    3150             :                            ParallelWorkerNumber + 1,
    3151             :                            sizeof(uint32),
    3152             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3153             :                            &pstate->fileset,
    3154             :                            name);
    3155         866 :         snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
    3156         866 :         accessor->outer_tuples =
    3157         866 :             sts_initialize(ParallelHashJoinBatchOuter(shared,
    3158             :                                                       pstate->nparticipants),
    3159             :                            pstate->nparticipants,
    3160             :                            ParallelWorkerNumber + 1,
    3161             :                            sizeof(uint32),
    3162             :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3163             :                            &pstate->fileset,
    3164             :                            name);
    3165             :     }
    3166             : 
    3167         218 :     MemoryContextSwitchTo(oldcxt);
    3168         218 : }
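
For example, with nbatch = 4 the inner tuplestores are named i0of4 through i3of4 and the outer ones o0of4 through o3of4, keeping each batch's temporary files distinct within the shared fileset. Note also the special case above: batch 0 is built directly into memory, so its barrier is advanced straight to PHJ_BATCH_PROBE and no participant will ever load it from disk.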
    3169             : 
    3170             : /*
    3171             :  * Free the current set of ParallelHashJoinBatchAccessor objects.
    3172             :  */
    3173             : static void
    3174          52 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
    3175             : {
    3176             :     int         i;
    3177             : 
    3178         168 :     for (i = 0; i < hashtable->nbatch; ++i)
    3179             :     {
    3180             :         /* Make sure no files are left open. */
    3181         116 :         sts_end_write(hashtable->batches[i].inner_tuples);
    3182         116 :         sts_end_write(hashtable->batches[i].outer_tuples);
    3183         116 :         sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3184         116 :         sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3185             :     }
    3186          52 :     pfree(hashtable->batches);
    3187          52 :     hashtable->batches = NULL;
    3188          52 : }
    3189             : 
    3190             : /*
    3191             :  * Make sure this backend has up-to-date accessors for the current set of
    3192             :  * batches.
    3193             :  */
    3194             : static void
    3195         886 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
    3196             : {
    3197         886 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3198             :     ParallelHashJoinBatch *batches;
    3199             :     MemoryContext oldcxt;
    3200             :     int         i;
    3201             : 
    3202         886 :     if (hashtable->batches != NULL)
    3203             :     {
    3204         654 :         if (hashtable->nbatch == pstate->nbatch)
    3205         654 :             return;
    3206           0 :         ExecParallelHashCloseBatchAccessors(hashtable);
    3207             :     }
    3208             : 
    3209             :     /*
    3210             :      * We should never see a state where the batch-tracking array is freed,
    3211             :      * because we would have given up sooner if we had joined after the
    3212             :      * build barrier reached the PHJ_BUILD_FREE phase.
    3213             :      */
    3214             :     Assert(DsaPointerIsValid(pstate->batches));
    3215             : 
    3216             :     /*
    3217             :      * Use hash join spill memory context to allocate accessors, including
    3218             :      * buffers for the temporary files.
    3219             :      */
    3220         232 :     oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
    3221             : 
    3222             :     /* Allocate this backend's accessor array. */
    3223         232 :     hashtable->nbatch = pstate->nbatch;
    3224         232 :     hashtable->batches =
    3225         232 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3226             : 
    3227             :     /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
    3228             :     batches = (ParallelHashJoinBatch *)
    3229         232 :         dsa_get_address(hashtable->area, pstate->batches);
    3230             : 
    3231             :     /* Set up the accessor array and attach to the tuplestores. */
    3232        1286 :     for (i = 0; i < hashtable->nbatch; ++i)
    3233             :     {
    3234        1054 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3235        1054 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3236             : 
    3237        1054 :         accessor->shared = shared;
    3238        1054 :         accessor->preallocated = 0;
    3239        1054 :         accessor->done = false;
    3240        1054 :         accessor->outer_eof = false;
    3241        1054 :         accessor->inner_tuples =
    3242        1054 :             sts_attach(ParallelHashJoinBatchInner(shared),
    3243             :                        ParallelWorkerNumber + 1,
    3244             :                        &pstate->fileset);
    3245        1054 :         accessor->outer_tuples =
    3246        1054 :             sts_attach(ParallelHashJoinBatchOuter(shared,
    3247             :                                                   pstate->nparticipants),
    3248             :                        ParallelWorkerNumber + 1,
    3249             :                        &pstate->fileset);
    3250             :     }
    3251             : 
    3252         232 :     MemoryContextSwitchTo(oldcxt);
    3253             : }
    3254             : 
    3255             : /*
    3256             :  * Allocate an empty shared memory hash table for a given batch.
    3257             :  */
    3258             : void
    3259         766 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
    3260             : {
    3261         766 :     ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
    3262             :     dsa_pointer_atomic *buckets;
    3263         766 :     int         nbuckets = hashtable->parallel_state->nbuckets;
    3264             :     int         i;
    3265             : 
    3266         766 :     batch->buckets =
    3267         766 :         dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
    3268             :     buckets = (dsa_pointer_atomic *)
    3269         766 :         dsa_get_address(hashtable->area, batch->buckets);
    3270     3148542 :     for (i = 0; i < nbuckets; ++i)
    3271     3147776 :         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    3272         766 : }
    3273             : 
    3274             : /*
    3275             :  * If we are currently attached to a shared hash join batch, detach.  If we
    3276             :  * are last to detach, clean up.
    3277             :  */
    3278             : void
    3279       19338 : ExecHashTableDetachBatch(HashJoinTable hashtable)
    3280             : {
    3281       19338 :     if (hashtable->parallel_state != NULL &&
    3282        1274 :         hashtable->curbatch >= 0)
    3283             :     {
    3284         876 :         int         curbatch = hashtable->curbatch;
    3285         876 :         ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    3286         876 :         bool        attached = true;
    3287             : 
    3288             :         /* Make sure any temporary files are closed. */
    3289         876 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    3290         876 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    3291             : 
    3292             :         /* After attaching we always get at least to PHJ_BATCH_PROBE. */
    3293             :         Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
    3294             :                BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    3295             : 
    3296             :         /*
    3297             :          * If we're abandoning the PHJ_BATCH_PROBE phase early without having
    3298             :          * reached the end of it, it means the plan doesn't want any more
    3299             :          * tuples, and it is happy to abandon any tuples buffered in this
    3300             :          * process's subplans.  For correctness, we can't allow any process to
    3301             :          * execute the PHJ_BATCH_SCAN phase, because we will never have the
    3302             :          * complete set of match bits.  Therefore we skip emitting unmatched
    3303             :          * tuples in all backends (if this is a full/right join), as if those
    3304             :          * tuples were all due to be emitted by this process and it has
    3305             :          * abandoned them too.
    3306             :          */
    3307         876 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
    3308         810 :             !hashtable->batches[curbatch].outer_eof)
    3309             :         {
    3310             :             /*
    3311             :              * This flag may be written to by multiple backends during
    3312             :              * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
    3313             :              * phase so requires no extra locking.
    3314             :              */
    3315           0 :             batch->skip_unmatched = true;
    3316             :         }
    3317             : 
    3318             :         /*
    3319             :          * Even if we aren't doing a full/right outer join, we'll step through
    3320             :          * the PHJ_BATCH_SCAN phase just to maintain the invariant that
    3321             :          * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
    3322             :          */
    3323         876 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
    3324         810 :             attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
    3325         876 :         if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
    3326             :         {
    3327             :              * We are no longer attached to the batch barrier, but we're the
    3328             :              * We are not longer attached to the batch barrier, but we're the
    3329             :              * process that was chosen to free resources and it's safe to
    3330             :              * assert the current phase.  The ParallelHashJoinBatch can't go
    3331             :              * away underneath us while we are attached to the build barrier,
    3332             :              * making this access safe.
    3333             :              */
    3334             :             Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
    3335             : 
    3336             :             /* Free shared chunks and buckets. */
    3337        3898 :             while (DsaPointerIsValid(batch->chunks))
    3338             :             {
    3339             :                 HashMemoryChunk chunk =
    3340        3134 :                     dsa_get_address(hashtable->area, batch->chunks);
    3341        3134 :                 dsa_pointer next = chunk->next.shared;
    3342             : 
    3343        3134 :                 dsa_free(hashtable->area, batch->chunks);
    3344        3134 :                 batch->chunks = next;
    3345             :             }
    3346         764 :             if (DsaPointerIsValid(batch->buckets))
    3347             :             {
    3348         764 :                 dsa_free(hashtable->area, batch->buckets);
    3349         764 :                 batch->buckets = InvalidDsaPointer;
    3350             :             }
    3351             :         }
    3352             : 
    3353             :         /*
    3354             :          * Track the largest batch we've been attached to.  Though each
    3355             :          * backend might see a different subset of batches, explain.c will
    3356             :          * scan the results from all backends to find the largest value.
    3357             :          */
    3358         876 :         hashtable->spacePeak =
    3359         876 :             Max(hashtable->spacePeak,
    3360             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    3361             : 
    3362             :         /* Remember that we are not attached to a batch. */
    3363         876 :         hashtable->curbatch = -1;
    3364             :     }
    3365       19338 : }
    3366             : 
    3367             : /*
    3368             :  * Detach from all shared resources.  If we are last to detach, clean up.
    3369             :  */
    3370             : void
    3371       18462 : ExecHashTableDetach(HashJoinTable hashtable)
    3372             : {
    3373       18462 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3374             : 
    3375             :     /*
    3376             :      * If we're involved in a parallel query, we must either have gotten all
    3377             :      * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
    3378             :      */
    3379             :     Assert(!pstate ||
    3380             :            BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
    3381             : 
    3382       18462 :     if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
    3383             :     {
    3384             :         int         i;
    3385             : 
    3386             :         /* Make sure any temporary files are closed. */
    3387         398 :         if (hashtable->batches)
    3388             :         {
    3389        2202 :             for (i = 0; i < hashtable->nbatch; ++i)
    3390             :             {
    3391        1804 :                 sts_end_write(hashtable->batches[i].inner_tuples);
    3392        1804 :                 sts_end_write(hashtable->batches[i].outer_tuples);
    3393        1804 :                 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3394        1804 :                 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3395             :             }
    3396             :         }
    3397             : 
    3398             :         /* If we're last to detach, clean up shared memory. */
    3399         398 :         if (BarrierArriveAndDetach(&pstate->build_barrier))
    3400             :         {
    3401             :             /*
    3402             :              * Late joining processes will see this state and give up
    3403             :              * immediately.
    3404             :              */
    3405             :             Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
    3406             : 
    3407         168 :             if (DsaPointerIsValid(pstate->batches))
    3408             :             {
    3409         168 :                 dsa_free(hashtable->area, pstate->batches);
    3410         168 :                 pstate->batches = InvalidDsaPointer;
    3411             :             }
    3412             :         }
    3413             :     }
    3414       18462 :     hashtable->parallel_state = NULL;
    3415       18462 : }
    3416             : 
    3417             : /*
    3418             :  * Get the first tuple in a given bucket identified by number.
    3419             :  */
    3420             : static inline HashJoinTuple
    3421     2774430 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
    3422             : {
    3423             :     HashJoinTuple tuple;
    3424             :     dsa_pointer p;
    3425             : 
    3426             :     Assert(hashtable->parallel_state);
    3427     2774430 :     p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
    3428     2774430 :     tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
    3429             : 
    3430     2774430 :     return tuple;
    3431             : }
    3432             : 
    3433             : /*
    3434             :  * Get the next tuple in the same bucket as 'tuple'.
    3435             :  */
    3436             : static inline HashJoinTuple
    3437     3800818 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
    3438             : {
    3439             :     HashJoinTuple next;
    3440             : 
    3441             :     Assert(hashtable->parallel_state);
    3442     3800818 :     next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
    3443             : 
    3444     3800818 :     return next;
    3445             : }
    3446             : 
    3447             : /*
    3448             :  * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
    3449             :  */
    3450             : static inline void
    3451     2926206 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
    3452             :                           HashJoinTuple tuple,
    3453             :                           dsa_pointer tuple_shared)
    3454             : {
    3455             :     for (;;)
    3456             :     {
    3457     2926206 :         tuple->next.shared = dsa_pointer_atomic_read(head);
    3458     2926206 :         if (dsa_pointer_atomic_compare_exchange(head,
    3459     2926206 :                                                 &tuple->next.shared,
    3460             :                                                 tuple_shared))
    3461     2925276 :             break;
    3462             :     }
    3463     2925276 : }
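
This is the classic lock-free list push: point the new tuple at the current head, then publish it with compare-and-swap, retrying on contention. Under concurrency the order of tuples within a bucket chain is therefore arbitrary, which is fine for a hash bucket. A standalone sketch using C11 atomics in place of dsa_pointer_atomic (names are illustrative):

    #include <stdatomic.h>

    struct node
    {
        struct node *next;
    };

    static void
    push(_Atomic(struct node *) *head, struct node *n)
    {
        n->next = atomic_load(head);

        /*
         * On failure, compare_exchange reloads the current head into
         * n->next, just as dsa_pointer_atomic_compare_exchange refreshes
         * tuple->next.shared above, so the loop simply retries.
         */
        while (!atomic_compare_exchange_weak(head, &n->next, n))
            ;
    }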
    3464             : 
    3465             : /*
    3466             :  * Prepare to work on a given batch.
    3467             :  */
    3468             : void
    3469        1980 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
    3470             : {
    3471             :     Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
    3472             : 
    3473        1980 :     hashtable->curbatch = batchno;
    3474        1980 :     hashtable->buckets.shared = (dsa_pointer_atomic *)
    3475        1980 :         dsa_get_address(hashtable->area,
    3476        1980 :                         hashtable->batches[batchno].shared->buckets);
    3477        1980 :     hashtable->nbuckets = hashtable->parallel_state->nbuckets;
    3478        1980 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
    3479        1980 :     hashtable->current_chunk = NULL;
    3480        1980 :     hashtable->current_chunk_shared = InvalidDsaPointer;
    3481        1980 :     hashtable->batches[batchno].at_least_one_chunk = false;
    3482        1980 : }
    3483             : 
    3484             : /*
    3485             :  * Take the next available chunk from the queue of chunks being worked on in
    3486             :  * parallel.  Return NULL if there are none left.  Otherwise return a pointer
    3487             :  * to the chunk, and set *shared to the DSA pointer to the chunk.
    3488             :  */
    3489             : static HashMemoryChunk
    3490        1150 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
    3491             : {
    3492        1150 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3493             :     HashMemoryChunk chunk;
    3494             : 
    3495        1150 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3496        1150 :     if (DsaPointerIsValid(pstate->chunk_work_queue))
    3497             :     {
    3498         954 :         *shared = pstate->chunk_work_queue;
    3499             :         chunk = (HashMemoryChunk)
    3500         954 :             dsa_get_address(hashtable->area, *shared);
    3501         954 :         pstate->chunk_work_queue = chunk->next.shared;
    3502             :     }
    3503             :     else
    3504         196 :         chunk = NULL;
    3505        1150 :     LWLockRelease(&pstate->lock);
    3506             : 
    3507        1150 :     return chunk;
    3508             : }
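
Unlike the lock-free bucket push above, popping the work queue happens under the LWLock, since a consumer must read and update the head as a single step. A standalone sketch with a pthread mutex standing in for the LWLock (names are illustrative):

    #include <pthread.h>
    #include <stddef.h>

    struct chunk
    {
        struct chunk *next;
    };

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct chunk *work_queue;

    /* Pop one chunk, or return NULL once the queue is drained. */
    static struct chunk *
    pop_chunk(void)
    {
        struct chunk *c;

        pthread_mutex_lock(&queue_lock);
        c = work_queue;
        if (c != NULL)
            work_queue = c->next;
        pthread_mutex_unlock(&queue_lock);
        return c;
    }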
    3509             : 
    3510             : /*
    3511             :  * Increase the space preallocated in this backend for a given inner batch by
    3512             :  * at least a given amount.  This allows us to track whether a given batch
    3513             :  * would fit in memory when loaded back in.  Also increase the number of
    3514             :  * batches or buckets if required.
    3515             :  *
    3516             :  * This maintains a running estimation of how much space will be taken when we
    3517             :  * load the batch back into memory by simulating the way chunks will be handed
    3518             :  * out to workers.  It's not perfectly accurate because the tuples will be
    3519             :  * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
    3520             :  * it should be pretty close.  It tends to overestimate by a fraction of a
    3521             :  * chunk per worker since all workers gang up to preallocate during hashing,
    3522             :  * but workers tend to reload batches alone if there are enough to go around,
    3523             :  * leaving fewer partially filled chunks.  This effect is bounded by
    3524             :  * nparticipants.
    3525             :  *
    3526             :  * Return false if the number of batches or buckets has changed, and the
    3527             :  * caller should reconsider which batch a given tuple now belongs in and call
    3528             :  * again.
    3529             :  */
    3530             : static bool
    3531        1750 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
    3532             : {
    3533        1750 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3534        1750 :     ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
    3535        1750 :     size_t      want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
    3536             : 
    3537             :     Assert(batchno > 0);
    3538             :     Assert(batchno < hashtable->nbatch);
    3539             :     Assert(size == MAXALIGN(size));
    3540             : 
    3541        1750 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3542             : 
    3543             :     /* Has another participant commanded us to help grow? */
    3544        1750 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3545        1734 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3546             :     {
    3547          16 :         ParallelHashGrowth growth = pstate->growth;
    3548             : 
    3549          16 :         LWLockRelease(&pstate->lock);
    3550          16 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3551          16 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3552           0 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3553           0 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3554             : 
    3555          16 :         return false;
    3556             :     }
    3557             : 
    3558        1734 :     if (pstate->growth != PHJ_GROWTH_DISABLED &&
    3559        1506 :         batch->at_least_one_chunk &&
    3560         706 :         (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
    3561         706 :          > pstate->space_allowed))
    3562             :     {
    3563             :         /*
    3564             :          * We have determined that this batch would exceed the space budget if
    3565             :          * loaded into memory.  Command all participants to help repartition.
    3566             :          */
    3567          14 :         batch->shared->space_exhausted = true;
    3568          14 :         pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3569          14 :         LWLockRelease(&pstate->lock);
    3570             : 
    3571          14 :         return false;
    3572             :     }
    3573             : 
    3574        1720 :     batch->at_least_one_chunk = true;
    3575        1720 :     batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
    3576        1720 :     batch->preallocated = want;
    3577        1720 :     LWLockRelease(&pstate->lock);
    3578             : 
    3579        1720 :     return true;
    3580             : }
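
A worked example of the preallocation above, assuming the 32 kB HASH_CHUNK_SIZE from hashjoin.h: for a small tuple, want = 32768 - HASH_CHUNK_HEADER_SIZE, a whole chunk's payload; for an oversized tuple, want is the tuple's own MAXALIGNed size. Either way estimated_size grows by want plus one chunk header, mirroring what ExecParallelHashTupleAlloc() will allocate when the batch is loaded back in.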
    3581             : 
    3582             : /*
    3583             :  * Calculate the limit on how much memory can be used by Hash and similar
    3584             :  * plan types.  This is work_mem times hash_mem_multiplier, and is
    3585             :  * expressed in bytes.
    3586             :  *
    3587             :  * Exported for use by the planner, as well as other hash-like executor
    3588             :  * nodes.  This is a rather random place for this, but there is no better
    3589             :  * place.
    3590             :  */
    3591             : size_t
    3592      906640 : get_hash_memory_limit(void)
    3593             : {
    3594             :     double      mem_limit;
    3595             : 
    3596             :     /* Do initial calculation in double arithmetic */
    3597      906640 :     mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
    3598             : 
    3599             :     /* Clamp in case it doesn't fit in size_t */
    3600      906640 :     mem_limit = Min(mem_limit, (double) SIZE_MAX);
    3601             : 
    3602      906640 :     return (size_t) mem_limit;
    3603             : }
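
A worked example, assuming the default settings work_mem = 4096 (kB) and hash_mem_multiplier = 2.0: 4096 * 2.0 * 1024.0 = 8388608.0, so the limit is 8 MB. Doing the arithmetic in double and clamping with Min() only matters at extreme settings, such as 32-bit builds where SIZE_MAX is about 4 GB.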

Generated by: LCOV version 1.14