LCOV - code coverage report
Current view: top level - src/backend/executor - nodeAppend.c (source / functions) Hit Total Coverage
Test: PostgreSQL 15devel Lines: 320 341 93.8 %
Date: 2021-12-03 03:09:03 Functions: 17 18 94.4 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeAppend.c
       4             :  *    routines to handle append nodes.
       5             :  *
       6             :  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeAppend.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : /* INTERFACE ROUTINES
      16             :  *      ExecInitAppend  - initialize the append node
      17             :  *      ExecAppend      - retrieve the next tuple from the node
      18             :  *      ExecEndAppend   - shut down the append node
      19             :  *      ExecReScanAppend - rescan the append node
      20             :  *
      21             :  *   NOTES
      22             :  *      Each append node contains a list of one or more subplans which
      23             :  *      must be iteratively processed (forwards or backwards).
      24             :  *      Tuples are retrieved by executing the 'whichplan'th subplan
      25             :  *      until the subplan stops returning tuples, at which point that
      26             :  *      plan is shut down and the next started up.
      27             :  *
      28             :  *      Append nodes don't make use of their left and right
      29             :  *      subtrees, rather they maintain a list of subplans so
      30             :  *      a typical append node looks like this in the plan tree:
      31             :  *
      32             :  *                 ...
      33             :  *                 /
      34             :  *              Append -------+------+------+--- nil
      35             :  *              /   \         |      |      |
      36             :  *            nil   nil      ...    ...    ...
      37             :  *                               subplans
      38             :  *
      39             :  *      Append nodes are currently used for unions, and to support
      40             :  *      inheritance queries, where several relations need to be scanned.
      41             :  *      For example, in our standard person/student/employee/student-emp
      42             :  *      example, where student and employee inherit from person
      43             :  *      and student-emp inherits from student and employee, the
      44             :  *      query:
      45             :  *
      46             :  *              select name from person
      47             :  *
      48             :  *      generates the plan:
      49             :  *
      50             :  *                |
      51             :  *              Append -------+-------+--------+--------+
      52             :  *              /   \         |       |        |        |
      53             :  *            nil   nil      Scan    Scan     Scan     Scan
      54             :  *                            |       |        |        |
      55             :  *                          person employee student student-emp
      56             :  */
      57             : 
      58             : #include "postgres.h"
      59             : 
      60             : #include "executor/execAsync.h"
      61             : #include "executor/execdebug.h"
      62             : #include "executor/execPartition.h"
      63             : #include "executor/nodeAppend.h"
      64             : #include "miscadmin.h"
      65             : #include "pgstat.h"
      66             : #include "storage/latch.h"
      67             : 
/*
 * Shared state for parallel-aware Append.
 *
 * One instance is placed in dynamic shared memory per parallel Append node
 * (see ExecAppendInitializeDSM); the leader and workers coordinate through
 * it when choosing the next subplan to execute.
 */
struct ParallelAppendState
{
	LWLock		pa_lock;		/* mutual exclusion to choose next subplan */
	int			pa_next_plan;	/* next plan to choose by any worker */

	/*
	 * pa_finished[i] should be true if no more workers should select subplan
	 * i.  for a non-partial plan, this should be set to true as soon as a
	 * worker selects the plan; for a partial plan, it remains false until
	 * some worker executes the plan to completion.
	 */
	bool		pa_finished[FLEXIBLE_ARRAY_MEMBER]; /* one flag per subplan */
};
      82             : 
      83             : #define INVALID_SUBPLAN_INDEX       -1
      84             : #define EVENT_BUFFER_SIZE           16
      85             : 
      86             : static TupleTableSlot *ExecAppend(PlanState *pstate);
      87             : static bool choose_next_subplan_locally(AppendState *node);
      88             : static bool choose_next_subplan_for_leader(AppendState *node);
      89             : static bool choose_next_subplan_for_worker(AppendState *node);
      90             : static void mark_invalid_subplans_as_finished(AppendState *node);
      91             : static void ExecAppendAsyncBegin(AppendState *node);
      92             : static bool ExecAppendAsyncGetNext(AppendState *node, TupleTableSlot **result);
      93             : static bool ExecAppendAsyncRequest(AppendState *node, TupleTableSlot **result);
      94             : static void ExecAppendAsyncEventWait(AppendState *node);
      95             : static void classify_matching_subplans(AppendState *node);
      96             : 
/* ----------------------------------------------------------------
 *		ExecInitAppend
 *
 *		Begin all of the subscans of the append node.
 *
 *		'node' is the Append plan node, 'estate' the executor state, and
 *		'eflags' the usual executor flag bits (EXEC_FLAG_MARK is not
 *		supported here).  Returns the new AppendState.
 *
 *	   (This is potentially wasteful, since the entire result of the
 *		append node may not be scanned, but this way all of the
 *		structures get allocated in the executor's top level memory
 *		block instead of that of the call to ExecAppend.)
 * ----------------------------------------------------------------
 */
AppendState *
ExecInitAppend(Append *node, EState *estate, int eflags)
{
	AppendState *appendstate = makeNode(AppendState);
	PlanState **appendplanstates;
	Bitmapset  *validsubplans;	/* original indexes surviving initial prune */
	Bitmapset  *asyncplans;		/* compact indexes of async-capable subplans */
	int			nplans;			/* number of subplans we actually initialize */
	int			nasyncplans;
	int			firstvalid;
	int			i,
				j;

	/* check for unsupported flags */
	Assert(!(eflags & EXEC_FLAG_MARK));

	/*
	 * create new AppendState for our append node
	 */
	appendstate->ps.plan = (Plan *) node;
	appendstate->ps.state = estate;
	appendstate->ps.ExecProcNode = ExecAppend;

	/* Let choose_next_subplan_* function handle setting the first subplan */
	appendstate->as_whichplan = INVALID_SUBPLAN_INDEX;
	appendstate->as_syncdone = false;
	appendstate->as_begun = false;

	/* If run-time partition pruning is enabled, then set that up now */
	if (node->part_prune_info != NULL)
	{
		PartitionPruneState *prunestate;

		/* We may need an expression context to evaluate partition exprs */
		ExecAssignExprContext(estate, &appendstate->ps);

		/* Create the working data structure for pruning. */
		prunestate = ExecCreatePartitionPruneState(&appendstate->ps,
												   node->part_prune_info);
		appendstate->as_prune_state = prunestate;

		/* Perform an initial partition prune, if required. */
		if (prunestate->do_initial_prune)
		{
			/* Determine which subplans survive initial pruning */
			validsubplans = ExecFindInitialMatchingSubPlans(prunestate,
															list_length(node->appendplans));

			nplans = bms_num_members(validsubplans);
		}
		else
		{
			/* We'll need to initialize all subplans */
			nplans = list_length(node->appendplans);
			Assert(nplans > 0);
			validsubplans = bms_add_range(NULL, 0, nplans - 1);
		}

		/*
		 * When no run-time pruning is required and there's at least one
		 * subplan, we can fill as_valid_subplans immediately, preventing
		 * later calls to ExecFindMatchingSubPlans.
		 */
		if (!prunestate->do_exec_prune && nplans > 0)
			appendstate->as_valid_subplans = bms_add_range(NULL, 0, nplans - 1);
	}
	else
	{
		nplans = list_length(node->appendplans);

		/*
		 * When run-time partition pruning is not enabled we can just mark all
		 * subplans as valid; they must also all be initialized.
		 */
		Assert(nplans > 0);
		appendstate->as_valid_subplans = validsubplans =
			bms_add_range(NULL, 0, nplans - 1);
		appendstate->as_prune_state = NULL;
	}

	/*
	 * Initialize result tuple type and slot.
	 */
	ExecInitResultTupleSlotTL(&appendstate->ps, &TTSOpsVirtual);

	/* node returns slots from each of its subnodes, therefore not fixed */
	appendstate->ps.resultopsset = true;
	appendstate->ps.resultopsfixed = false;

	/* Sized for surviving subplans only, not the full appendplans list. */
	appendplanstates = (PlanState **) palloc(nplans *
											 sizeof(PlanState *));

	/*
	 * call ExecInitNode on each of the valid plans to be executed and save
	 * the results into the appendplanstates array.
	 *
	 * While at it, find out the first valid partial plan.
	 *
	 * Note the two index spaces: 'i' walks original appendplans positions,
	 * while 'j' assigns compact positions in appendplanstates; bitmapsets
	 * built below (asyncplans) are in the compact 'j' space.
	 */
	j = 0;
	asyncplans = NULL;
	nasyncplans = 0;
	firstvalid = nplans;
	i = -1;
	while ((i = bms_next_member(validsubplans, i)) >= 0)
	{
		Plan	   *initNode = (Plan *) list_nth(node->appendplans, i);

		/*
		 * Record async subplans.  When executing EvalPlanQual, we treat them
		 * as sync ones; don't do this when initializing an EvalPlanQual plan
		 * tree.
		 */
		if (initNode->async_capable && estate->es_epq_active == NULL)
		{
			asyncplans = bms_add_member(asyncplans, j);
			nasyncplans++;
		}

		/*
		 * Record the lowest appendplans index which is a valid partial plan.
		 */
		if (i >= node->first_partial_plan && j < firstvalid)
			firstvalid = j;

		appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
	}

	appendstate->as_first_partial_plan = firstvalid;
	appendstate->appendplans = appendplanstates;
	appendstate->as_nplans = nplans;

	/* Initialize async state */
	appendstate->as_asyncplans = asyncplans;
	appendstate->as_nasyncplans = nasyncplans;
	appendstate->as_asyncrequests = NULL;
	appendstate->as_asyncresults = NULL;
	appendstate->as_nasyncresults = 0;
	appendstate->as_nasyncremain = 0;
	appendstate->as_needrequest = NULL;
	appendstate->as_eventset = NULL;
	appendstate->as_valid_asyncplans = NULL;

	if (nasyncplans > 0)
	{
		/*
		 * as_asyncrequests is indexed by compact subplan index; entries for
		 * sync subplans remain NULL (palloc0).
		 */
		appendstate->as_asyncrequests = (AsyncRequest **)
			palloc0(nplans * sizeof(AsyncRequest *));

		i = -1;
		while ((i = bms_next_member(asyncplans, i)) >= 0)
		{
			AsyncRequest *areq;

			areq = palloc(sizeof(AsyncRequest));
			areq->requestor = (PlanState *) appendstate;
			areq->requestee = appendplanstates[i];
			areq->request_index = i;
			areq->callback_pending = false;
			areq->request_complete = false;
			areq->result = NULL;

			appendstate->as_asyncrequests[i] = areq;
		}

		appendstate->as_asyncresults = (TupleTableSlot **)
			palloc0(nasyncplans * sizeof(TupleTableSlot *));

		if (appendstate->as_valid_subplans != NULL)
			classify_matching_subplans(appendstate);
	}

	/*
	 * Miscellaneous initialization
	 */

	appendstate->ps.ps_ProjInfo = NULL;

	/* For parallel query, this will be overridden later. */
	appendstate->choose_next_subplan = choose_next_subplan_locally;

	return appendstate;
}
     289             : 
/* ----------------------------------------------------------------
 *	   ExecAppend
 *
 *		Handles iteration over multiple subplans.
 *
 *		Returns the next tuple from the current sync subplan (or an
 *		async one), advancing as each subplan is exhausted; returns an
 *		empty slot once every sync and async subplan is done.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecAppend(PlanState *pstate)
{
	AppendState *node = castNode(AppendState, pstate);
	TupleTableSlot *result;

	/*
	 * If this is the first call after Init or ReScan, we need to do the
	 * initialization work.
	 */
	if (!node->as_begun)
	{
		Assert(node->as_whichplan == INVALID_SUBPLAN_INDEX);
		Assert(!node->as_syncdone);

		/* Nothing to do if there are no subplans */
		if (node->as_nplans == 0)
			return ExecClearTuple(node->ps.ps_ResultTupleSlot);

		/* If there are any async subplans, begin executing them. */
		if (node->as_nasyncplans > 0)
			ExecAppendAsyncBegin(node);

		/*
		 * If no sync subplan has been chosen, we must choose one before
		 * proceeding.
		 */
		if (!node->choose_next_subplan(node) && node->as_nasyncremain == 0)
			return ExecClearTuple(node->ps.ps_ResultTupleSlot);

		Assert(node->as_syncdone ||
			   (node->as_whichplan >= 0 &&
				node->as_whichplan < node->as_nplans));

		/* And we're initialized. */
		node->as_begun = true;
	}

	for (;;)
	{
		PlanState  *subnode;

		CHECK_FOR_INTERRUPTS();

		/*
		 * try to get a tuple from an async subplan if any
		 */
		if (node->as_syncdone || !bms_is_empty(node->as_needrequest))
		{
			if (ExecAppendAsyncGetNext(node, &result))
				return result;
			Assert(!node->as_syncdone);
			Assert(bms_is_empty(node->as_needrequest));
		}

		/*
		 * figure out which sync subplan we are currently processing
		 */
		Assert(node->as_whichplan >= 0 && node->as_whichplan < node->as_nplans);
		subnode = node->appendplans[node->as_whichplan];

		/*
		 * get a tuple from the subplan
		 */
		result = ExecProcNode(subnode);

		if (!TupIsNull(result))
		{
			/*
			 * If the subplan gave us something then return it as-is. We do
			 * NOT make use of the result slot that was set up in
			 * ExecInitAppend; there's no need for it.
			 */
			return result;
		}

		/*
		 * wait or poll for async events if any. We do this before checking
		 * for the end of iteration, because it might drain the remaining
		 * async subplans.
		 */
		if (node->as_nasyncremain > 0)
			ExecAppendAsyncEventWait(node);

		/* choose new sync subplan; if no sync/async subplans, we're done */
		if (!node->choose_next_subplan(node) && node->as_nasyncremain == 0)
			return ExecClearTuple(node->ps.ps_ResultTupleSlot);
	}
}
     385             : 
     386             : /* ----------------------------------------------------------------
     387             :  *      ExecEndAppend
     388             :  *
     389             :  *      Shuts down the subscans of the append node.
     390             :  *
     391             :  *      Returns nothing of interest.
     392             :  * ----------------------------------------------------------------
     393             :  */
     394             : void
     395        8218 : ExecEndAppend(AppendState *node)
     396             : {
     397             :     PlanState **appendplans;
     398             :     int         nplans;
     399             :     int         i;
     400             : 
     401             :     /*
     402             :      * get information from the node
     403             :      */
     404        8218 :     appendplans = node->appendplans;
     405        8218 :     nplans = node->as_nplans;
     406             : 
     407             :     /*
     408             :      * shut down each of the subscans
     409             :      */
     410       32102 :     for (i = 0; i < nplans; i++)
     411       23884 :         ExecEndNode(appendplans[i]);
     412        8218 : }
     413             : 
/* ----------------------------------------------------------------
 *		ExecReScanAppend
 *
 *		Resets the append node so the next ExecAppend starts a fresh
 *		scan, propagating changed-parameter signals to the subplans.
 * ----------------------------------------------------------------
 */
void
ExecReScanAppend(AppendState *node)
{
	int			nasyncplans = node->as_nasyncplans;
	int			i;

	/*
	 * If any PARAM_EXEC Params used in pruning expressions have changed, then
	 * we'd better unset the valid subplans so that they are reselected for
	 * the new parameter values.
	 */
	if (node->as_prune_state &&
		bms_overlap(node->ps.chgParam,
					node->as_prune_state->execparamids))
	{
		bms_free(node->as_valid_subplans);
		node->as_valid_subplans = NULL;
		if (nasyncplans > 0)
		{
			bms_free(node->as_valid_asyncplans);
			node->as_valid_asyncplans = NULL;
		}
	}

	for (i = 0; i < node->as_nplans; i++)
	{
		PlanState  *subnode = node->appendplans[i];

		/*
		 * ExecReScan doesn't know about my subplans, so I have to do
		 * changed-parameter signaling myself.
		 */
		if (node->ps.chgParam != NULL)
			UpdateChangedParamSet(subnode, node->ps.chgParam);

		/*
		 * If chgParam of subnode is not null then plan will be re-scanned by
		 * first ExecProcNode or by first ExecAsyncRequest.
		 */
		if (subnode->chgParam == NULL)
			ExecReScan(subnode);
	}

	/* Reset async state */
	if (nasyncplans > 0)
	{
		i = -1;
		while ((i = bms_next_member(node->as_asyncplans, i)) >= 0)
		{
			AsyncRequest *areq = node->as_asyncrequests[i];

			/* Forget any pending or completed async request. */
			areq->callback_pending = false;
			areq->request_complete = false;
			areq->result = NULL;
		}

		node->as_nasyncresults = 0;
		node->as_nasyncremain = 0;
		bms_free(node->as_needrequest);
		node->as_needrequest = NULL;
	}

	/* Let choose_next_subplan_* function handle setting the first subplan */
	node->as_whichplan = INVALID_SUBPLAN_INDEX;
	node->as_syncdone = false;
	node->as_begun = false;
}
     481             : 
     482             : /* ----------------------------------------------------------------
     483             :  *                      Parallel Append Support
     484             :  * ----------------------------------------------------------------
     485             :  */
     486             : 
     487             : /* ----------------------------------------------------------------
     488             :  *      ExecAppendEstimate
     489             :  *
     490             :  *      Compute the amount of space we'll need in the parallel
     491             :  *      query DSM, and inform pcxt->estimator about our needs.
     492             :  * ----------------------------------------------------------------
     493             :  */
     494             : void
     495          88 : ExecAppendEstimate(AppendState *node,
     496             :                    ParallelContext *pcxt)
     497             : {
     498          88 :     node->pstate_len =
     499          88 :         add_size(offsetof(ParallelAppendState, pa_finished),
     500          88 :                  sizeof(bool) * node->as_nplans);
     501             : 
     502          88 :     shm_toc_estimate_chunk(&pcxt->estimator, node->pstate_len);
     503          88 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
     504          88 : }
     505             : 
     506             : 
     507             : /* ----------------------------------------------------------------
     508             :  *      ExecAppendInitializeDSM
     509             :  *
     510             :  *      Set up shared state for Parallel Append.
     511             :  * ----------------------------------------------------------------
     512             :  */
     513             : void
     514          88 : ExecAppendInitializeDSM(AppendState *node,
     515             :                         ParallelContext *pcxt)
     516             : {
     517             :     ParallelAppendState *pstate;
     518             : 
     519          88 :     pstate = shm_toc_allocate(pcxt->toc, node->pstate_len);
     520          88 :     memset(pstate, 0, node->pstate_len);
     521          88 :     LWLockInitialize(&pstate->pa_lock, LWTRANCHE_PARALLEL_APPEND);
     522          88 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id, pstate);
     523             : 
     524          88 :     node->as_pstate = pstate;
     525          88 :     node->choose_next_subplan = choose_next_subplan_for_leader;
     526          88 : }
     527             : 
     528             : /* ----------------------------------------------------------------
     529             :  *      ExecAppendReInitializeDSM
     530             :  *
     531             :  *      Reset shared state before beginning a fresh scan.
     532             :  * ----------------------------------------------------------------
     533             :  */
     534             : void
     535           0 : ExecAppendReInitializeDSM(AppendState *node, ParallelContext *pcxt)
     536             : {
     537           0 :     ParallelAppendState *pstate = node->as_pstate;
     538             : 
     539           0 :     pstate->pa_next_plan = 0;
     540           0 :     memset(pstate->pa_finished, 0, sizeof(bool) * node->as_nplans);
     541           0 : }
     542             : 
     543             : /* ----------------------------------------------------------------
     544             :  *      ExecAppendInitializeWorker
     545             :  *
     546             :  *      Copy relevant information from TOC into planstate, and initialize
     547             :  *      whatever is required to choose and execute the optimal subplan.
     548             :  * ----------------------------------------------------------------
     549             :  */
     550             : void
     551         204 : ExecAppendInitializeWorker(AppendState *node, ParallelWorkerContext *pwcxt)
     552             : {
     553         204 :     node->as_pstate = shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
     554         204 :     node->choose_next_subplan = choose_next_subplan_for_worker;
     555         204 : }
     556             : 
     557             : /* ----------------------------------------------------------------
     558             :  *      choose_next_subplan_locally
     559             :  *
     560             :  *      Choose next sync subplan for a non-parallel-aware Append,
     561             :  *      returning false if there are no more.
     562             :  * ----------------------------------------------------------------
     563             :  */
     564             : static bool
     565      204904 : choose_next_subplan_locally(AppendState *node)
     566             : {
     567      204904 :     int         whichplan = node->as_whichplan;
     568             :     int         nextplan;
     569             : 
     570             :     /* We should never be called when there are no subplans */
     571             :     Assert(node->as_nplans > 0);
     572             : 
     573             :     /* Nothing to do if syncdone */
     574      204904 :     if (node->as_syncdone)
     575          22 :         return false;
     576             : 
     577             :     /*
     578             :      * If first call then have the bms member function choose the first valid
     579             :      * sync subplan by initializing whichplan to -1.  If there happen to be no
     580             :      * valid sync subplans then the bms member function will handle that by
     581             :      * returning a negative number which will allow us to exit returning a
     582             :      * false value.
     583             :      */
     584      204882 :     if (whichplan == INVALID_SUBPLAN_INDEX)
     585             :     {
     586       68388 :         if (node->as_nasyncplans > 0)
     587             :         {
     588             :             /* We'd have filled as_valid_subplans already */
     589             :             Assert(node->as_valid_subplans);
     590             :         }
     591       68352 :         else if (node->as_valid_subplans == NULL)
     592        2258 :             node->as_valid_subplans =
     593        2258 :                 ExecFindMatchingSubPlans(node->as_prune_state);
     594             : 
     595       68388 :         whichplan = -1;
     596             :     }
     597             : 
     598             :     /* Ensure whichplan is within the expected range */
     599             :     Assert(whichplan >= -1 && whichplan <= node->as_nplans);
     600             : 
     601      204882 :     if (ScanDirectionIsForward(node->ps.state->es_direction))
     602      204870 :         nextplan = bms_next_member(node->as_valid_subplans, whichplan);
     603             :     else
     604          12 :         nextplan = bms_prev_member(node->as_valid_subplans, whichplan);
     605             : 
     606      204882 :     if (nextplan < 0)
     607             :     {
     608             :         /* Set as_syncdone if in async mode */
     609       68054 :         if (node->as_nasyncplans > 0)
     610          32 :             node->as_syncdone = true;
     611       68054 :         return false;
     612             :     }
     613             : 
     614      136828 :     node->as_whichplan = nextplan;
     615             : 
     616      136828 :     return true;
     617             : }
     618             : 
     619             : /* ----------------------------------------------------------------
     620             :  *      choose_next_subplan_for_leader
     621             :  *
     622             :  *      Try to pick a plan which doesn't commit us to doing much
     623             :  *      work locally, so that as much work as possible is done in
     624             :  *      the workers.  Cheapest subplans are at the end.
     625             :  * ----------------------------------------------------------------
     626             :  */
static bool
choose_next_subplan_for_leader(AppendState *node)
{
    ParallelAppendState *pstate = node->as_pstate;

    /* Backward scan is not supported by parallel-aware plans */
    Assert(ScanDirectionIsForward(node->ps.state->es_direction));

    /* We should never be called when there are no subplans */
    Assert(node->as_nplans > 0);

    /* Shared parallel state is read and updated only while holding this lock. */
    LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);

    if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
    {
        /* Mark just-completed subplan as finished. */
        node->as_pstate->pa_finished[node->as_whichplan] = true;
    }
    else
    {
        /* Start with last subplan. */
        node->as_whichplan = node->as_nplans - 1;

        /*
         * If we've yet to determine the valid subplans then do so now.  If
         * run-time pruning is disabled then the valid subplans will always be
         * set to all subplans.
         */
        if (node->as_valid_subplans == NULL)
        {
            node->as_valid_subplans =
                ExecFindMatchingSubPlans(node->as_prune_state);

            /*
             * Mark each invalid plan as finished to allow the loop below to
             * select the first valid subplan.
             */
            mark_invalid_subplans_as_finished(node);
        }
    }

    /*
     * Loop until we find a subplan to execute.  The leader walks backward
     * from the end of the plan list, so it takes the cheapest subplans and
     * leaves the expensive ones for the workers.
     */
    while (pstate->pa_finished[node->as_whichplan])
    {
        if (node->as_whichplan == 0)
        {
            /* Every subplan is finished; tell workers too via pa_next_plan. */
            pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
            node->as_whichplan = INVALID_SUBPLAN_INDEX;
            LWLockRelease(&pstate->pa_lock);
            return false;
        }

        /*
         * We needn't pay attention to as_valid_subplans here as all invalid
         * plans have been marked as finished.
         */
        node->as_whichplan--;
    }

    /* If non-partial, immediately mark as finished. */
    if (node->as_whichplan < node->as_first_partial_plan)
        node->as_pstate->pa_finished[node->as_whichplan] = true;

    LWLockRelease(&pstate->pa_lock);

    return true;
}
     694             : 
     695             : /* ----------------------------------------------------------------
     696             :  *      choose_next_subplan_for_worker
     697             :  *
     698             :  *      Choose next subplan for a parallel-aware Append, returning
     699             :  *      false if there are no more.
     700             :  *
     701             :  *      We start from the first plan and advance through the list;
     702             :  *      when we get back to the end, we loop back to the first
     703             :  *      partial plan.  This assigns the non-partial plans first in
     704             :  *      order of descending cost and then spreads out the workers
     705             :  *      as evenly as possible across the remaining partial plans.
     706             :  * ----------------------------------------------------------------
     707             :  */
static bool
choose_next_subplan_for_worker(AppendState *node)
{
    ParallelAppendState *pstate = node->as_pstate;

    /* Backward scan is not supported by parallel-aware plans */
    Assert(ScanDirectionIsForward(node->ps.state->es_direction));

    /* We should never be called when there are no subplans */
    Assert(node->as_nplans > 0);

    /* Shared parallel state is read and updated only while holding this lock. */
    LWLockAcquire(&pstate->pa_lock, LW_EXCLUSIVE);

    /* Mark just-completed subplan as finished. */
    if (node->as_whichplan != INVALID_SUBPLAN_INDEX)
        node->as_pstate->pa_finished[node->as_whichplan] = true;

    /*
     * If we've yet to determine the valid subplans then do so now.  If
     * run-time pruning is disabled then the valid subplans will always be set
     * to all subplans.
     */
    else if (node->as_valid_subplans == NULL)
    {
        node->as_valid_subplans =
            ExecFindMatchingSubPlans(node->as_prune_state);
        mark_invalid_subplans_as_finished(node);
    }

    /* If all the plans are already done, we have nothing to do */
    if (pstate->pa_next_plan == INVALID_SUBPLAN_INDEX)
    {
        LWLockRelease(&pstate->pa_lock);
        return false;
    }

    /* Save the plan from which we are starting the search. */
    node->as_whichplan = pstate->pa_next_plan;

    /*
     * Loop until we find a valid subplan to execute.  pa_next_plan is the
     * shared cursor; if the search comes back around to as_whichplan we know
     * every remaining plan is finished.
     */
    while (pstate->pa_finished[pstate->pa_next_plan])
    {
        int         nextplan;

        nextplan = bms_next_member(node->as_valid_subplans,
                                   pstate->pa_next_plan);
        if (nextplan >= 0)
        {
            /* Advance to the next valid plan. */
            pstate->pa_next_plan = nextplan;
        }
        else if (node->as_whichplan > node->as_first_partial_plan)
        {
            /*
             * Try looping back to the first valid partial plan, if there is
             * one.  If there isn't, arrange to bail out below.
             */
            nextplan = bms_next_member(node->as_valid_subplans,
                                       node->as_first_partial_plan - 1);
            pstate->pa_next_plan =
                nextplan < 0 ? node->as_whichplan : nextplan;
        }
        else
        {
            /*
             * At last plan, and either there are no partial plans or we've
             * tried them all.  Arrange to bail out.
             */
            pstate->pa_next_plan = node->as_whichplan;
        }

        if (pstate->pa_next_plan == node->as_whichplan)
        {
            /* We've tried everything! */
            pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
            LWLockRelease(&pstate->pa_lock);
            return false;
        }
    }

    /* Pick the plan we found, and advance pa_next_plan one more time. */
    node->as_whichplan = pstate->pa_next_plan;
    pstate->pa_next_plan = bms_next_member(node->as_valid_subplans,
                                           pstate->pa_next_plan);

    /*
     * If there are no more valid plans then try setting the next plan to the
     * first valid partial plan.
     */
    if (pstate->pa_next_plan < 0)
    {
        int         nextplan = bms_next_member(node->as_valid_subplans,
                                               node->as_first_partial_plan - 1);

        if (nextplan >= 0)
            pstate->pa_next_plan = nextplan;
        else
        {
            /*
             * There are no valid partial plans, and we already chose the last
             * non-partial plan; so flag that there's nothing more for our
             * fellow workers to do.
             */
            pstate->pa_next_plan = INVALID_SUBPLAN_INDEX;
        }
    }

    /* If non-partial, immediately mark as finished. */
    if (node->as_whichplan < node->as_first_partial_plan)
        node->as_pstate->pa_finished[node->as_whichplan] = true;

    LWLockRelease(&pstate->pa_lock);

    return true;
}
     823             : 
     824             : /*
     825             :  * mark_invalid_subplans_as_finished
     826             :  *      Marks the ParallelAppendState's pa_finished as true for each invalid
     827             :  *      subplan.
     828             :  *
     829             :  * This function should only be called for parallel Append with run-time
     830             :  * pruning enabled.
     831             :  */
     832             : static void
     833          48 : mark_invalid_subplans_as_finished(AppendState *node)
     834             : {
     835             :     int         i;
     836             : 
     837             :     /* Only valid to call this while in parallel Append mode */
     838             :     Assert(node->as_pstate);
     839             : 
     840             :     /* Shouldn't have been called when run-time pruning is not enabled */
     841             :     Assert(node->as_prune_state);
     842             : 
     843             :     /* Nothing to do if all plans are valid */
     844          48 :     if (bms_num_members(node->as_valid_subplans) == node->as_nplans)
     845           0 :         return;
     846             : 
     847             :     /* Mark all non-valid plans as finished */
     848         156 :     for (i = 0; i < node->as_nplans; i++)
     849             :     {
     850         108 :         if (!bms_is_member(i, node->as_valid_subplans))
     851          48 :             node->as_pstate->pa_finished[i] = true;
     852             :     }
     853             : }
     854             : 
     855             : /* ----------------------------------------------------------------
     856             :  *                      Asynchronous Append Support
     857             :  * ----------------------------------------------------------------
     858             :  */
     859             : 
     860             : /* ----------------------------------------------------------------
     861             :  *      ExecAppendAsyncBegin
     862             :  *
     863             :  *      Begin executing designed async-capable subplans.
     864             :  * ----------------------------------------------------------------
     865             :  */
     866             : static void
     867          58 : ExecAppendAsyncBegin(AppendState *node)
     868             : {
     869             :     int         i;
     870             : 
     871             :     /* Backward scan is not supported by async-aware Appends. */
     872             :     Assert(ScanDirectionIsForward(node->ps.state->es_direction));
     873             : 
     874             :     /* We should never be called when there are no subplans */
     875             :     Assert(node->as_nplans > 0);
     876             : 
     877             :     /* We should never be called when there are no async subplans. */
     878             :     Assert(node->as_nasyncplans > 0);
     879             : 
     880             :     /* If we've yet to determine the valid subplans then do so now. */
     881          58 :     if (node->as_valid_subplans == NULL)
     882             :     {
     883           4 :         node->as_valid_subplans =
     884           4 :             ExecFindMatchingSubPlans(node->as_prune_state);
     885             : 
     886           4 :         classify_matching_subplans(node);
     887             :     }
     888             : 
     889             :     /* Initialize state variables. */
     890          58 :     node->as_syncdone = bms_is_empty(node->as_valid_subplans);
     891          58 :     node->as_nasyncremain = bms_num_members(node->as_valid_asyncplans);
     892             : 
     893             :     /* Nothing to do if there are no valid async subplans. */
     894          58 :     if (node->as_nasyncremain == 0)
     895           0 :         return;
     896             : 
     897             :     /* Make a request for each of the valid async subplans. */
     898          58 :     i = -1;
     899         168 :     while ((i = bms_next_member(node->as_valid_asyncplans, i)) >= 0)
     900             :     {
     901         110 :         AsyncRequest *areq = node->as_asyncrequests[i];
     902             : 
     903             :         Assert(areq->request_index == i);
     904             :         Assert(!areq->callback_pending);
     905             : 
     906             :         /* Do the actual work. */
     907         110 :         ExecAsyncRequest(areq);
     908             :     }
     909             : }
     910             : 
     911             : /* ----------------------------------------------------------------
     912             :  *      ExecAppendAsyncGetNext
     913             :  *
     914             :  *      Get the next tuple from any of the asynchronous subplans.
     915             :  * ----------------------------------------------------------------
     916             :  */
static bool
ExecAppendAsyncGetNext(AppendState *node, TupleTableSlot **result)
{
    *result = NULL;

    /* We should never be called when there are no valid async subplans. */
    Assert(node->as_nasyncremain > 0);

    /* Request a tuple asynchronously. */
    if (ExecAppendAsyncRequest(node, result))
        return true;

    /*
     * No result was immediately available; wait (or poll, if sync subplans
     * remain) for async events until a tuple arrives or all async subplans
     * are exhausted.
     */
    while (node->as_nasyncremain > 0)
    {
        CHECK_FOR_INTERRUPTS();

        /* Wait or poll for async events. */
        ExecAppendAsyncEventWait(node);

        /* Request a tuple asynchronously. */
        if (ExecAppendAsyncRequest(node, result))
            return true;

        /* Break from loop if there's any sync subplan that isn't complete. */
        if (!node->as_syncdone)
            break;
    }

    /*
     * If all sync subplans are complete, we're totally done scanning the
     * given node.  Otherwise, we're done with the asynchronous stuff but must
     * continue scanning the sync subplans.  Returning true with an empty
     * slot signals end-of-scan to the caller; returning false tells it to
     * fall back to the sync subplans.
     */
    if (node->as_syncdone)
    {
        Assert(node->as_nasyncremain == 0);
        *result = ExecClearTuple(node->ps.ps_ResultTupleSlot);
        return true;
    }

    return false;
}
     959             : 
     960             : /* ----------------------------------------------------------------
     961             :  *      ExecAppendAsyncRequest
     962             :  *
     963             :  *      Request a tuple asynchronously.
     964             :  * ----------------------------------------------------------------
     965             :  */
     966             : static bool
     967       11500 : ExecAppendAsyncRequest(AppendState *node, TupleTableSlot **result)
     968             : {
     969             :     Bitmapset  *needrequest;
     970             :     int         i;
     971             : 
     972             :     /* Nothing to do if there are no async subplans needing a new request. */
     973       11500 :     if (bms_is_empty(node->as_needrequest))
     974             :     {
     975             :         Assert(node->as_nasyncresults == 0);
     976          86 :         return false;
     977             :     }
     978             : 
     979             :     /*
     980             :      * If there are any asynchronously-generated results that have not yet
     981             :      * been returned, we have nothing to do; just return one of them.
     982             :      */
     983       11414 :     if (node->as_nasyncresults > 0)
     984             :     {
     985        4924 :         --node->as_nasyncresults;
     986        4924 :         *result = node->as_asyncresults[node->as_nasyncresults];
     987        4924 :         return true;
     988             :     }
     989             : 
     990             :     /* Make a new request for each of the async subplans that need it. */
     991        6490 :     needrequest = node->as_needrequest;
     992        6490 :     node->as_needrequest = NULL;
     993        6490 :     i = -1;
     994       17804 :     while ((i = bms_next_member(needrequest, i)) >= 0)
     995             :     {
     996       11314 :         AsyncRequest *areq = node->as_asyncrequests[i];
     997             : 
     998             :         /* Do the actual work. */
     999       11314 :         ExecAsyncRequest(areq);
    1000             :     }
    1001        6490 :     bms_free(needrequest);
    1002             : 
    1003             :     /* Return one of the asynchronously-generated results if any. */
    1004        6490 :     if (node->as_nasyncresults > 0)
    1005             :     {
    1006        6390 :         --node->as_nasyncresults;
    1007        6390 :         *result = node->as_asyncresults[node->as_nasyncresults];
    1008        6390 :         return true;
    1009             :     }
    1010             : 
    1011         100 :     return false;
    1012             : }
    1013             : 
    1014             : /* ----------------------------------------------------------------
    1015             :  *      ExecAppendAsyncEventWait
    1016             :  *
    1017             :  *      Wait or poll for file descriptor events and fire callbacks.
    1018             :  * ----------------------------------------------------------------
    1019             :  */
static void
ExecAppendAsyncEventWait(AppendState *node)
{
    /* One slot per async subplan plus one for the postmaster-death event. */
    int         nevents = node->as_nasyncplans + 1;
    /* Block indefinitely if no sync work remains; otherwise just poll. */
    long        timeout = node->as_syncdone ? -1 : 0;
    WaitEvent   occurred_event[EVENT_BUFFER_SIZE];
    int         noccurred;
    int         i;

    /* We should never be called when there are no valid async subplans. */
    Assert(node->as_nasyncremain > 0);

    node->as_eventset = CreateWaitEventSet(CurrentMemoryContext, nevents);
    AddWaitEventToSet(node->as_eventset, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET,
                      NULL, NULL);

    /* Give each waiting subplan a chance to add an event. */
    i = -1;
    while ((i = bms_next_member(node->as_asyncplans, i)) >= 0)
    {
        AsyncRequest *areq = node->as_asyncrequests[i];

        if (areq->callback_pending)
            ExecAsyncConfigureWait(areq);
    }

    /*
     * No need for further processing if there are no configured events other
     * than the postmaster death event.
     */
    if (GetNumRegisteredWaitEvents(node->as_eventset) == 1)
    {
        FreeWaitEventSet(node->as_eventset);
        node->as_eventset = NULL;
        return;
    }

    /* We wait on at most EVENT_BUFFER_SIZE events. */
    if (nevents > EVENT_BUFFER_SIZE)
        nevents = EVENT_BUFFER_SIZE;

    /*
     * If the timeout is -1, wait until at least one event occurs.  If the
     * timeout is 0, poll for events, but do not wait at all.
     */
    noccurred = WaitEventSetWait(node->as_eventset, timeout, occurred_event,
                                 nevents, WAIT_EVENT_APPEND_READY);
    FreeWaitEventSet(node->as_eventset);
    node->as_eventset = NULL;
    if (noccurred == 0)
        return;

    /* Deliver notifications. */
    for (i = 0; i < noccurred; i++)
    {
        WaitEvent  *w = &occurred_event[i];

        /*
         * Each waiting subplan should have registered its wait event with
         * user_data pointing back to its AsyncRequest.
         */
        if ((w->events & WL_SOCKET_READABLE) != 0)
        {
            AsyncRequest *areq = (AsyncRequest *) w->user_data;

            if (areq->callback_pending)
            {
                /*
                 * Mark it as no longer needing a callback.  We must do this
                 * before dispatching the callback in case the callback resets
                 * the flag.
                 */
                areq->callback_pending = false;

                /* Do the actual work. */
                ExecAsyncNotify(areq);
            }
        }
    }
}
    1100             : 
    1101             : /* ----------------------------------------------------------------
    1102             :  *      ExecAsyncAppendResponse
    1103             :  *
    1104             :  *      Receive a response from an asynchronous request we made.
    1105             :  * ----------------------------------------------------------------
    1106             :  */
    1107             : void
    1108       11694 : ExecAsyncAppendResponse(AsyncRequest *areq)
    1109             : {
    1110       11694 :     AppendState *node = (AppendState *) areq->requestor;
    1111       11694 :     TupleTableSlot *slot = areq->result;
    1112             : 
    1113             :     /* The result should be a TupleTableSlot or NULL. */
    1114             :     Assert(slot == NULL || IsA(slot, TupleTableSlot));
    1115             : 
    1116             :     /* Nothing to do if the request is pending. */
    1117       11694 :     if (!areq->request_complete)
    1118             :     {
    1119             :         /* The request would have been pending for a callback. */
    1120             :         Assert(areq->callback_pending);
    1121         278 :         return;
    1122             :     }
    1123             : 
    1124             :     /* If the result is NULL or an empty slot, there's nothing more to do. */
    1125       11416 :     if (TupIsNull(slot))
    1126             :     {
    1127             :         /* The ending subplan wouldn't have been pending for a callback. */
    1128             :         Assert(!areq->callback_pending);
    1129         102 :         --node->as_nasyncremain;
    1130         102 :         return;
    1131             :     }
    1132             : 
    1133             :     /* Save result so we can return it. */
    1134             :     Assert(node->as_nasyncresults < node->as_nasyncplans);
    1135       11314 :     node->as_asyncresults[node->as_nasyncresults++] = slot;
    1136             : 
    1137             :     /*
    1138             :      * Mark the subplan that returned a result as ready for a new request.  We
    1139             :      * don't launch another one here immediately because it might complete.
    1140             :      */
    1141       11314 :     node->as_needrequest = bms_add_member(node->as_needrequest,
    1142             :                                           areq->request_index);
    1143             : }
    1144             : 
    1145             : /* ----------------------------------------------------------------
    1146             :  *      classify_matching_subplans
    1147             :  *
    1148             :  *      Classify the node's as_valid_subplans into sync ones and
    1149             :  *      async ones, adjust it to contain sync ones only, and save
    1150             :  *      async ones in the node's as_valid_asyncplans.
    1151             :  * ----------------------------------------------------------------
    1152             :  */
    1153             : static void
    1154          70 : classify_matching_subplans(AppendState *node)
    1155             : {
    1156             :     Bitmapset  *valid_asyncplans;
    1157             : 
    1158             :     Assert(node->as_valid_asyncplans == NULL);
    1159             : 
    1160             :     /* Nothing to do if there are no valid subplans. */
    1161          70 :     if (bms_is_empty(node->as_valid_subplans))
    1162             :     {
    1163           0 :         node->as_syncdone = true;
    1164           0 :         node->as_nasyncremain = 0;
    1165           0 :         return;
    1166             :     }
    1167             : 
    1168             :     /* Nothing to do if there are no valid async subplans. */
    1169          70 :     if (!bms_overlap(node->as_valid_subplans, node->as_asyncplans))
    1170             :     {
    1171           0 :         node->as_nasyncremain = 0;
    1172           0 :         return;
    1173             :     }
    1174             : 
    1175             :     /* Get valid async subplans. */
    1176          70 :     valid_asyncplans = bms_copy(node->as_asyncplans);
    1177          70 :     valid_asyncplans = bms_int_members(valid_asyncplans,
    1178          70 :                                        node->as_valid_subplans);
    1179             : 
    1180             :     /* Adjust the valid subplans to contain sync subplans only. */
    1181          70 :     node->as_valid_subplans = bms_del_members(node->as_valid_subplans,
    1182             :                                               valid_asyncplans);
    1183             : 
    1184             :     /* Save valid async subplans. */
    1185          70 :     node->as_valid_asyncplans = valid_asyncplans;
    1186             : }

Generated by: LCOV version 1.14