LCOV - code coverage report
Current view: top level - src/backend/storage/lmgr - proc.c (source / functions)
Test:     PostgreSQL 19devel
Date:     2025-11-26 20:18:15
Coverage: Lines: 529 hit / 572 total (92.5 %), Functions: 28 hit / 28 total (100.0 %)
Legend:   Lines: hit | not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * proc.c
       4             :  *    routines to manage per-process shared memory data structure
       5             :  *
       6             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/storage/lmgr/proc.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : /*
      16             :  * Interface (a):
      17             :  *      JoinWaitQueue(), ProcSleep(), ProcWakeup()
      18             :  *
      19             :  * Waiting for a lock causes the backend to be put to sleep.  Whoever releases
      20             :  * the lock wakes the process up again (and gives it an error code so it knows
      21             :  * whether it was awoken on an error condition).
      22             :  *
      23             :  * Interface (b):
      24             :  *
      25             :  * ProcReleaseLocks -- frees the locks associated with current transaction
      26             :  *
      27             :  * ProcKill -- destroys the shared memory state (and locks)
      28             :  * associated with the process.
      29             :  */
      30             : #include "postgres.h"
      31             : 
      32             : #include <signal.h>
      33             : #include <unistd.h>
      34             : #include <sys/time.h>
      35             : 
      36             : #include "access/transam.h"
      37             : #include "access/twophase.h"
      38             : #include "access/xlogutils.h"
      39             : #include "access/xlogwait.h"
      40             : #include "miscadmin.h"
      41             : #include "pgstat.h"
      42             : #include "postmaster/autovacuum.h"
      43             : #include "replication/slotsync.h"
      44             : #include "replication/syncrep.h"
      45             : #include "storage/condition_variable.h"
      46             : #include "storage/ipc.h"
      47             : #include "storage/lmgr.h"
      48             : #include "storage/pmsignal.h"
      49             : #include "storage/proc.h"
      50             : #include "storage/procarray.h"
      51             : #include "storage/procsignal.h"
      52             : #include "storage/spin.h"
      53             : #include "storage/standby.h"
      54             : #include "utils/timeout.h"
      55             : #include "utils/timestamp.h"
      56             : 
      57             : /* GUC variables */
      58             : int         DeadlockTimeout = 1000;
      59             : int         StatementTimeout = 0;
      60             : int         LockTimeout = 0;
      61             : int         IdleInTransactionSessionTimeout = 0;
      62             : int         TransactionTimeout = 0;
      63             : int         IdleSessionTimeout = 0;
      64             : bool        log_lock_waits = true;
      65             : 
      66             : /* Pointer to this process's PGPROC struct, if any */
      67             : PGPROC     *MyProc = NULL;
      68             : 
      69             : /*
      70             :  * This spinlock protects the freelist of recycled PGPROC structures.
      71             :  * We cannot use an LWLock because the LWLock manager depends on already
      72             :  * having a PGPROC and a wait semaphore!  But these structures are touched
      73             :  * relatively infrequently (only at backend startup or shutdown) and not for
      74             :  * very long, so a spinlock is okay.
      75             :  */
      76             : NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
      77             : 
      78             : /* Pointers to shared-memory structures */
      79             : PROC_HDR   *ProcGlobal = NULL;
      80             : NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
      81             : PGPROC     *PreparedXactProcs = NULL;
      82             : 
      83             : static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
      84             : 
      85             : /* Is a deadlock check pending? */
      86             : static volatile sig_atomic_t got_deadlock_timeout;
      87             : 
      88             : static void RemoveProcFromArray(int code, Datum arg);
      89             : static void ProcKill(int code, Datum arg);
      90             : static void AuxiliaryProcKill(int code, Datum arg);
      91             : static void CheckDeadLock(void);
      92             : 
      93             : 
      94             : /*
      95             :  * Report shared-memory space needed by PGPROC.
      96             :  */
      97             : static Size
      98        6312 : PGProcShmemSize(void)
      99             : {
     100        6312 :     Size        size = 0;
     101             :     Size        TotalProcs =
     102        6312 :         add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
     103             : 
     104        6312 :     size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
     105        6312 :     size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
     106        6312 :     size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
     107        6312 :     size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
     108             : 
     109        6312 :     return size;
     110             : }
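
The add_size()/mul_size() calls above are PostgreSQL's overflow-checked Size arithmetic: they raise an error rather than silently wrapping when a sum or product overflows. Below is a minimal standalone sketch of that guard, with hypothetical checked_add()/checked_mul() helpers and made-up counts standing in for MaxBackends and sizeof(PGPROC); the stand-ins abort() where PostgreSQL would ereport(ERROR).

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    static size_t
    checked_add(size_t a, size_t b)
    {
        size_t      result = a + b;

        if (result < a)                     /* unsigned wraparound => overflow */
            abort();
        return result;
    }

    static size_t
    checked_mul(size_t a, size_t b)
    {
        if (a != 0 && b > (size_t) -1 / a)  /* product would wrap */
            abort();
        return a * b;
    }

    int
    main(void)
    {
        /* Shape of PGProcShmemSize(): TotalProcs times each element size. */
        size_t      nprocs = checked_add(100, checked_add(4, 2));  /* made-up counts */
        size_t      size = 0;

        size = checked_add(size, checked_mul(nprocs, 832)); /* 832: stand-in for sizeof(PGPROC) */
        printf("total: %zu bytes\n", size);
        return 0;
    }
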
     111             : 
     112             : /*
     113             :  * Report shared-memory space needed by Fast-Path locks.
     114             :  */
     115             : static Size
     116        6312 : FastPathLockShmemSize(void)
     117             : {
     118        6312 :     Size        size = 0;
     119             :     Size        TotalProcs =
     120        6312 :         add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
     121             :     Size        fpLockBitsSize,
     122             :                 fpRelIdSize;
     123             : 
     124             :     /*
     125             :      * Memory needed for PGPROC fast-path lock arrays. Make sure the sizes are
     126             :      * nicely aligned in each backend.
     127             :      */
     128        6312 :     fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
     129        6312 :     fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
     130             : 
     131        6312 :     size = add_size(size, mul_size(TotalProcs, (fpLockBitsSize + fpRelIdSize)));
     132             : 
     133        6312 :     return size;
     134             : }
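
The MAXALIGN() calls round each per-backend chunk up to the platform's maximum alignment, so the fast-path arrays carved out later start on aligned boundaries. A sketch of the usual round-up-to-a-power-of-two idiom behind such macros (8-byte alignment assumed purely for illustration):

    #include <stdio.h>
    #include <stddef.h>

    /*
     * Round x up to a multiple of ALIGNVAL, which must be a power of two.
     * This is the classic idiom behind alignment macros like MAXALIGN().
     */
    #define ALIGNVAL    8
    #define ALIGN_UP(x) (((x) + ((size_t) ALIGNVAL - 1)) & ~((size_t) ALIGNVAL - 1))

    int
    main(void)
    {
        for (size_t req = 1; req <= 17; req += 4)
            printf("%2zu -> %2zu\n", req, ALIGN_UP(req));
        return 0;
    }
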
     135             : 
     136             : /*
     137             :  * Report shared-memory space needed by InitProcGlobal.
     138             :  */
     139             : Size
     140        4108 : ProcGlobalShmemSize(void)
     141             : {
     142        4108 :     Size        size = 0;
     143             : 
     144             :     /* ProcGlobal */
     145        4108 :     size = add_size(size, sizeof(PROC_HDR));
     146        4108 :     size = add_size(size, sizeof(slock_t));
     147             : 
     148        4108 :     size = add_size(size, PGSemaphoreShmemSize(ProcGlobalSemas()));
     149        4108 :     size = add_size(size, PGProcShmemSize());
     150        4108 :     size = add_size(size, FastPathLockShmemSize());
     151             : 
     152        4108 :     return size;
     153             : }
     154             : 
     155             : /*
     156             :  * Report number of semaphores needed by InitProcGlobal.
     157             :  */
     158             : int
     159        8212 : ProcGlobalSemas(void)
     160             : {
     161             :     /*
     162             :      * We need a sema per backend (including autovacuum), plus one for each
     163             :      * auxiliary process.
     164             :      */
     165        8212 :     return MaxBackends + NUM_AUXILIARY_PROCS;
     166             : }
     167             : 
     168             : /*
     169             :  * InitProcGlobal -
     170             :  *    Initialize the global process table during postmaster or standalone
     171             :  *    backend startup.
     172             :  *
     173             :  *    We also create all the per-process semaphores we will need to support
     174             :  *    the requested number of backends.  We used to allocate semaphores
     175             :  *    only when backends were actually started up, but that is bad because
     176             :  *    it lets Postgres fail under load --- a lot of Unix systems are
     177             :  *    (mis)configured with small limits on the number of semaphores, and
     178             :  *    running out when trying to start another backend is a common failure.
     179             :  *    So, now we grab enough semaphores to support the desired max number
     180             :  *    of backends immediately at initialization --- if the sysadmin has set
     181             :  *    MaxConnections, max_worker_processes, max_wal_senders, or
     182             :  *    autovacuum_worker_slots higher than his kernel will support, he'll
     183             :  *    find out sooner rather than later.
     184             :  *
     185             :  *    Another reason for creating semaphores here is that the semaphore
     186             :  *    implementation typically requires us to create semaphores in the
     187             :  *    postmaster, not in backends.
     188             :  *
     189             :  * Note: this is NOT called by individual backends under a postmaster,
     190             :  * not even in the EXEC_BACKEND case.  The ProcGlobal and AuxiliaryProcs
     191             :  * pointers must be propagated specially for EXEC_BACKEND operation.
     192             :  */
     193             : void
     194        2204 : InitProcGlobal(void)
     195             : {
     196             :     PGPROC     *procs;
     197             :     int         i,
     198             :                 j;
     199             :     bool        found;
     200        2204 :     uint32      TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
     201             : 
     202             :     /* Used for setup of per-backend fast-path slots. */
     203             :     char       *fpPtr,
     204             :                *fpEndPtr PG_USED_FOR_ASSERTS_ONLY;
     205             :     Size        fpLockBitsSize,
     206             :                 fpRelIdSize;
     207             :     Size        requestSize;
     208             :     char       *ptr;
     209             : 
     210             :     /* Create the ProcGlobal shared structure */
     211        2204 :     ProcGlobal = (PROC_HDR *)
     212        2204 :         ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
     213             :     Assert(!found);
     214             : 
     215             :     /*
     216             :      * Initialize the data structures.
     217             :      */
     218        2204 :     ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
     219        2204 :     dlist_init(&ProcGlobal->freeProcs);
     220        2204 :     dlist_init(&ProcGlobal->autovacFreeProcs);
     221        2204 :     dlist_init(&ProcGlobal->bgworkerFreeProcs);
     222        2204 :     dlist_init(&ProcGlobal->walsenderFreeProcs);
     223        2204 :     ProcGlobal->startupBufferPinWaitBufId = -1;
     224        2204 :     ProcGlobal->walwriterProc = INVALID_PROC_NUMBER;
     225        2204 :     ProcGlobal->checkpointerProc = INVALID_PROC_NUMBER;
     226        2204 :     pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PROC_NUMBER);
     227        2204 :     pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PROC_NUMBER);
     228             : 
     229             :     /*
     230             :      * Create and initialize all the PGPROC structures we'll need.  There are
     231             :      * six separate consumers: (1) normal backends, (2) autovacuum workers and
     232             :      * special workers, (3) background workers, (4) walsenders, (5) auxiliary
     233             :      * processes, and (6) prepared transactions.  (For largely-historical
     234             :      * reasons, we combine autovacuum and special workers into one category
     235             :      * with a single freelist.)  Each PGPROC structure is dedicated to exactly
     236             :      * one of these purposes, and they do not move between groups.
     237             :      */
     238        2204 :     requestSize = PGProcShmemSize();
     239             : 
     240        2204 :     ptr = ShmemInitStruct("PGPROC structures",
     241             :                           requestSize,
     242             :                           &found);
     243             : 
     244        2204 :     MemSet(ptr, 0, requestSize);
     245             : 
     246        2204 :     procs = (PGPROC *) ptr;
     247        2204 :     ptr = (char *) ptr + TotalProcs * sizeof(PGPROC);
     248             : 
     249        2204 :     ProcGlobal->allProcs = procs;
     250             :     /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
     251        2204 :     ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
     252             : 
     253             :     /*
     254             :      * Allocate arrays mirroring PGPROC fields in a dense manner. See
     255             :      * PROC_HDR.
     256             :      *
     257             :      * XXX: It might make sense to increase padding for these arrays, given
     258             :      * how hotly they are accessed.
     259             :      */
     260        2204 :     ProcGlobal->xids = (TransactionId *) ptr;
     261        2204 :     ptr = (char *) ptr + (TotalProcs * sizeof(*ProcGlobal->xids));
     262             : 
     263        2204 :     ProcGlobal->subxidStates = (XidCacheStatus *) ptr;
     264        2204 :     ptr = (char *) ptr + (TotalProcs * sizeof(*ProcGlobal->subxidStates));
     265             : 
     266        2204 :     ProcGlobal->statusFlags = (uint8 *) ptr;
     267        2204 :     ptr = (char *) ptr + (TotalProcs * sizeof(*ProcGlobal->statusFlags));
     268             : 
      269             :     /* make sure we didn't overflow */
     270             :     Assert((ptr > (char *) procs) && (ptr <= (char *) procs + requestSize));
     271             : 
     272             :     /*
     273             :      * Allocate arrays for fast-path locks. Those are variable-length, so
     274             :      * can't be included in PGPROC directly. We allocate a separate piece of
     275             :      * shared memory and then divide that between backends.
     276             :      */
     277        2204 :     fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
     278        2204 :     fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
     279             : 
     280        2204 :     requestSize = FastPathLockShmemSize();
     281             : 
     282        2204 :     fpPtr = ShmemInitStruct("Fast-Path Lock Array",
     283             :                             requestSize,
     284             :                             &found);
     285             : 
     286        2204 :     MemSet(fpPtr, 0, requestSize);
     287             : 
     288             :     /* For asserts checking we did not overflow. */
     289        2204 :     fpEndPtr = fpPtr + requestSize;
     290             : 
     291             :     /* Reserve space for semaphores. */
     292        2204 :     PGReserveSemaphores(ProcGlobalSemas());
     293             : 
     294      291134 :     for (i = 0; i < TotalProcs; i++)
     295             :     {
     296      288930 :         PGPROC     *proc = &procs[i];
     297             : 
     298             :         /* Common initialization for all PGPROCs, regardless of type. */
     299             : 
     300             :         /*
     301             :          * Set the fast-path lock arrays, and move the pointer. We interleave
     302             :          * the two arrays, to (hopefully) get some locality for each backend.
     303             :          */
     304      288930 :         proc->fpLockBits = (uint64 *) fpPtr;
     305      288930 :         fpPtr += fpLockBitsSize;
     306             : 
     307      288930 :         proc->fpRelId = (Oid *) fpPtr;
     308      288930 :         fpPtr += fpRelIdSize;
     309             : 
     310             :         Assert(fpPtr <= fpEndPtr);
     311             : 
     312             :         /*
     313             :          * Set up per-PGPROC semaphore, latch, and fpInfoLock.  Prepared xact
      314             :          * dummy PGPROCs don't need these, though; they're never associated
      315             :          * with a real process.
     316             :          */
     317      288930 :         if (i < MaxBackends + NUM_AUXILIARY_PROCS)
     318             :         {
     319      287222 :             proc->sem = PGSemaphoreCreate();
     320      287222 :             InitSharedLatch(&(proc->procLatch));
     321      287222 :             LWLockInitialize(&(proc->fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
     322             :         }
     323             : 
     324             :         /*
     325             :          * Newly created PGPROCs for normal backends, autovacuum workers,
     326             :          * special workers, bgworkers, and walsenders must be queued up on the
     327             :          * appropriate free list.  Because there can only ever be a small,
     328             :          * fixed number of auxiliary processes, no free list is used in that
     329             :          * case; InitAuxiliaryProcess() instead uses a linear search.  PGPROCs
     330             :          * for prepared transactions are added to a free list by
     331             :          * TwoPhaseShmemInit().
     332             :          */
     333      288930 :         if (i < MaxConnections)
     334             :         {
     335             :             /* PGPROC for normal backend, add to freeProcs list */
     336      142882 :             dlist_push_tail(&ProcGlobal->freeProcs, &proc->links);
     337      142882 :             proc->procgloballist = &ProcGlobal->freeProcs;
     338             :         }
     339      146048 :         else if (i < MaxConnections + autovacuum_worker_slots + NUM_SPECIAL_WORKER_PROCS)
     340             :         {
     341             :             /* PGPROC for AV or special worker, add to autovacFreeProcs list */
     342       28544 :             dlist_push_tail(&ProcGlobal->autovacFreeProcs, &proc->links);
     343       28544 :             proc->procgloballist = &ProcGlobal->autovacFreeProcs;
     344             :         }
     345      117504 :         else if (i < MaxConnections + autovacuum_worker_slots + NUM_SPECIAL_WORKER_PROCS + max_worker_processes)
     346             :         {
     347             :             /* PGPROC for bgworker, add to bgworkerFreeProcs list */
     348       17628 :             dlist_push_tail(&ProcGlobal->bgworkerFreeProcs, &proc->links);
     349       17628 :             proc->procgloballist = &ProcGlobal->bgworkerFreeProcs;
     350             :         }
     351       99876 :         else if (i < MaxBackends)
     352             :         {
     353             :             /* PGPROC for walsender, add to walsenderFreeProcs list */
     354       14416 :             dlist_push_tail(&ProcGlobal->walsenderFreeProcs, &proc->links);
     355       14416 :             proc->procgloballist = &ProcGlobal->walsenderFreeProcs;
     356             :         }
     357             : 
     358             :         /* Initialize myProcLocks[] shared memory queues. */
     359     4911810 :         for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
     360     4622880 :             dlist_init(&(proc->myProcLocks[j]));
     361             : 
     362             :         /* Initialize lockGroupMembers list. */
     363      288930 :         dlist_init(&proc->lockGroupMembers);
     364             : 
     365             :         /*
     366             :          * Initialize the atomic variables, otherwise, it won't be safe to
     367             :          * access them for backends that aren't currently in use.
     368             :          */
     369      288930 :         pg_atomic_init_u32(&(proc->procArrayGroupNext), INVALID_PROC_NUMBER);
     370      288930 :         pg_atomic_init_u32(&(proc->clogGroupNext), INVALID_PROC_NUMBER);
     371      288930 :         pg_atomic_init_u64(&(proc->waitStart), 0);
     372             :     }
     373             : 
     374             :     /* Should have consumed exactly the expected amount of fast-path memory. */
     375             :     Assert(fpPtr == fpEndPtr);
     376             : 
     377             :     /*
     378             :      * Save pointers to the blocks of PGPROC structures reserved for auxiliary
     379             :      * processes and prepared transactions.
     380             :      */
     381        2204 :     AuxiliaryProcs = &procs[MaxBackends];
     382        2204 :     PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
     383             : 
     384             :     /* Create ProcStructLock spinlock, too */
     385        2204 :     ProcStructLock = (slock_t *) ShmemInitStruct("ProcStructLock spinlock",
     386             :                                                  sizeof(slock_t),
     387             :                                                  &found);
     388        2204 :     SpinLockInit(ProcStructLock);
     389        2204 : }
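
InitProcGlobal() obtains one contiguous region per ShmemInitStruct() call and carves it into several arrays by bumping a char pointer past each one, finishing with an Assert that the cursor stayed within the request. A self-contained sketch of that carving pattern, with malloc standing in for shared memory and illustrative struct and field names:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct Proc
    {
        uint64_t    payload[4];     /* stands in for the real PGPROC fields */
    } Proc;

    int
    main(void)
    {
        size_t      nprocs = 128;
        size_t      request = nprocs * sizeof(Proc)     /* main struct array */
            + nprocs * sizeof(uint32_t)                 /* dense "xids" mirror */
            + nprocs * sizeof(uint8_t);                 /* dense "statusFlags" mirror */
        char       *base = malloc(request);
        char       *ptr = base;

        memset(ptr, 0, request);

        /* Carve the single allocation into three arrays by bumping the cursor. */
        Proc       *procs = (Proc *) ptr;

        ptr += nprocs * sizeof(Proc);

        uint32_t   *xids = (uint32_t *) ptr;

        ptr += nprocs * sizeof(uint32_t);

        uint8_t    *flags = (uint8_t *) ptr;

        ptr += nprocs * sizeof(uint8_t);

        /* Same spirit as the Assert in InitProcGlobal(): no overflow. */
        assert(ptr == base + request);

        printf("%p %p %p\n", (void *) procs, (void *) xids, (void *) flags);
        free(base);
        return 0;
    }
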
     390             : 
     391             : /*
     392             :  * InitProcess -- initialize a per-process PGPROC entry for this backend
     393             :  */
     394             : void
     395       36416 : InitProcess(void)
     396             : {
     397             :     dlist_head *procgloballist;
     398             : 
     399             :     /*
     400             :      * ProcGlobal should be set up already (if we are a backend, we inherit
     401             :      * this by fork() or EXEC_BACKEND mechanism from the postmaster).
     402             :      */
     403       36416 :     if (ProcGlobal == NULL)
     404           0 :         elog(PANIC, "proc header uninitialized");
     405             : 
     406       36416 :     if (MyProc != NULL)
     407           0 :         elog(ERROR, "you already exist");
     408             : 
     409             :     /*
     410             :      * Before we start accessing the shared memory in a serious way, mark
     411             :      * ourselves as an active postmaster child; this is so that the postmaster
     412             :      * can detect it if we exit without cleaning up.
     413             :      */
     414       36416 :     if (IsUnderPostmaster)
     415       36178 :         RegisterPostmasterChildActive();
     416             : 
     417             :     /*
     418             :      * Decide which list should supply our PGPROC.  This logic must match the
     419             :      * way the freelists were constructed in InitProcGlobal().
     420             :      */
     421       36416 :     if (AmAutoVacuumWorkerProcess() || AmSpecialWorkerProcess())
     422        4986 :         procgloballist = &ProcGlobal->autovacFreeProcs;
     423       31430 :     else if (AmBackgroundWorkerProcess())
     424        4772 :         procgloballist = &ProcGlobal->bgworkerFreeProcs;
     425       26658 :     else if (AmWalSenderProcess())
     426        2358 :         procgloballist = &ProcGlobal->walsenderFreeProcs;
     427             :     else
     428       24300 :         procgloballist = &ProcGlobal->freeProcs;
     429             : 
     430             :     /*
     431             :      * Try to get a proc struct from the appropriate free list.  If this
     432             :      * fails, we must be out of PGPROC structures (not to mention semaphores).
     433             :      *
     434             :      * While we are holding the ProcStructLock, also copy the current shared
     435             :      * estimate of spins_per_delay to local storage.
     436             :      */
     437       36416 :     SpinLockAcquire(ProcStructLock);
     438             : 
     439       36416 :     set_spins_per_delay(ProcGlobal->spins_per_delay);
     440             : 
     441       36416 :     if (!dlist_is_empty(procgloballist))
     442             :     {
     443       36410 :         MyProc = dlist_container(PGPROC, links, dlist_pop_head_node(procgloballist));
     444       36410 :         SpinLockRelease(ProcStructLock);
     445             :     }
     446             :     else
     447             :     {
     448             :         /*
     449             :          * If we reach here, all the PGPROCs are in use.  This is one of the
     450             :          * possible places to detect "too many backends", so give the standard
     451             :          * error message.  XXX do we need to give a different failure message
     452             :          * in the autovacuum case?
     453             :          */
     454           6 :         SpinLockRelease(ProcStructLock);
     455           6 :         if (AmWalSenderProcess())
     456           4 :             ereport(FATAL,
     457             :                     (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
     458             :                      errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
     459             :                             max_wal_senders)));
     460           2 :         ereport(FATAL,
     461             :                 (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
     462             :                  errmsg("sorry, too many clients already")));
     463             :     }
     464       36410 :     MyProcNumber = GetNumberFromPGProc(MyProc);
     465             : 
     466             :     /*
     467             :      * Cross-check that the PGPROC is of the type we expect; if this were not
     468             :      * the case, it would get returned to the wrong list.
     469             :      */
     470             :     Assert(MyProc->procgloballist == procgloballist);
     471             : 
     472             :     /*
     473             :      * Initialize all fields of MyProc, except for those previously
     474             :      * initialized by InitProcGlobal.
     475             :      */
     476       36410 :     dlist_node_init(&MyProc->links);
     477       36410 :     MyProc->waitStatus = PROC_WAIT_STATUS_OK;
     478       36410 :     MyProc->fpVXIDLock = false;
     479       36410 :     MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
     480       36410 :     MyProc->xid = InvalidTransactionId;
     481       36410 :     MyProc->xmin = InvalidTransactionId;
     482       36410 :     MyProc->pid = MyProcPid;
     483       36410 :     MyProc->vxid.procNumber = MyProcNumber;
     484       36410 :     MyProc->vxid.lxid = InvalidLocalTransactionId;
     485             :     /* databaseId and roleId will be filled in later */
     486       36410 :     MyProc->databaseId = InvalidOid;
     487       36410 :     MyProc->roleId = InvalidOid;
     488       36410 :     MyProc->tempNamespaceId = InvalidOid;
     489       36410 :     MyProc->isRegularBackend = AmRegularBackendProcess();
     490       36410 :     MyProc->delayChkptFlags = 0;
     491       36410 :     MyProc->statusFlags = 0;
     492             :     /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
     493       36410 :     if (AmAutoVacuumWorkerProcess())
     494        4172 :         MyProc->statusFlags |= PROC_IS_AUTOVACUUM;
     495       36410 :     MyProc->lwWaiting = LW_WS_NOT_WAITING;
     496       36410 :     MyProc->lwWaitMode = 0;
     497       36410 :     MyProc->waitLock = NULL;
     498       36410 :     MyProc->waitProcLock = NULL;
     499       36410 :     pg_atomic_write_u64(&MyProc->waitStart, 0);
     500             : #ifdef USE_ASSERT_CHECKING
     501             :     {
     502             :         int         i;
     503             : 
     504             :         /* Last process should have released all locks. */
     505             :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
     506             :             Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
     507             :     }
     508             : #endif
     509       36410 :     MyProc->recoveryConflictPending = false;
     510             : 
     511             :     /* Initialize fields for sync rep */
     512       36410 :     MyProc->waitLSN = 0;
     513       36410 :     MyProc->syncRepState = SYNC_REP_NOT_WAITING;
     514       36410 :     dlist_node_init(&MyProc->syncRepLinks);
     515             : 
     516             :     /* Initialize fields for group XID clearing. */
     517       36410 :     MyProc->procArrayGroupMember = false;
     518       36410 :     MyProc->procArrayGroupMemberXid = InvalidTransactionId;
     519             :     Assert(pg_atomic_read_u32(&MyProc->procArrayGroupNext) == INVALID_PROC_NUMBER);
     520             : 
     521             :     /* Check that group locking fields are in a proper initial state. */
     522             :     Assert(MyProc->lockGroupLeader == NULL);
     523             :     Assert(dlist_is_empty(&MyProc->lockGroupMembers));
     524             : 
     525             :     /* Initialize wait event information. */
     526       36410 :     MyProc->wait_event_info = 0;
     527             : 
     528             :     /* Initialize fields for group transaction status update. */
     529       36410 :     MyProc->clogGroupMember = false;
     530       36410 :     MyProc->clogGroupMemberXid = InvalidTransactionId;
     531       36410 :     MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
     532       36410 :     MyProc->clogGroupMemberPage = -1;
     533       36410 :     MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
     534             :     Assert(pg_atomic_read_u32(&MyProc->clogGroupNext) == INVALID_PROC_NUMBER);
     535             : 
     536             :     /*
     537             :      * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
     538             :      * on it.  That allows us to repoint the process latch, which so far
      539             :      * points to the process-local one, to the shared one.
     540             :      */
     541       36410 :     OwnLatch(&MyProc->procLatch);
     542       36410 :     SwitchToSharedLatch();
     543             : 
     544             :     /* now that we have a proc, report wait events to shared memory */
     545       36410 :     pgstat_set_wait_event_storage(&MyProc->wait_event_info);
     546             : 
     547             :     /*
     548             :      * We might be reusing a semaphore that belonged to a failed process. So
     549             :      * be careful and reinitialize its value here.  (This is not strictly
     550             :      * necessary anymore, but seems like a good idea for cleanliness.)
     551             :      */
     552       36410 :     PGSemaphoreReset(MyProc->sem);
     553             : 
     554             :     /*
     555             :      * Arrange to clean up at backend exit.
     556             :      */
     557       36410 :     on_shmem_exit(ProcKill, 0);
     558             : 
     559             :     /*
     560             :      * Now that we have a PGPROC, we could try to acquire locks, so initialize
     561             :      * local state needed for LWLocks, and the deadlock checker.
     562             :      */
     563       36410 :     InitLWLockAccess();
     564       36410 :     InitDeadLockChecking();
     565             : 
     566             : #ifdef EXEC_BACKEND
     567             : 
     568             :     /*
     569             :      * Initialize backend-local pointers to all the shared data structures.
     570             :      * (We couldn't do this until now because it needs LWLocks.)
     571             :      */
     572             :     if (IsUnderPostmaster)
     573             :         AttachSharedMemoryStructs();
     574             : #endif
     575       36410 : }
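
The freelist pop above relies on PostgreSQL's intrusive dlist: each PGPROC embeds its own link node, and dlist_container() recovers the enclosing struct from the node's address. A standalone sketch of that container-of pattern over a simplified singly-linked freelist (illustrative names, not the real dlist API):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct node
    {
        struct node *next;
    } node;

    typedef struct proc
    {
        int         id;
        node        links;          /* intrusive link, like PGPROC->links */
    } proc;

    /* Recover the enclosing struct from a pointer to its embedded node,
     * in the spirit of dlist_container(). */
    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    int
    main(void)
    {
        proc        pool[3] = {{.id = 0}, {.id = 1}, {.id = 2}};
        node       *head = NULL;

        /* Push every slot onto the freelist, like InitProcGlobal() does. */
        for (int i = 0; i < 3; i++)
        {
            pool[i].links.next = head;
            head = &pool[i].links;
        }

        /* Pop one, as InitProcess() pops from ProcGlobal->freeProcs. */
        node       *n = head;
        proc       *mine = container_of(n, proc, links);

        head = n->next;
        (void) head;
        printf("got proc %d\n", mine->id);  /* prints "got proc 2" */
        return 0;
    }
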
     576             : 
     577             : /*
     578             :  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
     579             :  *
     580             :  * This is separate from InitProcess because we can't acquire LWLocks until
     581             :  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
     582             :  * work until after we've done AttachSharedMemoryStructs.
     583             :  */
     584             : void
     585       36392 : InitProcessPhase2(void)
     586             : {
     587             :     Assert(MyProc != NULL);
     588             : 
     589             :     /*
     590             :      * Add our PGPROC to the PGPROC array in shared memory.
     591             :      */
     592       36392 :     ProcArrayAdd(MyProc);
     593             : 
     594             :     /*
     595             :      * Arrange to clean that up at backend exit.
     596             :      */
     597       36392 :     on_shmem_exit(RemoveProcFromArray, 0);
     598       36392 : }
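
on_shmem_exit() callbacks run in reverse order of registration, which is why registering RemoveProcFromArray here, after InitProcess() registered ProcKill, makes the ProcArray entry go away before the PGPROC itself is released. A toy sketch of such a LIFO callback stack (illustrative, not the actual ipc.c implementation):

    #include <stdio.h>

    typedef void (*exit_cb) (void);

    static exit_cb callbacks[16];
    static int  ncallbacks = 0;

    static void
    my_on_exit(exit_cb cb)
    {
        callbacks[ncallbacks++] = cb;
    }

    static void
    run_exit_callbacks(void)
    {
        /* Last registered runs first. */
        while (ncallbacks > 0)
            callbacks[--ncallbacks] ();
    }

    static void proc_kill(void)              { printf("ProcKill\n"); }
    static void remove_proc_from_array(void) { printf("RemoveProcFromArray\n"); }

    int
    main(void)
    {
        my_on_exit(proc_kill);              /* registered in InitProcess() */
        my_on_exit(remove_proc_from_array); /* registered in InitProcessPhase2() */
        run_exit_callbacks();               /* prints RemoveProcFromArray first */
        return 0;
    }
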
     599             : 
     600             : /*
     601             :  * InitAuxiliaryProcess -- create a PGPROC entry for an auxiliary process
     602             :  *
     603             :  * This is called by bgwriter and similar processes so that they will have a
     604             :  * MyProc value that's real enough to let them wait for LWLocks.  The PGPROC
     605             :  * and sema that are assigned are one of the extra ones created during
     606             :  * InitProcGlobal.
     607             :  *
     608             :  * Auxiliary processes are presently not expected to wait for real (lockmgr)
     609             :  * locks, so we need not set up the deadlock checker.  They are never added
     610             :  * to the ProcArray or the sinval messaging mechanism, either.  They also
     611             :  * don't get a VXID assigned, since this is only useful when we actually
     612             :  * hold lockmgr locks.
     613             :  *
      614             :  * The startup process, however, uses locks but never waits for them in the
      615             :  * normal backend sense.  It also takes part in sinval messaging as a
      616             :  * sendOnly process, so it never reads messages from the sinval queue.  So
      617             :  * the startup process does have a VXID and does show up in pg_locks.
     618             :  */
     619             : void
     620        8692 : InitAuxiliaryProcess(void)
     621             : {
     622             :     PGPROC     *auxproc;
     623             :     int         proctype;
     624             : 
     625             :     /*
     626             :      * ProcGlobal should be set up already (if we are a backend, we inherit
     627             :      * this by fork() or EXEC_BACKEND mechanism from the postmaster).
     628             :      */
     629        8692 :     if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
     630           0 :         elog(PANIC, "proc header uninitialized");
     631             : 
     632        8692 :     if (MyProc != NULL)
     633           0 :         elog(ERROR, "you already exist");
     634             : 
     635        8692 :     if (IsUnderPostmaster)
     636        8692 :         RegisterPostmasterChildActive();
     637             : 
     638             :     /*
     639             :      * We use the ProcStructLock to protect assignment and releasing of
     640             :      * AuxiliaryProcs entries.
     641             :      *
     642             :      * While we are holding the ProcStructLock, also copy the current shared
     643             :      * estimate of spins_per_delay to local storage.
     644             :      */
     645        8692 :     SpinLockAcquire(ProcStructLock);
     646             : 
     647        8692 :     set_spins_per_delay(ProcGlobal->spins_per_delay);
     648             : 
     649             :     /*
     650             :      * Find a free auxproc ... *big* trouble if there isn't one ...
     651             :      */
     652       34128 :     for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
     653             :     {
     654       34128 :         auxproc = &AuxiliaryProcs[proctype];
     655       34128 :         if (auxproc->pid == 0)
     656        8692 :             break;
     657             :     }
     658        8692 :     if (proctype >= NUM_AUXILIARY_PROCS)
     659             :     {
     660           0 :         SpinLockRelease(ProcStructLock);
     661           0 :         elog(FATAL, "all AuxiliaryProcs are in use");
     662             :     }
     663             : 
     664             :     /* Mark auxiliary proc as in use by me */
     665             :     /* use volatile pointer to prevent code rearrangement */
     666        8692 :     ((volatile PGPROC *) auxproc)->pid = MyProcPid;
     667             : 
     668        8692 :     SpinLockRelease(ProcStructLock);
     669             : 
     670        8692 :     MyProc = auxproc;
     671        8692 :     MyProcNumber = GetNumberFromPGProc(MyProc);
     672             : 
     673             :     /*
     674             :      * Initialize all fields of MyProc, except for those previously
     675             :      * initialized by InitProcGlobal.
     676             :      */
     677        8692 :     dlist_node_init(&MyProc->links);
     678        8692 :     MyProc->waitStatus = PROC_WAIT_STATUS_OK;
     679        8692 :     MyProc->fpVXIDLock = false;
     680        8692 :     MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
     681        8692 :     MyProc->xid = InvalidTransactionId;
     682        8692 :     MyProc->xmin = InvalidTransactionId;
     683        8692 :     MyProc->vxid.procNumber = INVALID_PROC_NUMBER;
     684        8692 :     MyProc->vxid.lxid = InvalidLocalTransactionId;
     685        8692 :     MyProc->databaseId = InvalidOid;
     686        8692 :     MyProc->roleId = InvalidOid;
     687        8692 :     MyProc->tempNamespaceId = InvalidOid;
     688        8692 :     MyProc->isRegularBackend = false;
     689        8692 :     MyProc->delayChkptFlags = 0;
     690        8692 :     MyProc->statusFlags = 0;
     691        8692 :     MyProc->lwWaiting = LW_WS_NOT_WAITING;
     692        8692 :     MyProc->lwWaitMode = 0;
     693        8692 :     MyProc->waitLock = NULL;
     694        8692 :     MyProc->waitProcLock = NULL;
     695        8692 :     pg_atomic_write_u64(&MyProc->waitStart, 0);
     696             : #ifdef USE_ASSERT_CHECKING
     697             :     {
     698             :         int         i;
     699             : 
     700             :         /* Last process should have released all locks. */
     701             :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
     702             :             Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
     703             :     }
     704             : #endif
     705             : 
     706             :     /*
     707             :      * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
     708             :      * on it.  That allows us to repoint the process latch, which so far
     709             :      * points to process local one, to the shared one.
     710             :      */
     711        8692 :     OwnLatch(&MyProc->procLatch);
     712        8692 :     SwitchToSharedLatch();
     713             : 
     714             :     /* now that we have a proc, report wait events to shared memory */
     715        8692 :     pgstat_set_wait_event_storage(&MyProc->wait_event_info);
     716             : 
     717             :     /* Check that group locking fields are in a proper initial state. */
     718             :     Assert(MyProc->lockGroupLeader == NULL);
     719             :     Assert(dlist_is_empty(&MyProc->lockGroupMembers));
     720             : 
     721             :     /*
     722             :      * We might be reusing a semaphore that belonged to a failed process. So
     723             :      * be careful and reinitialize its value here.  (This is not strictly
     724             :      * necessary anymore, but seems like a good idea for cleanliness.)
     725             :      */
     726        8692 :     PGSemaphoreReset(MyProc->sem);
     727             : 
     728             :     /*
     729             :      * Arrange to clean up at process exit.
     730             :      */
     731        8692 :     on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
     732             : 
     733             :     /*
     734             :      * Now that we have a PGPROC, we could try to acquire lightweight locks.
     735             :      * Initialize local state needed for them.  (Heavyweight locks cannot be
     736             :      * acquired in aux processes.)
     737             :      */
     738        8692 :     InitLWLockAccess();
     739             : 
     740             : #ifdef EXEC_BACKEND
     741             : 
     742             :     /*
     743             :      * Initialize backend-local pointers to all the shared data structures.
     744             :      * (We couldn't do this until now because it needs LWLocks.)
     745             :      */
     746             :     if (IsUnderPostmaster)
     747             :         AttachSharedMemoryStructs();
     748             : #endif
     749        8692 : }
     750             : 
     751             : /*
     752             :  * Used from bufmgr to share the value of the buffer that Startup waits on,
     753             :  * or to reset the value to "not waiting" (-1). This allows processing
      754             :  * of recovery conflicts for buffer pins.  The value is set before backends
      755             :  * look at it, so no locking is required, especially since the store is a
      756             :  * single atomic integer assignment.
     757             :  */
     758             : void
     759          40 : SetStartupBufferPinWaitBufId(int bufid)
     760             : {
     761             :     /* use volatile pointer to prevent code rearrangement */
     762          40 :     volatile PROC_HDR *procglobal = ProcGlobal;
     763             : 
     764          40 :     procglobal->startupBufferPinWaitBufId = bufid;
     765          40 : }
     766             : 
     767             : /*
     768             :  * Used by backends when they receive a request to check for buffer pin waits.
     769             :  */
     770             : int
     771           6 : GetStartupBufferPinWaitBufId(void)
     772             : {
     773             :     /* use volatile pointer to prevent code rearrangement */
     774           6 :     volatile PROC_HDR *procglobal = ProcGlobal;
     775             : 
     776           6 :     return procglobal->startupBufferPinWaitBufId;
     777             : }
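
The volatile-qualified pointer in these two functions keeps the compiler from caching the shared value in a register or reordering the access relative to other volatile accesses; it is not a CPU memory barrier. A minimal sketch of the idiom:

    #include <stdio.h>

    static int  shared_slot = -1;

    void
    set_slot(int v)
    {
        /* Each access through a volatile pointer must really touch memory. */
        volatile int *p = &shared_slot;

        *p = v;                 /* this store cannot be elided or hoisted */
    }

    int
    get_slot(void)
    {
        volatile int *p = &shared_slot;

        return *p;              /* always re-read from memory */
    }

    int
    main(void)
    {
        set_slot(7);
        printf("%d\n", get_slot());
        return 0;
    }
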
     778             : 
     779             : /*
     780             :  * Check whether there are at least N free PGPROC objects.  If false is
     781             :  * returned, *nfree will be set to the number of free PGPROC objects.
     782             :  * Otherwise, *nfree will be set to n.
     783             :  *
     784             :  * Note: this is designed on the assumption that N will generally be small.
     785             :  */
     786             : bool
     787         488 : HaveNFreeProcs(int n, int *nfree)
     788             : {
     789             :     dlist_iter  iter;
     790             : 
     791             :     Assert(n > 0);
     792             :     Assert(nfree);
     793             : 
     794         488 :     SpinLockAcquire(ProcStructLock);
     795             : 
     796         488 :     *nfree = 0;
     797        1458 :     dlist_foreach(iter, &ProcGlobal->freeProcs)
     798             :     {
     799        1450 :         (*nfree)++;
     800        1450 :         if (*nfree == n)
     801         480 :             break;
     802             :     }
     803             : 
     804         488 :     SpinLockRelease(ProcStructLock);
     805             : 
     806         488 :     return (*nfree == n);
     807             : }
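
HaveNFreeProcs() stops walking the freelist as soon as n entries have been seen, so the usual case costs O(n) rather than the full freelist length, and on failure the caller learns how many entries actually existed. A trivial standalone sketch of that early-exit contract (hypothetical have_n_free() over a plain linked list):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct node
    {
        struct node *next;
    } node;

    /*
     * Early-exit counting in the HaveNFreeProcs() style: stop once n items
     * have been seen; *nfree ends up as n on success, or the actual count.
     */
    static bool
    have_n_free(node *head, int n, int *nfree)
    {
        *nfree = 0;
        for (node *it = head; it != NULL; it = it->next)
        {
            (*nfree)++;
            if (*nfree == n)
                break;
        }
        return *nfree == n;
    }

    int
    main(void)
    {
        node        c = {NULL};
        node        b = {&c};
        node        a = {&b};
        int         nfree;

        if (!have_n_free(&a, 5, &nfree))
            printf("only %d free, need 5\n", nfree);    /* prints "only 3 ..." */
        return 0;
    }
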
     808             : 
     809             : /*
     810             :  * Cancel any pending wait for lock, when aborting a transaction, and revert
     811             :  * any strong lock count acquisition for a lock being acquired.
     812             :  *
     813             :  * (Normally, this would only happen if we accept a cancel/die
     814             :  * interrupt while waiting; but an ereport(ERROR) before or during the lock
     815             :  * wait is within the realm of possibility, too.)
     816             :  */
     817             : void
     818     1167382 : LockErrorCleanup(void)
     819             : {
     820             :     LOCALLOCK  *lockAwaited;
     821             :     LWLock     *partitionLock;
     822             :     DisableTimeoutParams timeouts[2];
     823             : 
     824     1167382 :     HOLD_INTERRUPTS();
     825             : 
     826     1167382 :     AbortStrongLockAcquire();
     827             : 
     828             :     /* Nothing to do if we weren't waiting for a lock */
     829     1167382 :     lockAwaited = GetAwaitedLock();
     830     1167382 :     if (lockAwaited == NULL)
     831             :     {
     832     1167300 :         RESUME_INTERRUPTS();
     833     1167300 :         return;
     834             :     }
     835             : 
     836             :     /*
     837             :      * Turn off the deadlock and lock timeout timers, if they are still
     838             :      * running (see ProcSleep).  Note we must preserve the LOCK_TIMEOUT
     839             :      * indicator flag, since this function is executed before
     840             :      * ProcessInterrupts when responding to SIGINT; else we'd lose the
     841             :      * knowledge that the SIGINT came from a lock timeout and not an external
     842             :      * source.
     843             :      */
     844          82 :     timeouts[0].id = DEADLOCK_TIMEOUT;
     845          82 :     timeouts[0].keep_indicator = false;
     846          82 :     timeouts[1].id = LOCK_TIMEOUT;
     847          82 :     timeouts[1].keep_indicator = true;
     848          82 :     disable_timeouts(timeouts, 2);
     849             : 
     850             :     /* Unlink myself from the wait queue, if on it (might not be anymore!) */
     851          82 :     partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
     852          82 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
     853             : 
     854          82 :     if (!dlist_node_is_detached(&MyProc->links))
     855             :     {
     856             :         /* We could not have been granted the lock yet */
     857          80 :         RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
     858             :     }
     859             :     else
     860             :     {
     861             :         /*
     862             :          * Somebody kicked us off the lock queue already.  Perhaps they
     863             :          * granted us the lock, or perhaps they detected a deadlock. If they
     864             :          * did grant us the lock, we'd better remember it in our local lock
     865             :          * table.
     866             :          */
     867           2 :         if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
     868           2 :             GrantAwaitedLock();
     869             :     }
     870             : 
     871          82 :     ResetAwaitedLock();
     872             : 
     873          82 :     LWLockRelease(partitionLock);
     874             : 
     875          82 :     RESUME_INTERRUPTS();
     876             : }
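
The keep_indicator flag above preserves the LOCK_TIMEOUT "already fired" state while the timer itself is cancelled, so ProcessInterrupts can still tell a timeout-driven SIGINT from an external one. A toy sketch of that cancel-but-keep-the-flag idea (illustrative structures, not the timeout.c API):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct timeout
    {
        bool        active;
        bool        indicator;  /* set when the timeout fired */
    } timeout;

    static void
    disable_timeout_keep(timeout *t, bool keep_indicator)
    {
        t->active = false;
        if (!keep_indicator)
            t->indicator = false;   /* forget that it ever fired */
    }

    int
    main(void)
    {
        timeout     lock_timeout = {.active = false, .indicator = true};

        /* LockErrorCleanup() keeps LOCK_TIMEOUT's indicator... */
        disable_timeout_keep(&lock_timeout, true);
        if (lock_timeout.indicator)
            printf("SIGINT came from a lock timeout\n");
        return 0;
    }
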
     877             : 
     878             : 
     879             : /*
     880             :  * ProcReleaseLocks() -- release locks associated with current transaction
     881             :  *          at main transaction commit or abort
     882             :  *
     883             :  * At main transaction commit, we release standard locks except session locks.
     884             :  * At main transaction abort, we release all locks including session locks.
     885             :  *
     886             :  * Advisory locks are released only if they are transaction-level;
     887             :  * session-level holds remain, whether this is a commit or not.
     888             :  *
     889             :  * At subtransaction commit, we don't release any locks (so this func is not
     890             :  * needed at all); we will defer the releasing to the parent transaction.
     891             :  * At subtransaction abort, we release all locks held by the subtransaction;
     892             :  * this is implemented by retail releasing of the locks under control of
     893             :  * the ResourceOwner mechanism.
     894             :  */
     895             : void
     896     1105678 : ProcReleaseLocks(bool isCommit)
     897             : {
     898     1105678 :     if (!MyProc)
     899           0 :         return;
     900             :     /* If waiting, get off wait queue (should only be needed after error) */
     901     1105678 :     LockErrorCleanup();
     902             :     /* Release standard locks, including session-level if aborting */
     903     1105678 :     LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
     904             :     /* Release transaction-level advisory locks */
     905     1105678 :     LockReleaseAll(USER_LOCKMETHOD, false);
     906             : }
     907             : 
     908             : 
     909             : /*
     910             :  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
     911             :  */
     912             : static void
     913       36392 : RemoveProcFromArray(int code, Datum arg)
     914             : {
     915             :     Assert(MyProc != NULL);
     916       36392 :     ProcArrayRemove(MyProc, InvalidTransactionId);
     917       36392 : }
     918             : 
     919             : /*
     920             :  * ProcKill() -- Destroy the per-proc data structure for
     921             :  *      this process. Release any of its held LW locks.
     922             :  */
     923             : static void
     924       36410 : ProcKill(int code, Datum arg)
     925             : {
     926             :     PGPROC     *proc;
     927             :     dlist_head *procgloballist;
     928             : 
     929             :     Assert(MyProc != NULL);
     930             : 
     931             :     /* not safe if forked by system(), etc. */
     932       36410 :     if (MyProc->pid != (int) getpid())
     933           0 :         elog(PANIC, "ProcKill() called in child process");
     934             : 
     935             :     /* Make sure we're out of the sync rep lists */
     936       36410 :     SyncRepCleanupAtProcExit();
     937             : 
     938             : #ifdef USE_ASSERT_CHECKING
     939             :     {
     940             :         int         i;
     941             : 
     942             :         /* Last process should have released all locks. */
     943             :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
     944             :             Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
     945             :     }
     946             : #endif
     947             : 
     948             :     /*
     949             :      * Release any LW locks I am holding.  There really shouldn't be any, but
     950             :      * it's cheap to check again before we cut the knees off the LWLock
     951             :      * facility by releasing our PGPROC ...
     952             :      */
     953       36410 :     LWLockReleaseAll();
     954             : 
     955             :     /*
     956             :      * Cleanup waiting for LSN if any.
     957             :      */
     958       36410 :     WaitLSNCleanup();
     959             : 
     960             :     /* Cancel any pending condition variable sleep, too */
     961       36410 :     ConditionVariableCancelSleep();
     962             : 
     963             :     /*
     964             :      * Detach from any lock group of which we are a member.  If the leader
     965             :      * exits before all other group members, its PGPROC will remain allocated
     966             :      * until the last group process exits; that process must return the
     967             :      * leader's PGPROC to the appropriate list.
     968             :      */
     969       36410 :     if (MyProc->lockGroupLeader != NULL)
     970             :     {
     971        2892 :         PGPROC     *leader = MyProc->lockGroupLeader;
     972        2892 :         LWLock     *leader_lwlock = LockHashPartitionLockByProc(leader);
     973             : 
     974        2892 :         LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
     975             :         Assert(!dlist_is_empty(&leader->lockGroupMembers));
     976        2892 :         dlist_delete(&MyProc->lockGroupLink);
     977        2892 :         if (dlist_is_empty(&leader->lockGroupMembers))
     978             :         {
     979         144 :             leader->lockGroupLeader = NULL;
     980         144 :             if (leader != MyProc)
     981             :             {
     982           0 :                 procgloballist = leader->procgloballist;
     983             : 
     984             :                 /* Leader exited first; return its PGPROC. */
     985           0 :                 SpinLockAcquire(ProcStructLock);
     986           0 :                 dlist_push_head(procgloballist, &leader->links);
     987           0 :                 SpinLockRelease(ProcStructLock);
     988             :             }
     989             :         }
     990        2748 :         else if (leader != MyProc)
     991        2748 :             MyProc->lockGroupLeader = NULL;
     992        2892 :         LWLockRelease(leader_lwlock);
     993             :     }
     994             : 
     995             :     /*
     996             :      * Reset MyLatch to the process local one.  This is so that signal
     997             :      * handlers et al can continue using the latch after the shared latch
     998             :      * isn't ours anymore.
     999             :      *
    1000             :      * Similarly, stop reporting wait events to MyProc->wait_event_info.
    1001             :      *
    1002             :      * After that clear MyProc and disown the shared latch.
    1003             :      */
    1004       36410 :     SwitchBackToLocalLatch();
    1005       36410 :     pgstat_reset_wait_event_storage();
    1006             : 
    1007       36410 :     proc = MyProc;
    1008       36410 :     MyProc = NULL;
    1009       36410 :     MyProcNumber = INVALID_PROC_NUMBER;
    1010       36410 :     DisownLatch(&proc->procLatch);
    1011             : 
    1012             :     /* Mark the proc no longer in use */
    1013       36410 :     proc->pid = 0;
    1014       36410 :     proc->vxid.procNumber = INVALID_PROC_NUMBER;
    1015       36410 :     proc->vxid.lxid = InvalidTransactionId;
    1016             : 
    1017       36410 :     procgloballist = proc->procgloballist;
    1018       36410 :     SpinLockAcquire(ProcStructLock);
    1019             : 
    1020             :     /*
    1021             :      * If we're still a member of a locking group, that means we're a leader
    1022             :      * which has somehow exited before its children.  The last remaining child
    1023             :      * will release our PGPROC.  Otherwise, release it now.
    1024             :      */
    1025       36410 :     if (proc->lockGroupLeader == NULL)
    1026             :     {
    1027             :         /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
    1028             :         Assert(dlist_is_empty(&proc->lockGroupMembers));
    1029             : 
    1030             :         /* Return PGPROC structure (and semaphore) to appropriate freelist */
    1031       36410 :         dlist_push_tail(procgloballist, &proc->links);
    1032             :     }
    1033             : 
    1034             :     /* Update shared estimate of spins_per_delay */
    1035       36410 :     ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
    1036             : 
    1037       36410 :     SpinLockRelease(ProcStructLock);
    1038             : 
    1039             :     /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
    1040       36410 :     if (AutovacuumLauncherPid != 0)
    1041        4170 :         kill(AutovacuumLauncherPid, SIGUSR2);
    1042       36410 : }
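                      : 
                      : /*
                      :  * A minimal sketch of how ProcKill gets run: InitProcess() registers it
                      :  * as a shared-memory exit callback, so it fires during process shutdown
                      :  * (the Datum argument is unused):
                      :  *
                      :  *     on_shmem_exit(ProcKill, 0);
                      :  */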
    1043             : 
    1044             : /*
    1045             :  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
    1046             :  *      processes (bgwriter, etc).  The PGPROC and sema are not released, only
    1047             :  *      marked as not-in-use.
    1048             :  */
    1049             : static void
    1050        8692 : AuxiliaryProcKill(int code, Datum arg)
    1051             : {
    1052        8692 :     int         proctype = DatumGetInt32(arg);
    1053             :     PGPROC     *auxproc PG_USED_FOR_ASSERTS_ONLY;
    1054             :     PGPROC     *proc;
    1055             : 
    1056             :     Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
    1057             : 
    1058             :     /* not safe if forked by system(), etc. */
    1059        8692 :     if (MyProc->pid != (int) getpid())
    1060           0 :         elog(PANIC, "AuxiliaryProcKill() called in child process");
    1061             : 
    1062        8692 :     auxproc = &AuxiliaryProcs[proctype];
    1063             : 
    1064             :     Assert(MyProc == auxproc);
    1065             : 
    1066             :     /* Release any LW locks I am holding (see notes above) */
    1067        8692 :     LWLockReleaseAll();
    1068             : 
    1069             :     /* Cancel any pending condition variable sleep, too */
    1070        8692 :     ConditionVariableCancelSleep();
    1071             : 
    1072             :     /* look at the equivalent ProcKill() code for comments */
    1073        8692 :     SwitchBackToLocalLatch();
    1074        8692 :     pgstat_reset_wait_event_storage();
    1075             : 
    1076        8692 :     proc = MyProc;
    1077        8692 :     MyProc = NULL;
    1078        8692 :     MyProcNumber = INVALID_PROC_NUMBER;
    1079        8692 :     DisownLatch(&proc->procLatch);
    1080             : 
    1081        8692 :     SpinLockAcquire(ProcStructLock);
    1082             : 
    1083             :     /* Mark auxiliary proc no longer in use */
    1084        8692 :     proc->pid = 0;
    1085        8692 :     proc->vxid.procNumber = INVALID_PROC_NUMBER;
    1086        8692 :     proc->vxid.lxid = InvalidTransactionId;
    1087             : 
    1088             :     /* Update shared estimate of spins_per_delay */
    1089        8692 :     ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
    1090             : 
    1091        8692 :     SpinLockRelease(ProcStructLock);
    1092        8692 : }
    1093             : 
    1094             : /*
    1095             :  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
    1096             :  * given its PID
    1097             :  *
    1098             :  * Returns NULL if not found.
    1099             :  */
    1100             : PGPROC *
    1101       12554 : AuxiliaryPidGetProc(int pid)
    1102             : {
    1103       12554 :     PGPROC     *result = NULL;
    1104             :     int         index;
    1105             : 
    1106       12554 :     if (pid == 0)               /* never match dummy PGPROCs */
    1107           6 :         return NULL;
    1108             : 
    1109       60740 :     for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
    1110             :     {
    1111       60740 :         PGPROC     *proc = &AuxiliaryProcs[index];
    1112             : 
    1113       60740 :         if (proc->pid == pid)
    1114             :         {
    1115       12548 :             result = proc;
    1116       12548 :             break;
    1117             :         }
    1118             :     }
    1119       12548 :     return result;
    1120             : }
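                      : 
                      : /*
                      :  * A minimal caller sketch (the pid variable is hypothetical): look up an
                      :  * auxiliary process first and, failing that, fall back to the
                      :  * regular-backend lookup from procarray.c:
                      :  *
                      :  *     PGPROC *proc = AuxiliaryPidGetProc(pid);
                      :  *
                      :  *     if (proc == NULL)
                      :  *         proc = BackendPidGetProc(pid);
                      :  */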
    1121             : 
    1122             : 
    1123             : /*
    1124             :  * JoinWaitQueue -- join the wait queue on the specified lock
    1125             :  *
     1126             :  * It's not actually guaranteed that we need to wait when this function is
     1127             :  * called: while trying to find the position at which to insert ourselves
     1128             :  * into the wait queue, we may discover that we must be inserted ahead of
     1129             :  * everyone whose lock request conflicts with ours.  In that case, we get
     1130             :  * the lock immediately.  Because of this, it's sensible for this function
     1131             :  * to have a dontWait argument, despite the name.
    1132             :  *
    1133             :  * On entry, the caller has already set up LOCK and PROCLOCK entries to
    1134             :  * reflect that we have "requested" the lock.  The caller is responsible for
    1135             :  * cleaning that up, if we end up not joining the queue after all.
    1136             :  *
    1137             :  * The lock table's partition lock must be held at entry, and is still held
    1138             :  * at exit.  The caller must release it before calling ProcSleep().
    1139             :  *
    1140             :  * Result is one of the following:
    1141             :  *
    1142             :  *  PROC_WAIT_STATUS_OK       - lock was immediately granted
    1143             :  *  PROC_WAIT_STATUS_WAITING  - joined the wait queue; call ProcSleep()
    1144             :  *  PROC_WAIT_STATUS_ERROR    - immediate deadlock was detected, or would
    1145             :  *                              need to wait and dontWait == true
    1146             :  *
     1147             :  * NOTES: The wait queue is normally FIFO, but a new waiter that already
                      :  * holds locks conflicting with an earlier waiter's request is inserted
                      :  * ahead of that waiter (see the queue-placement logic below).
    1148             :  */
    1149             : ProcWaitStatus
    1150        4124 : JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
    1151             : {
    1152        4124 :     LOCKMODE    lockmode = locallock->tag.mode;
    1153        4124 :     LOCK       *lock = locallock->lock;
    1154        4124 :     PROCLOCK   *proclock = locallock->proclock;
    1155        4124 :     uint32      hashcode = locallock->hashcode;
    1156        4124 :     LWLock     *partitionLock PG_USED_FOR_ASSERTS_ONLY = LockHashPartitionLock(hashcode);
    1157        4124 :     dclist_head *waitQueue = &lock->waitProcs;
    1158        4124 :     PGPROC     *insert_before = NULL;
    1159             :     LOCKMASK    myProcHeldLocks;
    1160             :     LOCKMASK    myHeldLocks;
    1161        4124 :     bool        early_deadlock = false;
    1162        4124 :     PGPROC     *leader = MyProc->lockGroupLeader;
    1163             : 
    1164             :     Assert(LWLockHeldByMeInMode(partitionLock, LW_EXCLUSIVE));
    1165             : 
    1171             :     /*
    1172             :      * Determine which locks we're already holding.
    1173             :      *
     1174             :      * If group locking is in use, locks held by members of my locking group
     1175             :      * need to be included in myHeldLocks.  This is not strictly required
     1176             :      * for relation extension locks, which do conflict among group members.
     1177             :      * However, including them in myHeldLocks gives group members priority
     1178             :      * over other backends that are also trying to acquire those locks.
     1179             :      * On the other hand, we could avoid giving group members priority for
     1180             :      * that kind of lock, but there doesn't appear to be any clear advantage
     1181             :      * in doing so.
    1182             :      */
    1183        4124 :     myProcHeldLocks = proclock->holdMask;
    1184        4124 :     myHeldLocks = myProcHeldLocks;
    1185        4124 :     if (leader != NULL)
    1186             :     {
    1187             :         dlist_iter  iter;
    1188             : 
    1189          80 :         dlist_foreach(iter, &lock->procLocks)
    1190             :         {
    1191             :             PROCLOCK   *otherproclock;
    1192             : 
    1193          60 :             otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
    1194             : 
    1195          60 :             if (otherproclock->groupLeader == leader)
    1196          28 :                 myHeldLocks |= otherproclock->holdMask;
    1197             :         }
    1198             :     }
    1199             : 
    1200             :     /*
    1201             :      * Determine where to add myself in the wait queue.
    1202             :      *
     1203             :      * Normally I should go at the end of the queue.  However, if I already
     1204             :      * hold locks that conflict with the request of any previous waiter, put
     1205             :      * myself in the queue just in front of the first such waiter.  This is
     1206             :      * not a necessary step, since deadlock detection would move me ahead of
     1207             :      * that waiter anyway; but it's relatively cheap to detect such a
     1208             :      * conflict immediately rather than waiting for the deadlock timeout.
    1209             :      *
    1210             :      * Special case: if I find I should go in front of some waiter, check to
    1211             :      * see if I conflict with already-held locks or the requests before that
    1212             :      * waiter.  If not, then just grant myself the requested lock immediately.
    1213             :      * This is the same as the test for immediate grant in LockAcquire, except
    1214             :      * we are only considering the part of the wait queue before my insertion
    1215             :      * point.
     1216             :      * point.
                      :      */
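                      :     /*
                      :      * Illustration (assuming the default lock conflict table): if we hold
                      :      * RowExclusiveLock and the queue's first waiter wants
                      :      * AccessExclusiveLock, that waiter must wait for us anyway; so if
                      :      * nothing ahead of it conflicts with our request, we are granted the
                      :      * lock immediately, and otherwise we are inserted just in front of
                      :      * that waiter rather than behind it.
                      :      */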
    1217        4124 :     if (myHeldLocks != 0 && !dclist_is_empty(waitQueue))
    1218             :     {
    1219          12 :         LOCKMASK    aheadRequests = 0;
    1220             :         dlist_iter  iter;
    1221             : 
    1222          12 :         dclist_foreach(iter, waitQueue)
    1223             :         {
    1224          12 :             PGPROC     *proc = dlist_container(PGPROC, links, iter.cur);
    1225             : 
    1226             :             /*
    1227             :              * If we're part of the same locking group as this waiter, its
    1228             :              * locks neither conflict with ours nor contribute to
    1229             :              * aheadRequests.
    1230             :              */
    1231          12 :             if (leader != NULL && leader == proc->lockGroupLeader)
    1232           0 :                 continue;
    1233             : 
    1234             :             /* Must he wait for me? */
    1235          12 :             if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
    1236             :             {
     1237             :                 /* Must I wait for him? */
    1238          12 :                 if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
    1239             :                 {
    1240             :                     /*
    1241             :                      * Yes, so we have a deadlock.  Easiest way to clean up
    1242             :                      * correctly is to call RemoveFromWaitQueue(), but we
    1243             :                      * can't do that until we are *on* the wait queue. So, set
    1244             :                      * a flag to check below, and break out of loop.  Also,
    1245             :                      * record deadlock info for later message.
    1246             :                      */
    1247           2 :                     RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
    1248           2 :                     early_deadlock = true;
    1249           2 :                     break;
    1250             :                 }
    1251             :                 /* I must go before this waiter.  Check special case. */
    1252          10 :                 if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
    1253          10 :                     !LockCheckConflicts(lockMethodTable, lockmode, lock,
    1254             :                                         proclock))
    1255             :                 {
    1256             :                     /* Skip the wait and just grant myself the lock. */
    1257          10 :                     GrantLock(lock, proclock, lockmode);
    1258          10 :                     return PROC_WAIT_STATUS_OK;
    1259             :                 }
    1260             : 
    1261             :                 /* Put myself into wait queue before conflicting process */
    1262           0 :                 insert_before = proc;
    1263           0 :                 break;
    1264             :             }
    1265             :             /* Nope, so advance to next waiter */
    1266           0 :             aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
    1267             :         }
    1268             :     }
    1269             : 
    1270             :     /*
    1271             :      * If we detected deadlock, give up without waiting.  This must agree with
    1272             :      * CheckDeadLock's recovery code.
    1273             :      */
    1274        4114 :     if (early_deadlock)
    1275           2 :         return PROC_WAIT_STATUS_ERROR;
    1276             : 
    1277             :     /*
    1278             :      * At this point we know that we'd really need to sleep. If we've been
    1279             :      * commanded not to do that, bail out.
    1280             :      */
    1281        4112 :     if (dontWait)
    1282        1496 :         return PROC_WAIT_STATUS_ERROR;
    1283             : 
    1284             :     /*
    1285             :      * Insert self into queue, at the position determined above.
    1286             :      */
    1287        2616 :     if (insert_before)
    1288           0 :         dclist_insert_before(waitQueue, &insert_before->links, &MyProc->links);
    1289             :     else
    1290        2616 :         dclist_push_tail(waitQueue, &MyProc->links);
    1291             : 
    1292        2616 :     lock->waitMask |= LOCKBIT_ON(lockmode);
    1293             : 
    1294             :     /* Set up wait information in PGPROC object, too */
    1295        2616 :     MyProc->heldLocks = myProcHeldLocks;
    1296        2616 :     MyProc->waitLock = lock;
    1297        2616 :     MyProc->waitProcLock = proclock;
    1298        2616 :     MyProc->waitLockMode = lockmode;
    1299             : 
    1300        2616 :     MyProc->waitStatus = PROC_WAIT_STATUS_WAITING;
    1301             : 
    1302        2616 :     return PROC_WAIT_STATUS_WAITING;
    1303             : }
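                      : 
                      : /*
                      :  * A condensed sketch of the calling protocol described above; the real
                      :  * callers live in lock.c, and error cleanup and LOCALLOCK bookkeeping
                      :  * are omitted here:
                      :  *
                      :  *     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
                      :  *     ... set up LOCK and PROCLOCK entries ...
                      :  *     status = JoinWaitQueue(locallock, lockMethodTable, dontWait);
                      :  *     LWLockRelease(partitionLock);
                      :  *     if (status == PROC_WAIT_STATUS_WAITING)
                      :  *         status = ProcSleep(locallock);
                      :  */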
    1304             : 
    1305             : /*
    1306             :  * ProcSleep -- put process to sleep waiting on lock
    1307             :  *
    1308             :  * This must be called when JoinWaitQueue() returns PROC_WAIT_STATUS_WAITING.
    1309             :  * Returns after the lock has been granted, or if a deadlock is detected.  Can
    1310             :  * also bail out with ereport(ERROR), if some other error condition, or a
    1311             :  * timeout or cancellation is triggered.
    1312             :  *
    1313             :  * Result is one of the following:
    1314             :  *
    1315             :  *  PROC_WAIT_STATUS_OK      - lock was granted
    1316             :  *  PROC_WAIT_STATUS_ERROR   - a deadlock was detected
    1317             :  */
    1318             : ProcWaitStatus
    1319        2616 : ProcSleep(LOCALLOCK *locallock)
    1320             : {
    1321        2616 :     LOCKMODE    lockmode = locallock->tag.mode;
    1322        2616 :     LOCK       *lock = locallock->lock;
    1323        2616 :     uint32      hashcode = locallock->hashcode;
    1324        2616 :     LWLock     *partitionLock = LockHashPartitionLock(hashcode);
    1325        2616 :     TimestampTz standbyWaitStart = 0;
    1326        2616 :     bool        allow_autovacuum_cancel = true;
    1327        2616 :     bool        logged_recovery_conflict = false;
    1328             :     ProcWaitStatus myWaitStatus;
    1329             : 
    1330             :     /* The caller must've armed the on-error cleanup mechanism */
    1331             :     Assert(GetAwaitedLock() == locallock);
    1332             :     Assert(!LWLockHeldByMe(partitionLock));
    1333             : 
    1334             :     /*
    1335             :      * Now that we will successfully clean up after an ereport, it's safe to
    1336             :      * check to see if there's a buffer pin deadlock against the Startup
    1337             :      * process.  Of course, that's only necessary if we're doing Hot Standby
    1338             :      * and are not the Startup process ourselves.
    1339             :      */
    1340        2616 :     if (RecoveryInProgress() && !InRecovery)
    1341           2 :         CheckRecoveryConflictDeadlock();
    1342             : 
    1343             :     /* Reset deadlock_state before enabling the timeout handler */
    1344        2616 :     deadlock_state = DS_NOT_YET_CHECKED;
    1345        2616 :     got_deadlock_timeout = false;
    1346             : 
    1347             :     /*
     1348             :      * Set a timer so we can wake up after a while and check for a deadlock. If a
    1349             :      * deadlock is detected, the handler sets MyProc->waitStatus =
    1350             :      * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
    1351             :      * rather than success.
    1352             :      *
    1353             :      * By delaying the check until we've waited for a bit, we can avoid
    1354             :      * running the rather expensive deadlock-check code in most cases.
    1355             :      *
    1356             :      * If LockTimeout is set, also enable the timeout for that.  We can save a
    1357             :      * few cycles by enabling both timeout sources in one call.
    1358             :      *
    1359             :      * If InHotStandby we set lock waits slightly later for clarity with other
    1360             :      * code.
    1361             :      */
    1362        2616 :     if (!InHotStandby)
    1363             :     {
    1364        2614 :         if (LockTimeout > 0)
    1365             :         {
    1366             :             EnableTimeoutParams timeouts[2];
    1367             : 
    1368         158 :             timeouts[0].id = DEADLOCK_TIMEOUT;
    1369         158 :             timeouts[0].type = TMPARAM_AFTER;
    1370         158 :             timeouts[0].delay_ms = DeadlockTimeout;
    1371         158 :             timeouts[1].id = LOCK_TIMEOUT;
    1372         158 :             timeouts[1].type = TMPARAM_AFTER;
    1373         158 :             timeouts[1].delay_ms = LockTimeout;
    1374         158 :             enable_timeouts(timeouts, 2);
    1375             :         }
    1376             :         else
    1377        2456 :             enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
    1378             : 
    1379             :         /*
    1380             :          * Use the current time obtained for the deadlock timeout timer as
    1381             :          * waitStart (i.e., the time when this process started waiting for the
     1382             :          * lock).  Fetching the current time again would add overhead, so we
     1383             :          * reuse the time already obtained for the timer.
    1384             :          *
    1385             :          * Note that waitStart is updated without holding the lock table's
    1386             :          * partition lock, to avoid the overhead by additional lock
    1387             :          * acquisition. This can cause "waitstart" in pg_locks to become NULL
    1388             :          * for a very short period of time after the wait started even though
    1389             :          * "granted" is false. This is OK in practice because we can assume
    1390             :          * that users are likely to look at "waitstart" when waiting for the
    1391             :          * lock for a long time.
    1392             :          */
    1393        2614 :         pg_atomic_write_u64(&MyProc->waitStart,
    1394        2614 :                             get_timeout_start_time(DEADLOCK_TIMEOUT));
    1395             :     }
    1396           2 :     else if (log_recovery_conflict_waits)
    1397             :     {
    1398             :         /*
    1399             :          * Set the wait start timestamp if logging is enabled and in hot
    1400             :          * standby.
    1401             :          */
    1402           2 :         standbyWaitStart = GetCurrentTimestamp();
    1403             :     }
    1404             : 
    1405             :     /*
     1406             :      * If somebody wakes us between LWLockRelease and WaitLatch, WaitLatch
     1407             :      * will not block. But a set latch does not necessarily mean that the lock
    1408             :      * is free now, as there are many other sources for latch sets than
    1409             :      * somebody releasing the lock.
    1410             :      *
    1411             :      * We process interrupts whenever the latch has been set, so cancel/die
    1412             :      * interrupts are processed quickly. This means we must not mind losing
    1413             :      * control to a cancel/die interrupt here.  We don't, because we have no
    1414             :      * shared-state-change work to do after being granted the lock (the
    1415             :      * grantor did it all).  We do have to worry about canceling the deadlock
    1416             :      * timeout and updating the locallock table, but if we lose control to an
    1417             :      * error, LockErrorCleanup will fix that up.
    1418             :      */
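                      : 
                      :     /*
                      :      * In distilled form, the loop below is the standard latch-wait idiom
                      :      * (a sketch only: hot-standby handling, deadlock checks, and logging
                      :      * are elided, and `info` stands for the wait-event value used below):
                      :      *
                      :      *     do {
                      :      *         WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0, info);
                      :      *         ResetLatch(MyLatch);
                      :      *         CHECK_FOR_INTERRUPTS();
                      :      *     } while (MyProc->waitStatus == PROC_WAIT_STATUS_WAITING);
                      :      */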
    1419             :     do
    1420             :     {
    1421        3930 :         if (InHotStandby)
    1422             :         {
    1423           6 :             bool        maybe_log_conflict =
    1424           6 :                 (standbyWaitStart != 0 && !logged_recovery_conflict);
    1425             : 
    1426             :             /* Set a timer and wait for that or for the lock to be granted */
    1427           6 :             ResolveRecoveryConflictWithLock(locallock->tag.lock,
    1428             :                                             maybe_log_conflict);
    1429             : 
    1430             :             /*
    1431             :              * Emit the log message if the startup process is waiting longer
     1432             :              * than deadlock_timeout for a recovery conflict on a lock.
    1433             :              */
    1434           6 :             if (maybe_log_conflict)
    1435             :             {
    1436           2 :                 TimestampTz now = GetCurrentTimestamp();
    1437             : 
    1438           2 :                 if (TimestampDifferenceExceeds(standbyWaitStart, now,
    1439             :                                                DeadlockTimeout))
    1440             :                 {
    1441             :                     VirtualTransactionId *vxids;
    1442             :                     int         cnt;
    1443             : 
    1444           2 :                     vxids = GetLockConflicts(&locallock->tag.lock,
    1445             :                                              AccessExclusiveLock, &cnt);
    1446             : 
    1447             :                     /*
    1448             :                      * Log the recovery conflict and the list of PIDs of
    1449             :                      * backends holding the conflicting lock. Note that we do
    1450             :                      * logging even if there are no such backends right now
    1451             :                      * because the startup process here has already waited
    1452             :                      * longer than deadlock_timeout.
    1453             :                      */
    1454           2 :                     LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
    1455             :                                         standbyWaitStart, now,
    1456           2 :                                         cnt > 0 ? vxids : NULL, true);
    1457           2 :                     logged_recovery_conflict = true;
    1458             :                 }
    1459             :             }
    1460             :         }
    1461             :         else
    1462             :         {
    1463        3924 :             (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
    1464        3924 :                              PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
    1465        3924 :             ResetLatch(MyLatch);
    1466             :             /* check for deadlocks first, as that's probably log-worthy */
    1467        3924 :             if (got_deadlock_timeout)
    1468             :             {
    1469          62 :                 CheckDeadLock();
    1470          62 :                 got_deadlock_timeout = false;
    1471             :             }
    1472        3924 :             CHECK_FOR_INTERRUPTS();
    1473             :         }
    1474             : 
    1475             :         /*
    1476             :          * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
    1477             :          * else asynchronously.  Read it just once per loop to prevent
    1478             :          * surprising behavior (such as missing log messages).
    1479             :          */
    1480        3848 :         myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
    1481             : 
    1482             :         /*
    1483             :          * If we are not deadlocked, but are waiting on an autovacuum-induced
    1484             :          * task, send a signal to interrupt it.
    1485             :          */
    1486        3848 :         if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
    1487             :         {
    1488           0 :             PGPROC     *autovac = GetBlockingAutoVacuumPgproc();
    1489             :             uint8       statusFlags;
    1490             :             uint8       lockmethod_copy;
    1491             :             LOCKTAG     locktag_copy;
    1492             : 
    1493             :             /*
    1494             :              * Grab info we need, then release lock immediately.  Note this
    1495             :              * coding means that there is a tiny chance that the process
    1496             :              * terminates its current transaction and starts a different one
     1497             :              * before we have a chance to send the signal; the worst possible
    1498             :              * consequence is that a for-wraparound vacuum is canceled.  But
    1499             :              * that could happen in any case unless we were to do kill() with
    1500             :              * the lock held, which is much more undesirable.
    1501             :              */
    1502           0 :             LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
    1503           0 :             statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
    1504           0 :             lockmethod_copy = lock->tag.locktag_lockmethodid;
    1505           0 :             locktag_copy = lock->tag;
    1506           0 :             LWLockRelease(ProcArrayLock);
    1507             : 
    1508             :             /*
    1509             :              * Only do it if the worker is not working to protect against Xid
    1510             :              * wraparound.
    1511             :              */
    1512           0 :             if ((statusFlags & PROC_IS_AUTOVACUUM) &&
    1513           0 :                 !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
    1514             :             {
    1515           0 :                 int         pid = autovac->pid;
    1516             : 
    1517             :                 /* report the case, if configured to do so */
    1518           0 :                 if (message_level_is_interesting(DEBUG1))
    1519             :                 {
    1520             :                     StringInfoData locktagbuf;
    1521             :                     StringInfoData logbuf;  /* errdetail for server log */
    1522             : 
    1523           0 :                     initStringInfo(&locktagbuf);
    1524           0 :                     initStringInfo(&logbuf);
    1525           0 :                     DescribeLockTag(&locktagbuf, &locktag_copy);
    1526           0 :                     appendStringInfo(&logbuf,
    1527             :                                      "Process %d waits for %s on %s.",
    1528             :                                      MyProcPid,
    1529             :                                      GetLockmodeName(lockmethod_copy, lockmode),
    1530             :                                      locktagbuf.data);
    1531             : 
    1532           0 :                     ereport(DEBUG1,
    1533             :                             (errmsg_internal("sending cancel to blocking autovacuum PID %d",
    1534             :                                              pid),
    1535             :                              errdetail_log("%s", logbuf.data)));
    1536             : 
    1537           0 :                     pfree(locktagbuf.data);
    1538           0 :                     pfree(logbuf.data);
    1539             :                 }
    1540             : 
    1541             :                 /* send the autovacuum worker Back to Old Kent Road */
    1542           0 :                 if (kill(pid, SIGINT) < 0)
    1543             :                 {
    1544             :                     /*
    1545             :                      * There's a race condition here: once we release the
    1546             :                      * ProcArrayLock, it's possible for the autovac worker to
    1547             :                      * close up shop and exit before we can do the kill().
    1548             :                      * Therefore, we do not whinge about no-such-process.
    1549             :                      * Other errors such as EPERM could conceivably happen if
    1550             :                      * the kernel recycles the PID fast enough, but such cases
    1551             :                      * seem improbable enough that it's probably best to issue
    1552             :                      * a warning if we see some other errno.
    1553             :                      */
    1554           0 :                     if (errno != ESRCH)
    1555           0 :                         ereport(WARNING,
    1556             :                                 (errmsg("could not send signal to process %d: %m",
    1557             :                                         pid)));
    1558             :                 }
    1559             :             }
    1560             : 
    1561             :             /* prevent signal from being sent again more than once */
    1562           0 :             allow_autovacuum_cancel = false;
    1563             :         }
    1564             : 
    1565             :         /*
    1566             :          * If awoken after the deadlock check interrupt has run, and
    1567             :          * log_lock_waits is on, then report about the wait.
    1568             :          */
    1569        3848 :         if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
    1570             :         {
    1571             :             StringInfoData buf,
    1572             :                         lock_waiters_sbuf,
    1573             :                         lock_holders_sbuf;
    1574             :             const char *modename;
    1575             :             long        secs;
    1576             :             int         usecs;
    1577             :             long        msecs;
    1578        1048 :             int         lockHoldersNum = 0;
    1579             : 
    1580        1048 :             initStringInfo(&buf);
    1581        1048 :             initStringInfo(&lock_waiters_sbuf);
    1582        1048 :             initStringInfo(&lock_holders_sbuf);
    1583             : 
    1584        1048 :             DescribeLockTag(&buf, &locallock->tag.lock);
    1585        1048 :             modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
    1586             :                                        lockmode);
    1587        1048 :             TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
    1588             :                                 GetCurrentTimestamp(),
    1589             :                                 &secs, &usecs);
    1590        1048 :             msecs = secs * 1000 + usecs / 1000;
    1591        1048 :             usecs = usecs % 1000;
    1592             : 
    1593             :             /* Gather a list of all lock holders and waiters */
    1594        1048 :             LWLockAcquire(partitionLock, LW_SHARED);
    1595        1048 :             GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
    1596             :                                      &lock_waiters_sbuf, &lockHoldersNum);
    1597        1048 :             LWLockRelease(partitionLock);
    1598             : 
    1599        1048 :             if (deadlock_state == DS_SOFT_DEADLOCK)
    1600           6 :                 ereport(LOG,
    1601             :                         (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
    1602             :                                 MyProcPid, modename, buf.data, msecs, usecs),
    1603             :                          (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
    1604             :                                                "Processes holding the lock: %s. Wait queue: %s.",
    1605             :                                                lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
    1606        1042 :             else if (deadlock_state == DS_HARD_DEADLOCK)
    1607             :             {
    1608             :                 /*
    1609             :                  * This message is a bit redundant with the error that will be
    1610             :                  * reported subsequently, but in some cases the error report
    1611             :                  * might not make it to the log (eg, if it's caught by an
    1612             :                  * exception handler), and we want to ensure all long-wait
    1613             :                  * events get logged.
    1614             :                  */
    1615          10 :                 ereport(LOG,
    1616             :                         (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
    1617             :                                 MyProcPid, modename, buf.data, msecs, usecs),
    1618             :                          (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
    1619             :                                                "Processes holding the lock: %s. Wait queue: %s.",
    1620             :                                                lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
    1621             :             }
    1622             : 
    1623        1048 :             if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
    1624         992 :                 ereport(LOG,
    1625             :                         (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
    1626             :                                 MyProcPid, modename, buf.data, msecs, usecs),
    1627             :                          (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
    1628             :                                                "Processes holding the lock: %s. Wait queue: %s.",
    1629             :                                                lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
    1630          56 :             else if (myWaitStatus == PROC_WAIT_STATUS_OK)
    1631          46 :                 ereport(LOG,
    1632             :                         (errmsg("process %d acquired %s on %s after %ld.%03d ms",
    1633             :                                 MyProcPid, modename, buf.data, msecs, usecs)));
    1634             :             else
    1635             :             {
    1636             :                 Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
    1637             : 
    1638             :                 /*
    1639             :                  * Currently, the deadlock checker always kicks its own
    1640             :                  * process, which means that we'll only see
    1641             :                  * PROC_WAIT_STATUS_ERROR when deadlock_state ==
    1642             :                  * DS_HARD_DEADLOCK, and there's no need to print redundant
    1643             :                  * messages.  But for completeness and future-proofing, print
    1644             :                  * a message if it looks like someone else kicked us off the
    1645             :                  * lock.
    1646             :                  */
    1647          10 :                 if (deadlock_state != DS_HARD_DEADLOCK)
    1648           0 :                     ereport(LOG,
    1649             :                             (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
    1650             :                                     MyProcPid, modename, buf.data, msecs, usecs),
    1651             :                              (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
    1652             :                                                    "Processes holding the lock: %s. Wait queue: %s.",
    1653             :                                                    lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
    1654             :             }
    1655             : 
    1656             :             /*
    1657             :              * At this point we might still need to wait for the lock. Reset
    1658             :              * state so we don't print the above messages again.
    1659             :              */
    1660        1048 :             deadlock_state = DS_NO_DEADLOCK;
    1661             : 
    1662        1048 :             pfree(buf.data);
    1663        1048 :             pfree(lock_holders_sbuf.data);
    1664        1048 :             pfree(lock_waiters_sbuf.data);
    1665             :         }
    1666        3848 :     } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
    1667             : 
    1668             :     /*
    1669             :      * Disable the timers, if they are still running.  As in LockErrorCleanup,
    1670             :      * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
    1671             :      * already caused QueryCancelPending to become set, we want the cancel to
    1672             :      * be reported as a lock timeout, not a user cancel.
    1673             :      */
    1674        2534 :     if (!InHotStandby)
    1675             :     {
    1676        2532 :         if (LockTimeout > 0)
    1677             :         {
    1678             :             DisableTimeoutParams timeouts[2];
    1679             : 
    1680         146 :             timeouts[0].id = DEADLOCK_TIMEOUT;
    1681         146 :             timeouts[0].keep_indicator = false;
    1682         146 :             timeouts[1].id = LOCK_TIMEOUT;
    1683         146 :             timeouts[1].keep_indicator = true;
    1684         146 :             disable_timeouts(timeouts, 2);
    1685             :         }
    1686             :         else
    1687        2386 :             disable_timeout(DEADLOCK_TIMEOUT, false);
    1688             :     }
    1689             : 
    1690             :     /*
     1691             :      * Emit the log message if a recovery conflict on a lock was resolved but the
    1692             :      * startup process waited longer than deadlock_timeout for it.
    1693             :      */
    1694        2534 :     if (InHotStandby && logged_recovery_conflict)
    1695           2 :         LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
    1696             :                             standbyWaitStart, GetCurrentTimestamp(),
    1697             :                             NULL, false);
    1698             : 
    1699             :     /*
    1700             :      * We don't have to do anything else, because the awaker did all the
    1701             :      * necessary updates of the lock table and MyProc. (The caller is
    1702             :      * responsible for updating the local lock table.)
    1703             :      */
    1704        2534 :     return myWaitStatus;
    1705             : }
    1706             : 
    1707             : 
    1708             : /*
    1709             :  * ProcWakeup -- wake up a process by setting its latch.
    1710             :  *
    1711             :  *   Also remove the process from the wait queue and set its links invalid.
    1712             :  *
    1713             :  * The appropriate lock partition lock must be held by caller.
    1714             :  *
    1715             :  * XXX: presently, this code is only used for the "success" case, and only
    1716             :  * works correctly for that case.  To clean up in failure case, would need
    1717             :  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
    1718             :  * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
    1719             :  */
    1720             : void
    1721        2526 : ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
    1722             : {
    1723        2526 :     if (dlist_node_is_detached(&proc->links))
    1724           0 :         return;
    1725             : 
    1726             :     Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
    1727             : 
    1728             :     /* Remove process from wait queue */
    1729        2526 :     dclist_delete_from_thoroughly(&proc->waitLock->waitProcs, &proc->links);
    1730             : 
    1731             :     /* Clean up process' state and pass it the ok/fail signal */
    1732        2526 :     proc->waitLock = NULL;
    1733        2526 :     proc->waitProcLock = NULL;
    1734        2526 :     proc->waitStatus = waitStatus;
     1735        2526 :     pg_atomic_write_u64(&proc->waitStart, 0);
    1736             : 
    1737             :     /* And awaken it */
    1738        2526 :     SetLatch(&proc->procLatch);
    1739             : }
    1740             : 
    1741             : /*
    1742             :  * ProcLockWakeup -- routine for waking up processes when a lock is
    1743             :  *      released (or a prior waiter is aborted).  Scan all waiters
    1744             :  *      for lock, waken any that are no longer blocked.
    1745             :  *
    1746             :  * The appropriate lock partition lock must be held by caller.
    1747             :  */
    1748             : void
    1749        2558 : ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
    1750             : {
    1751        2558 :     dclist_head *waitQueue = &lock->waitProcs;
    1752        2558 :     LOCKMASK    aheadRequests = 0;
    1753             :     dlist_mutable_iter miter;
    1754             : 
    1755        2558 :     if (dclist_is_empty(waitQueue))
    1756          90 :         return;
    1757             : 
    1758        5698 :     dclist_foreach_modify(miter, waitQueue)
    1759             :     {
    1760        3230 :         PGPROC     *proc = dlist_container(PGPROC, links, miter.cur);
    1761        3230 :         LOCKMODE    lockmode = proc->waitLockMode;
    1762             : 
    1763             :         /*
    1764             :          * Waken if (a) doesn't conflict with requests of earlier waiters, and
    1765             :          * (b) doesn't conflict with already-held locks.
    1766             :          */
    1767        3230 :         if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
    1768        2928 :             !LockCheckConflicts(lockMethodTable, lockmode, lock,
    1769             :                                 proc->waitProcLock))
    1770             :         {
    1771             :             /* OK to waken */
    1772        2526 :             GrantLock(lock, proc->waitProcLock, lockmode);
    1773             :             /* removes proc from the lock's waiting process queue */
    1774        2526 :             ProcWakeup(proc, PROC_WAIT_STATUS_OK);
    1775             :         }
    1776             :         else
    1777             :         {
    1778             :             /*
    1779             :              * Lock conflicts: Don't wake, but remember requested mode for
    1780             :              * later checks.
    1781             :              */
    1782         704 :             aheadRequests |= LOCKBIT_ON(lockmode);
    1783             :         }
    1784             :     }
    1785             : }
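                      : 
                      : /*
                      :  * Worked example (illustrative): suppose the wait queue holds
                      :  * [ShareLock, AccessExclusiveLock, ShareLock] and the last conflicting
                      :  * lock was just released.  The first ShareLock is granted and its waiter
                      :  * woken.  The AccessExclusiveLock request conflicts with that
                      :  * just-granted ShareLock, so it stays queued and is added to
                      :  * aheadRequests.  The second ShareLock request would be grantable on its
                      :  * own, but it conflicts with aheadRequests, so it keeps waiting; that
                      :  * keeps a stream of ShareLock acquirers from starving the exclusive
                      :  * waiter.
                      :  */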
    1786             : 
    1787             : /*
    1788             :  * CheckDeadLock
    1789             :  *
     1790             :  * We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
    1791             :  * lock to be released by some other process.  Check if there's a deadlock; if
    1792             :  * not, just return.  (But signal ProcSleep to log a message, if
    1793             :  * log_lock_waits is true.)  If we have a real deadlock, remove ourselves from
    1794             :  * the lock's wait queue and signal an error to ProcSleep.
    1795             :  */
    1796             : static void
    1797          62 : CheckDeadLock(void)
    1798             : {
    1799             :     int         i;
    1800             : 
    1801             :     /*
    1802             :      * Acquire exclusive lock on the entire shared lock data structures. Must
    1803             :      * grab LWLocks in partition-number order to avoid LWLock deadlock.
    1804             :      *
    1805             :      * Note that the deadlock check interrupt had better not be enabled
    1806             :      * anywhere that this process itself holds lock partition locks, else this
    1807             :      * will wait forever.  Also note that LWLockAcquire creates a critical
    1808             :      * section, so that this routine cannot be interrupted by cancel/die
    1809             :      * interrupts.
    1810             :      */
    1811        1054 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    1812         992 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
    1813             : 
    1814             :     /*
    1815             :      * Check to see if we've been awoken by anyone in the interim.
    1816             :      *
    1817             :      * If we have, we can return and resume our transaction -- happy day.
    1818             :      * Before we are awoken the process releasing the lock grants it to us so
    1819             :      * we know that we don't have to wait anymore.
    1820             :      *
    1821             :      * We check by looking to see if we've been unlinked from the wait queue.
    1822             :      * This is safe because we hold the lock partition lock.
    1823             :      */
    1824          62 :     if (MyProc->links.prev == NULL ||
    1825          62 :         MyProc->links.next == NULL)
    1826           0 :         goto check_done;
    1827             : 
    1828             : #ifdef LOCK_DEBUG
    1829             :     if (Debug_deadlocks)
    1830             :         DumpAllLocks();
    1831             : #endif
    1832             : 
    1833             :     /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
    1834          62 :     deadlock_state = DeadLockCheck(MyProc);
    1835             : 
    1836          62 :     if (deadlock_state == DS_HARD_DEADLOCK)
    1837             :     {
    1838             :         /*
    1839             :          * Oops.  We have a deadlock.
    1840             :          *
    1841             :          * Get this process out of wait state. (Note: we could do this more
    1842             :          * efficiently by relying on lockAwaited, but use this coding to
    1843             :          * preserve the flexibility to kill some other transaction than the
    1844             :          * one detecting the deadlock.)
    1845             :          *
    1846             :          * RemoveFromWaitQueue sets MyProc->waitStatus to
    1847             :          * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
    1848             :          * return from the signal handler.
    1849             :          */
    1850             :         Assert(MyProc->waitLock != NULL);
    1851          10 :         RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
    1852             : 
    1853             :         /*
    1854             :          * We're done here.  Transaction abort caused by the error that
    1855             :          * ProcSleep will raise will cause any other locks we hold to be
    1856             :          * released, thus allowing other processes to wake up; we don't need
    1857             :          * to do that here.  NOTE: an exception is that releasing locks we
    1858             :          * hold doesn't consider the possibility of waiters that were blocked
    1859             :          * behind us on the lock we just failed to get, and might now be
    1860             :          * wakable because we're not in front of them anymore.  However,
    1861             :          * RemoveFromWaitQueue took care of waking up any such processes.
    1862             :          */
    1863             :     }
    1864             : 
    1865             :     /*
    1866             :      * And release locks.  We do this in reverse order for two reasons: (1)
    1867             :      * Anyone else who needs more than one of the locks will be trying to lock
    1868             :      * them in increasing order; we don't want to release the other process
    1869             :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    1870             :      * behavior inside LWLockRelease.
    1871             :      */
    1872          52 : check_done:
    1873        1054 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    1874         992 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    1875          62 : }
    1876             : 
    1877             : /*
    1878             :  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
    1879             :  *
    1880             :  * NB: Runs inside a signal handler, be careful.
    1881             :  */
    1882             : void
    1883          62 : CheckDeadLockAlert(void)
    1884             : {
    1885          62 :     int         save_errno = errno;
    1886             : 
    1887          62 :     got_deadlock_timeout = true;
    1888             : 
    1889             :     /*
    1890             :      * Have to set the latch again, even if handle_sig_alarm already did. Back
    1891             :      * then got_deadlock_timeout wasn't yet set... It's unlikely that this
    1892             :      * ever would be a problem, but setting a set latch again is cheap.
    1893             :      *
    1894             :      * Note that, when this function runs inside procsignal_sigusr1_handler(),
    1895             :      * the handler function sets the latch again after the latch is set here.
    1896             :      */
    1897          62 :     SetLatch(MyLatch);
    1898          62 :     errno = save_errno;
    1899          62 : }
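                      : 
                      : /*
                      :  * A sketch of how this handler is wired up at backend startup (cf.
                      :  * InitPostgres()); the timeout itself is armed in ProcSleep() via
                      :  * enable_timeout_after()/enable_timeouts():
                      :  *
                      :  *     RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLockAlert);
                      :  */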
    1900             : 
    1901             : /*
    1902             :  * GetLockHoldersAndWaiters - get lock holders and waiters for a lock
    1903             :  *
    1904             :  * Fill lock_holders_sbuf and lock_waiters_sbuf with the PIDs of processes holding
    1905             :  * and waiting for the lock, and set lockHoldersNum to the number of lock holders.
    1906             :  *
    1907             :  * The lock table's partition lock must be held on entry and remains held on exit.
    1908             :  */
    1909             : void
    1910        1048 : GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf,
    1911             :                          StringInfo lock_waiters_sbuf, int *lockHoldersNum)
    1912             : {
    1913             :     dlist_iter  proc_iter;
    1914             :     PROCLOCK   *curproclock;
    1915        1048 :     LOCK       *lock = locallock->lock;
    1916        1048 :     bool        first_holder = true,
    1917        1048 :                 first_waiter = true;
    1918             : 
    1919             : #ifdef USE_ASSERT_CHECKING
    1920             :     {
    1921             :         uint32      hashcode = locallock->hashcode;
    1922             :         LWLock     *partitionLock = LockHashPartitionLock(hashcode);
    1923             : 
    1924             :         Assert(LWLockHeldByMe(partitionLock));
    1925             :     }
    1926             : #endif
    1927             : 
    1928        1048 :     *lockHoldersNum = 0;
    1929             : 
    1930             :     /*
    1931             :      * Loop over the lock's procLocks to gather a list of all holders and
    1932             :      * waiters.  This lets us provide more detailed information for lock
    1933             :      * debugging purposes.
    1934             :      *
    1935             :      * lock->procLocks contains all processes which hold or wait for this
    1936             :      * lock.
    1937             :      */
    1938        3154 :     dlist_foreach(proc_iter, &lock->procLocks)
    1939             :     {
    1940        2106 :         curproclock =
    1941        2106 :             dlist_container(PROCLOCK, lockLink, proc_iter.cur);
    1942             : 
    1943             :         /*
    1944             :          * The process is a waiter if myProc->waitProcLock == curproclock;
    1945             :          * it is a holder if that field is NULL or points elsewhere.
    1946             :          */
    1947        2106 :         if (curproclock->tag.myProc->waitProcLock == curproclock)
    1948             :         {
    1949        1032 :             if (first_waiter)
    1950             :             {
    1951         994 :                 appendStringInfo(lock_waiters_sbuf, "%d",
    1952         994 :                                  curproclock->tag.myProc->pid);
    1953         994 :                 first_waiter = false;
    1954             :             }
    1955             :             else
    1956          38 :                 appendStringInfo(lock_waiters_sbuf, ", %d",
    1957          38 :                                  curproclock->tag.myProc->pid);
    1958             :         }
    1959             :         else
    1960             :         {
    1961        1074 :             if (first_holder)
    1962             :             {
    1963        1048 :                 appendStringInfo(lock_holders_sbuf, "%d",
    1964        1048 :                                  curproclock->tag.myProc->pid);
    1965        1048 :                 first_holder = false;
    1966             :             }
    1967             :             else
    1968          26 :                 appendStringInfo(lock_holders_sbuf, ", %d",
    1969          26 :                                  curproclock->tag.myProc->pid);
    1970             : 
    1971        1074 :             (*lockHoldersNum)++;
    1972             :         }
    1973             :     }
    1974        1048 : }
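
/*
 * Editor's note (illustrative sketch, not part of the measured source): a
 * hypothetical caller, along the lines of the log_lock_waits reporting path.
 * Per the header comment the lock's partition lock must already be held;
 * the shared-mode acquisition here is an assumption for the sketch.
 */
StringInfoData holders,
            waiters;
int         lockHoldersNum;
LWLock     *partitionLock = LockHashPartitionLock(locallock->hashcode);

initStringInfo(&holders);
initStringInfo(&waiters);

LWLockAcquire(partitionLock, LW_SHARED);
GetLockHoldersAndWaiters(locallock, &holders, &waiters, &lockHoldersNum);
LWLockRelease(partitionLock);

elog(LOG, "%d holder(s): %s; waiter(s): %s",
     lockHoldersNum, holders.data, waiters.data);

pfree(holders.data);
pfree(waiters.data);
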
    1975             : 
    1976             : /*
    1977             :  * ProcWaitForSignal - wait for a signal from another backend.
    1978             :  *
    1979             :  * As this uses the generic process latch, the caller has to be robust against
    1980             :  * unrelated wakeups: Always check that the desired state has occurred, and
    1981             :  * wait again if not.
    1982             :  */
    1983             : void
    1984         182 : ProcWaitForSignal(uint32 wait_event_info)
    1985             : {
    1986         182 :     (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
    1987             :                      wait_event_info);
    1988         182 :     ResetLatch(MyLatch);
    1989         182 :     CHECK_FOR_INTERRUPTS();
    1990         182 : }
    1991             : 
    1992             : /*
    1993             :  * ProcSendSignal - set the latch of a backend identified by ProcNumber
    1994             :  */
    1995             : void
    1996         158 : ProcSendSignal(ProcNumber procNumber)
    1997             : {
    1998         158 :     if (procNumber < 0 || procNumber >= ProcGlobal->allProcCount)
    1999           0 :         elog(ERROR, "procNumber out of range");
    2000             : 
    2001         158 :     SetLatch(&ProcGlobal->allProcs[procNumber].procLatch);
    2002         158 : }
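
/*
 * Editor's note (illustrative sketch, not part of the measured source): the
 * robust-wakeup pattern that ProcWaitForSignal's comment prescribes, paired
 * with ProcSendSignal.  "shared->flag", "waiter_proc_number", and the wait
 * event are placeholders; real code would also need a memory barrier or a
 * spinlock around the shared flag.
 */

/* waiter: re-check the condition after every wakeup */
while (!shared->flag)
    ProcWaitForSignal(WAIT_EVENT_MQ_INTERNAL);

/* signaler: publish the state change, then set the waiter's latch */
shared->flag = true;
ProcSendSignal(waiter_proc_number);
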
    2003             : 
    2004             : /*
    2005             :  * BecomeLockGroupLeader - designate process as lock group leader
    2006             :  *
    2007             :  * Once this function has returned, other processes can join the lock group
    2008             :  * by calling BecomeLockGroupMember.
    2009             :  */
    2010             : void
    2011        1174 : BecomeLockGroupLeader(void)
    2012             : {
    2013             :     LWLock     *leader_lwlock;
    2014             : 
    2015             :     /* If we already did it, we don't need to do it again. */
    2016        1174 :     if (MyProc->lockGroupLeader == MyProc)
    2017        1030 :         return;
    2018             : 
    2019             :     /* We had better not be a follower. */
    2020             :     Assert(MyProc->lockGroupLeader == NULL);
    2021             : 
    2022             :     /* Create single-member group, containing only ourselves. */
    2023         144 :     leader_lwlock = LockHashPartitionLockByProc(MyProc);
    2024         144 :     LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
    2025         144 :     MyProc->lockGroupLeader = MyProc;
    2026         144 :     dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
    2027         144 :     LWLockRelease(leader_lwlock);
    2028             : }
    2029             : 
    2030             : /*
    2031             :  * BecomeLockGroupMember - designate process as lock group member
    2032             :  *
    2033             :  * This is pretty straightforward except for the possibility that the leader
    2034             :  * whose group we're trying to join might exit before we manage to do so,
    2035             :  * and its PGPROC might then get recycled for an unrelated process.  To avoid
    2036             :  * that, we require the caller to pass the PID of the intended PGPROC as
    2037             :  * an interlock.  Returns true if we successfully join the intended lock
    2038             :  * group, and false if not.
    2039             :  */
    2040             : bool
    2041        2748 : BecomeLockGroupMember(PGPROC *leader, int pid)
    2042             : {
    2043             :     LWLock     *leader_lwlock;
    2044        2748 :     bool        ok = false;
    2045             : 
    2046             :     /* Group leader can't become member of group */
    2047             :     Assert(MyProc != leader);
    2048             : 
    2049             :     /* Can't already be a member of a group */
    2050             :     Assert(MyProc->lockGroupLeader == NULL);
    2051             : 
    2052             :     /* PID must be valid. */
    2053             :     Assert(pid != 0);
    2054             : 
    2055             :     /*
    2056             :      * Get lock protecting the group fields.  Note LockHashPartitionLockByProc
    2057             :      * calculates the proc number based on the PGPROC slot without looking at
    2058             :      * its contents, so we will acquire the correct lock even if the leader
    2059             :      * PGPROC is in the process of being recycled.
    2060             :      */
    2061        2748 :     leader_lwlock = LockHashPartitionLockByProc(leader);
    2062        2748 :     LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
    2063             : 
    2064             :     /* Is this the leader we're looking for? */
    2065        2748 :     if (leader->pid == pid && leader->lockGroupLeader == leader)
    2066             :     {
    2067             :         /* OK, join the group */
    2068        2748 :         ok = true;
    2069        2748 :         MyProc->lockGroupLeader = leader;
    2070        2748 :         dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
    2071             :     }
    2072        2748 :     LWLockRelease(leader_lwlock);
    2073             : 
    2074        2748 :     return ok;
    2075             : }
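
/*
 * Editor's note (illustrative sketch, not part of the measured source): the
 * leader/member handshake these two functions implement, loosely modeled on
 * parallel worker startup.  "shared" is a hypothetical shared-memory area
 * carrying the leader's PGPROC pointer and its PID as the interlock.
 */

/* leader, before launching any workers */
BecomeLockGroupLeader();
shared->leader_pgproc = MyProc;
shared->leader_pid = MyProcPid;

/* worker, during startup; fails if the leader already exited */
if (!BecomeLockGroupMember(shared->leader_pgproc, shared->leader_pid))
    ereport(ERROR,
            (errmsg("could not join lock group: leader has exited")));
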

Generated by: LCOV version 1.16