LCOV - code coverage report
Current view: top level - src/backend/storage/lmgr - lock.c (source / functions) Coverage Total Hit
Test: PostgreSQL 19devel Lines: 87.2 % 1294 1128
Test Date: 2026-03-24 13:16:20 Functions: 95.1 % 61 58
Legend: Lines:     hit not hit

            Line data    Source code
       1              : /*-------------------------------------------------------------------------
       2              :  *
       3              :  * lock.c
       4              :  *    POSTGRES primary lock mechanism
       5              :  *
       6              :  * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
       7              :  * Portions Copyright (c) 1994, Regents of the University of California
       8              :  *
       9              :  *
      10              :  * IDENTIFICATION
      11              :  *    src/backend/storage/lmgr/lock.c
      12              :  *
      13              :  * NOTES
      14              :  *    A lock table is a shared memory hash table.  When
      15              :  *    a process tries to acquire a lock of a type that conflicts
      16              :  *    with existing locks, it is put to sleep using the routines
      17              :  *    in storage/lmgr/proc.c.
      18              :  *
      19              :  *    For the most part, this code should be invoked via lmgr.c
      20              :  *    or another lock-management module, not directly.
      21              :  *
      22              :  *  Interface:
      23              :  *
      24              :  *  LockManagerShmemInit(), GetLocksMethodTable(), GetLockTagsMethodTable(),
      25              :  *  LockAcquire(), LockRelease(), LockReleaseAll(),
      26              :  *  LockCheckConflicts(), GrantLock()
      27              :  *
      28              :  *-------------------------------------------------------------------------
      29              :  */
      30              : #include "postgres.h"
      31              : 
      32              : #include <signal.h>
      33              : #include <unistd.h>
      34              : 
      35              : #include "access/transam.h"
      36              : #include "access/twophase.h"
      37              : #include "access/twophase_rmgr.h"
      38              : #include "access/xlog.h"
      39              : #include "access/xlogutils.h"
      40              : #include "miscadmin.h"
      41              : #include "pg_trace.h"
      42              : #include "pgstat.h"
      43              : #include "storage/lmgr.h"
      44              : #include "storage/proc.h"
      45              : #include "storage/procarray.h"
      46              : #include "storage/spin.h"
      47              : #include "storage/standby.h"
      48              : #include "utils/memutils.h"
      49              : #include "utils/ps_status.h"
      50              : #include "utils/resowner.h"
      51              : 
      52              : 
      53              : /* GUC variables */
      54              : int         max_locks_per_xact; /* used to set the lock table size */
      55              : bool        log_lock_failures = false;
      56              : 
      57              : #define NLOCKENTS() \
      58              :     mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
      59              : 
      60              : 
      61              : /*
      62              :  * Data structures defining the semantics of the standard lock methods.
      63              :  *
      64              :  * The conflict table defines the semantics of the various lock modes.
      65              :  */
      66              : static const LOCKMASK LockConflicts[] = {
      67              :     0,
      68              : 
      69              :     /* AccessShareLock */
      70              :     LOCKBIT_ON(AccessExclusiveLock),
      71              : 
      72              :     /* RowShareLock */
      73              :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      74              : 
      75              :     /* RowExclusiveLock */
      76              :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      77              :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      78              : 
      79              :     /* ShareUpdateExclusiveLock */
      80              :     LOCKBIT_ON(ShareUpdateExclusiveLock) |
      81              :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      82              :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      83              : 
      84              :     /* ShareLock */
      85              :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      86              :     LOCKBIT_ON(ShareRowExclusiveLock) |
      87              :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      88              : 
      89              :     /* ShareRowExclusiveLock */
      90              :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      91              :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      92              :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      93              : 
      94              :     /* ExclusiveLock */
      95              :     LOCKBIT_ON(RowShareLock) |
      96              :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      97              :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      98              :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      99              : 
     100              :     /* AccessExclusiveLock */
     101              :     LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
     102              :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
     103              :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
     104              :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
     105              : 
     106              : };
     107              : 
     108              : /* Names of lock modes, for debug printouts */
     109              : static const char *const lock_mode_names[] =
     110              : {
     111              :     "INVALID",
     112              :     "AccessShareLock",
     113              :     "RowShareLock",
     114              :     "RowExclusiveLock",
     115              :     "ShareUpdateExclusiveLock",
     116              :     "ShareLock",
     117              :     "ShareRowExclusiveLock",
     118              :     "ExclusiveLock",
     119              :     "AccessExclusiveLock"
     120              : };
     121              : 
     122              : #ifndef LOCK_DEBUG
     123              : static bool Dummy_trace = false;
     124              : #endif
     125              : 
     126              : static const LockMethodData default_lockmethod = {
     127              :     MaxLockMode,
     128              :     LockConflicts,
     129              :     lock_mode_names,
     130              : #ifdef LOCK_DEBUG
     131              :     &Trace_locks
     132              : #else
     133              :     &Dummy_trace
     134              : #endif
     135              : };
     136              : 
     137              : static const LockMethodData user_lockmethod = {
     138              :     MaxLockMode,
     139              :     LockConflicts,
     140              :     lock_mode_names,
     141              : #ifdef LOCK_DEBUG
     142              :     &Trace_userlocks
     143              : #else
     144              :     &Dummy_trace
     145              : #endif
     146              : };
     147              : 
     148              : /*
     149              :  * map from lock method id to the lock table data structures
     150              :  */
     151              : static const LockMethod LockMethods[] = {
     152              :     NULL,
     153              :     &default_lockmethod,
     154              :     &user_lockmethod
     155              : };
     156              : 
     157              : 
     158              : /* Record that's written to 2PC state file when a lock is persisted */
     159              : typedef struct TwoPhaseLockRecord
     160              : {
     161              :     LOCKTAG     locktag;
     162              :     LOCKMODE    lockmode;
     163              : } TwoPhaseLockRecord;
     164              : 
     165              : 
     166              : /*
     167              :  * Count of the number of fast path lock slots we believe to be used.  This
     168              :  * might be higher than the real number if another backend has transferred
     169              :  * our locks to the primary lock table, but it can never be lower than the
     170              :  * real value, since only we can acquire locks on our own behalf.
     171              :  *
     172              :  * XXX Allocate a static array of the maximum size. We could use a pointer
     173              :  * and then allocate just the right size to save a couple kB, but then we
     174              :  * would have to initialize that, while for the static array that happens
     175              :  * automatically. Doesn't seem worth the extra complexity.
     176              :  */
     177              : static int  FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX];
     178              : 
     179              : /*
     180              :  * Flag to indicate if the relation extension lock is held by this backend.
     181              :  * This flag is used to ensure that while holding the relation extension lock
     182              :  * we don't try to acquire a heavyweight lock on any other object.  This
     183              :  * restriction implies that the relation extension lock won't ever participate
     184              :  * in the deadlock cycle because we can never wait for any other heavyweight
     185              :  * lock after acquiring this lock.
     186              :  *
     187              :  * Such a restriction is okay for relation extension locks as unlike other
     188              :  * heavyweight locks these are not held till the transaction end.  These are
     189              :  * taken for a short duration to extend a particular relation and then
     190              :  * released.
     191              :  */
     192              : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
     193              : 
     194              : /*
     195              :  * Number of fast-path locks per backend - size of the arrays in PGPROC.
     196              :  * This is set only once during start, before initializing shared memory,
     197              :  * and remains constant after that.
     198              :  *
     199              :  * We set the limit based on max_locks_per_transaction GUC, because that's
     200              :  * the best information about expected number of locks per backend we have.
     201              :  * See InitializeFastPathLocks() for details.
     202              :  */
     203              : int         FastPathLockGroupsPerBackend = 0;
     204              : 
     205              : /*
     206              :  * Macros to calculate the fast-path group and index for a relation.
     207              :  *
     208              :  * The formula is a simple hash function, designed to spread the OIDs a bit,
     209              :  * so that even contiguous values end up in different groups. In most cases
     210              :  * there will be gaps anyway, but the multiplication should help a bit.
     211              :  *
     212              :  * The selected constant (49157) is a prime not too close to 2^k, and it's
     213              :  * small enough to not cause overflows (in 64-bit).
     214              :  *
     215              :  * We can assume that FastPathLockGroupsPerBackend is a power-of-two per
     216              :  * InitializeFastPathLocks().
     217              :  */
     218              : #define FAST_PATH_REL_GROUP(rel) \
     219              :     (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
     220              : 
     221              : /*
     222              :  * Given the group/slot indexes, calculate the slot index in the whole array
     223              :  * of fast-path lock slots.
     224              :  */
     225              : #define FAST_PATH_SLOT(group, index) \
     226              :     (AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
     227              :      AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
     228              :      ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
     229              : 
     230              : /*
     231              :  * Given a slot index (into the whole per-backend array), calculated using
     232              :  * the FAST_PATH_SLOT macro, split it into group and index (in the group).
     233              :  */
     234              : #define FAST_PATH_GROUP(index)  \
     235              :     (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
     236              :      ((index) / FP_LOCK_SLOTS_PER_GROUP))
     237              : #define FAST_PATH_INDEX(index)  \
     238              :     (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
     239              :      ((index) % FP_LOCK_SLOTS_PER_GROUP))
     240              : 
     241              : /* Macros for manipulating proc->fpLockBits */
     242              : #define FAST_PATH_BITS_PER_SLOT         3
     243              : #define FAST_PATH_LOCKNUMBER_OFFSET     1
     244              : #define FAST_PATH_MASK                  ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
     245              : #define FAST_PATH_BITS(proc, n)         (proc)->fpLockBits[FAST_PATH_GROUP(n)]
     246              : #define FAST_PATH_GET_BITS(proc, n) \
     247              :     ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
     248              : #define FAST_PATH_BIT_POSITION(n, l) \
     249              :     (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
     250              :      AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
     251              :      AssertMacro((n) < FastPathLockSlotsPerBackend()), \
     252              :      ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
     253              : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
     254              :      FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
     255              : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
     256              :      FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
     257              : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
     258              :      (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
     259              : 
     260              : /*
     261              :  * The fast-path lock mechanism is concerned only with relation locks on
     262              :  * unshared relations by backends bound to a database.  The fast-path
     263              :  * mechanism exists mostly to accelerate acquisition and release of locks
     264              :  * that rarely conflict.  Because ShareUpdateExclusiveLock is
     265              :  * self-conflicting, it can't use the fast-path mechanism; but it also does
     266              :  * not conflict with any of the locks that do, so we can ignore it completely.
     267              :  */
     268              : #define EligibleForRelationFastPath(locktag, mode) \
     269              :     ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
     270              :     (locktag)->locktag_type == LOCKTAG_RELATION && \
     271              :     (locktag)->locktag_field1 == MyDatabaseId && \
     272              :     MyDatabaseId != InvalidOid && \
     273              :     (mode) < ShareUpdateExclusiveLock)
     274              : #define ConflictsWithRelationFastPath(locktag, mode) \
     275              :     ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
     276              :     (locktag)->locktag_type == LOCKTAG_RELATION && \
     277              :     (locktag)->locktag_field1 != InvalidOid && \
     278              :     (mode) > ShareUpdateExclusiveLock)
     279              : 
     280              : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
     281              : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
     282              : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
     283              :                                           const LOCKTAG *locktag, uint32 hashcode);
     284              : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
     285              : 
     286              : /*
     287              :  * To make the fast-path lock mechanism work, we must have some way of
     288              :  * preventing the use of the fast-path when a conflicting lock might be present.
     289              :  * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
     290              :  * and maintain an integer count of the number of "strong" lockers
     291              :  * in each partition.  When any "strong" lockers are present (which is
     292              :  * hopefully not very often), the fast-path mechanism can't be used, and we
     293              :  * must fall back to the slower method of pushing matching locks directly
     294              :  * into the main lock tables.
     295              :  *
     296              :  * The deadlock detector does not know anything about the fast path mechanism,
     297              :  * so any locks that might be involved in a deadlock must be transferred from
     298              :  * the fast-path queues to the main lock table.
     299              :  */
     300              : 
     301              : #define FAST_PATH_STRONG_LOCK_HASH_BITS         10
     302              : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
     303              :     (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
     304              : #define FastPathStrongLockHashPartition(hashcode) \
     305              :     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
     306              : 
     307              : typedef struct
     308              : {
     309              :     slock_t     mutex;
     310              :     uint32      count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
     311              : } FastPathStrongRelationLockData;
     312              : 
     313              : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
     314              : 
     315              : 
     316              : /*
     317              :  * Pointers to hash tables containing lock state
     318              :  *
     319              :  * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
     320              :  * shared memory; LockMethodLocalHash is local to each backend.
     321              :  */
     322              : static HTAB *LockMethodLockHash;
     323              : static HTAB *LockMethodProcLockHash;
     324              : static HTAB *LockMethodLocalHash;
     325              : 
     326              : 
     327              : /* private state for error cleanup */
     328              : static LOCALLOCK *StrongLockInProgress;
     329              : static LOCALLOCK *awaitedLock;
     330              : static ResourceOwner awaitedOwner;
     331              : 
     332              : 
     333              : #ifdef LOCK_DEBUG
     334              : 
     335              : /*------
     336              :  * The following configuration options are available for lock debugging:
     337              :  *
     338              :  *     TRACE_LOCKS      -- give a bunch of output what's going on in this file
     339              :  *     TRACE_USERLOCKS  -- same but for user locks
     340              :  *     TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
     341              :  *                         (use to avoid output on system tables)
     342              :  *     TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
     343              :  *     DEBUG_DEADLOCKS  -- currently dumps locks at untimely occasions ;)
     344              :  *
     345              :  * Furthermore, but in storage/lmgr/lwlock.c:
     346              :  *     TRACE_LWLOCKS    -- trace lightweight locks (pretty useless)
     347              :  *
     348              :  * Define LOCK_DEBUG at compile time to get all these enabled.
     349              :  * --------
     350              :  */
     351              : 
     352              : int         Trace_lock_oidmin = FirstNormalObjectId;
     353              : bool        Trace_locks = false;
     354              : bool        Trace_userlocks = false;
     355              : int         Trace_lock_table = 0;
     356              : bool        Debug_deadlocks = false;
     357              : 
     358              : 
     359              : inline static bool
     360              : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
     361              : {
     362              :     return
     363              :         (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
     364              :          ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
     365              :         || (Trace_lock_table &&
     366              :             (tag->locktag_field2 == Trace_lock_table));
     367              : }
     368              : 
     369              : 
     370              : inline static void
     371              : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
     372              : {
     373              :     if (LOCK_DEBUG_ENABLED(&lock->tag))
     374              :         elog(LOG,
     375              :              "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
     376              :              "req(%d,%d,%d,%d,%d,%d,%d)=%d "
     377              :              "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
     378              :              where, lock,
     379              :              lock->tag.locktag_field1, lock->tag.locktag_field2,
     380              :              lock->tag.locktag_field3, lock->tag.locktag_field4,
     381              :              lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
     382              :              lock->grantMask,
     383              :              lock->requested[1], lock->requested[2], lock->requested[3],
     384              :              lock->requested[4], lock->requested[5], lock->requested[6],
     385              :              lock->requested[7], lock->nRequested,
     386              :              lock->granted[1], lock->granted[2], lock->granted[3],
     387              :              lock->granted[4], lock->granted[5], lock->granted[6],
     388              :              lock->granted[7], lock->nGranted,
     389              :              dclist_count(&lock->waitProcs),
     390              :              LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
     391              : }
     392              : 
     393              : 
     394              : inline static void
     395              : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
     396              : {
     397              :     if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
     398              :         elog(LOG,
     399              :              "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
     400              :              where, proclockP, proclockP->tag.myLock,
     401              :              PROCLOCK_LOCKMETHOD(*(proclockP)),
     402              :              proclockP->tag.myProc, (int) proclockP->holdMask);
     403              : }
     404              : #else                           /* not LOCK_DEBUG */
     405              : 
     406              : #define LOCK_PRINT(where, lock, type)  ((void) 0)
     407              : #define PROCLOCK_PRINT(where, proclockP)  ((void) 0)
     408              : #endif                          /* not LOCK_DEBUG */
     409              : 
     410              : 
     411              : static uint32 proclock_hash(const void *key, Size keysize);
     412              : static void RemoveLocalLock(LOCALLOCK *locallock);
     413              : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
     414              :                                   const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
     415              : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
     416              : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
     417              : static void FinishStrongLockAcquire(void);
     418              : static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
     419              : static void waitonlock_error_callback(void *arg);
     420              : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
     421              : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
     422              : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
     423              :                         PROCLOCK *proclock, LockMethod lockMethodTable);
     424              : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
     425              :                         LockMethod lockMethodTable, uint32 hashcode,
     426              :                         bool wakeupNeeded);
     427              : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
     428              :                                  LOCKTAG *locktag, LOCKMODE lockmode,
     429              :                                  bool decrement_strong_lock_count);
     430              : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
     431              :                                            BlockedProcsData *data);
     432              : 
     433              : 
     434              : /*
     435              :  * Initialize the lock manager's shmem data structures.
     436              :  *
     437              :  * This is called from CreateSharedMemoryAndSemaphores(), which see for more
     438              :  * comments.  In the normal postmaster case, the shared hash tables are
     439              :  * created here, and backends inherit pointers to them via fork().  In the
     440              :  * EXEC_BACKEND case, each backend re-executes this code to obtain pointers to
     441              :  * the already existing shared hash tables.  In either case, each backend must
     442              :  * also call InitLockManagerAccess() to create the locallock hash table.
     443              :  */
     444              : void
     445         1180 : LockManagerShmemInit(void)
     446              : {
     447              :     HASHCTL     info;
     448              :     int64       init_table_size,
     449              :                 max_table_size;
     450              :     bool        found;
     451              : 
     452              :     /*
     453              :      * Compute init/max size to request for lock hashtables.  Note these
     454              :      * calculations must agree with LockManagerShmemSize!
     455              :      */
     456         1180 :     max_table_size = NLOCKENTS();
     457         1180 :     init_table_size = max_table_size / 2;
     458              : 
     459              :     /*
     460              :      * Allocate hash table for LOCK structs.  This stores per-locked-object
     461              :      * information.
     462              :      */
     463         1180 :     info.keysize = sizeof(LOCKTAG);
     464         1180 :     info.entrysize = sizeof(LOCK);
     465         1180 :     info.num_partitions = NUM_LOCK_PARTITIONS;
     466              : 
     467         1180 :     LockMethodLockHash = ShmemInitHash("LOCK hash",
     468              :                                        init_table_size,
     469              :                                        max_table_size,
     470              :                                        &info,
     471              :                                        HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
     472              : 
     473              :     /* Assume an average of 2 holders per lock */
     474         1180 :     max_table_size *= 2;
     475         1180 :     init_table_size *= 2;
     476              : 
     477              :     /*
     478              :      * Allocate hash table for PROCLOCK structs.  This stores
     479              :      * per-lock-per-holder information.
     480              :      */
     481         1180 :     info.keysize = sizeof(PROCLOCKTAG);
     482         1180 :     info.entrysize = sizeof(PROCLOCK);
     483         1180 :     info.hash = proclock_hash;
     484         1180 :     info.num_partitions = NUM_LOCK_PARTITIONS;
     485              : 
     486         1180 :     LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
     487              :                                            init_table_size,
     488              :                                            max_table_size,
     489              :                                            &info,
     490              :                                            HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
     491              : 
     492              :     /*
     493              :      * Allocate fast-path structures.
     494              :      */
     495         1180 :     FastPathStrongRelationLocks =
     496         1180 :         ShmemInitStruct("Fast Path Strong Relation Lock Data",
     497              :                         sizeof(FastPathStrongRelationLockData), &found);
     498         1180 :     if (!found)
     499         1180 :         SpinLockInit(&FastPathStrongRelationLocks->mutex);
     500         1180 : }
     501              : 
     502              : /*
     503              :  * Initialize the lock manager's backend-private data structures.
     504              :  */
     505              : void
     506        24238 : InitLockManagerAccess(void)
     507              : {
     508              :     /*
     509              :      * Allocate non-shared hash table for LOCALLOCK structs.  This stores lock
     510              :      * counts and resource owner information.
     511              :      */
     512              :     HASHCTL     info;
     513              : 
     514        24238 :     info.keysize = sizeof(LOCALLOCKTAG);
     515        24238 :     info.entrysize = sizeof(LOCALLOCK);
     516              : 
     517        24238 :     LockMethodLocalHash = hash_create("LOCALLOCK hash",
     518              :                                       16,
     519              :                                       &info,
     520              :                                       HASH_ELEM | HASH_BLOBS);
     521        24238 : }
     522              : 
     523              : 
     524              : /*
     525              :  * Fetch the lock method table associated with a given lock
     526              :  */
     527              : LockMethod
     528          104 : GetLocksMethodTable(const LOCK *lock)
     529              : {
     530          104 :     LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
     531              : 
     532              :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
     533          104 :     return LockMethods[lockmethodid];
     534              : }
     535              : 
     536              : /*
     537              :  * Fetch the lock method table associated with a given locktag
     538              :  */
     539              : LockMethod
     540         1215 : GetLockTagsMethodTable(const LOCKTAG *locktag)
     541              : {
     542         1215 :     LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
     543              : 
     544              :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
     545         1215 :     return LockMethods[lockmethodid];
     546              : }
     547              : 
     548              : 
     549              : /*
     550              :  * Compute the hash code associated with a LOCKTAG.
     551              :  *
     552              :  * To avoid unnecessary recomputations of the hash code, we try to do this
     553              :  * just once per function, and then pass it around as needed.  Aside from
     554              :  * passing the hashcode to hash_search_with_hash_value(), we can extract
     555              :  * the lock partition number from the hashcode.
     556              :  */
     557              : uint32
     558     23837466 : LockTagHashCode(const LOCKTAG *locktag)
     559              : {
     560     23837466 :     return get_hash_value(LockMethodLockHash, locktag);
     561              : }
     562              : 
     563              : /*
     564              :  * Compute the hash code associated with a PROCLOCKTAG.
     565              :  *
     566              :  * Because we want to use just one set of partition locks for both the
     567              :  * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
     568              :  * fall into the same partition number as their associated LOCKs.
     569              :  * dynahash.c expects the partition number to be the low-order bits of
     570              :  * the hash code, and therefore a PROCLOCKTAG's hash code must have the
     571              :  * same low-order bits as the associated LOCKTAG's hash code.  We achieve
     572              :  * this with this specialized hash function.
     573              :  */
     574              : static uint32
     575          790 : proclock_hash(const void *key, Size keysize)
     576              : {
     577          790 :     const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
     578              :     uint32      lockhash;
     579              :     Datum       procptr;
     580              : 
     581              :     Assert(keysize == sizeof(PROCLOCKTAG));
     582              : 
     583              :     /* Look into the associated LOCK object, and compute its hash code */
     584          790 :     lockhash = LockTagHashCode(&proclocktag->myLock->tag);
     585              : 
     586              :     /*
     587              :      * To make the hash code also depend on the PGPROC, we xor the proc
     588              :      * struct's address into the hash code, left-shifted so that the
     589              :      * partition-number bits don't change.  Since this is only a hash, we
     590              :      * don't care if we lose high-order bits of the address; use an
     591              :      * intermediate variable to suppress cast-pointer-to-int warnings.
     592              :      */
     593          790 :     procptr = PointerGetDatum(proclocktag->myProc);
     594          790 :     lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
     595              : 
     596          790 :     return lockhash;
     597              : }
     598              : 
     599              : /*
     600              :  * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
     601              :  * for its underlying LOCK.
     602              :  *
     603              :  * We use this just to avoid redundant calls of LockTagHashCode().
     604              :  */
     605              : static inline uint32
     606      5483486 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
     607              : {
     608      5483486 :     uint32      lockhash = hashcode;
     609              :     Datum       procptr;
     610              : 
     611              :     /*
     612              :      * This must match proclock_hash()!
     613              :      */
     614      5483486 :     procptr = PointerGetDatum(proclocktag->myProc);
     615      5483486 :     lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
     616              : 
     617      5483486 :     return lockhash;
     618              : }
     619              : 
     620              : /*
     621              :  * Given two lock modes, return whether they would conflict.
     622              :  */
     623              : bool
     624        39023 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
     625              : {
     626        39023 :     LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
     627              : 
     628        39023 :     if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
     629          144 :         return true;
     630              : 
     631        38879 :     return false;
     632              : }
     633              : 
     634              : /*
     635              :  * LockHeldByMe -- test whether lock 'locktag' is held by the current
     636              :  *      transaction
     637              :  *
     638              :  * Returns true if current transaction holds a lock on 'tag' of mode
     639              :  * 'lockmode'.  If 'orstronger' is true, a stronger lockmode is also OK.
     640              :  * ("Stronger" is defined as "numerically higher", which is a bit
     641              :  * semantically dubious but is OK for the purposes we use this for.)
     642              :  */
     643              : bool
     644            0 : LockHeldByMe(const LOCKTAG *locktag,
     645              :              LOCKMODE lockmode, bool orstronger)
     646              : {
     647              :     LOCALLOCKTAG localtag;
     648              :     LOCALLOCK  *locallock;
     649              : 
     650              :     /*
     651              :      * See if there is a LOCALLOCK entry for this lock and lockmode
     652              :      */
     653            0 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     654            0 :     localtag.lock = *locktag;
     655            0 :     localtag.mode = lockmode;
     656              : 
     657            0 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     658              :                                           &localtag,
     659              :                                           HASH_FIND, NULL);
     660              : 
     661            0 :     if (locallock && locallock->nLocks > 0)
     662            0 :         return true;
     663              : 
     664            0 :     if (orstronger)
     665              :     {
     666              :         LOCKMODE    slockmode;
     667              : 
     668            0 :         for (slockmode = lockmode + 1;
     669            0 :              slockmode <= MaxLockMode;
     670            0 :              slockmode++)
     671              :         {
     672            0 :             if (LockHeldByMe(locktag, slockmode, false))
     673            0 :                 return true;
     674              :         }
     675              :     }
     676              : 
     677            0 :     return false;
     678              : }
     679              : 
     680              : #ifdef USE_ASSERT_CHECKING
     681              : /*
     682              :  * GetLockMethodLocalHash -- return the hash of local locks, for modules that
     683              :  *      evaluate assertions based on all locks held.
     684              :  */
     685              : HTAB *
     686              : GetLockMethodLocalHash(void)
     687              : {
     688              :     return LockMethodLocalHash;
     689              : }
     690              : #endif
     691              : 
     692              : /*
     693              :  * LockHasWaiters -- look up 'locktag' and check if releasing this
     694              :  *      lock would wake up other processes waiting for it.
     695              :  */
     696              : bool
     697            0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
     698              : {
     699            0 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
     700              :     LockMethod  lockMethodTable;
     701              :     LOCALLOCKTAG localtag;
     702              :     LOCALLOCK  *locallock;
     703              :     LOCK       *lock;
     704              :     PROCLOCK   *proclock;
     705              :     LWLock     *partitionLock;
     706            0 :     bool        hasWaiters = false;
     707              : 
     708            0 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
     709            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
     710            0 :     lockMethodTable = LockMethods[lockmethodid];
     711            0 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
     712            0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
     713              : 
     714              : #ifdef LOCK_DEBUG
     715              :     if (LOCK_DEBUG_ENABLED(locktag))
     716              :         elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
     717              :              locktag->locktag_field1, locktag->locktag_field2,
     718              :              lockMethodTable->lockModeNames[lockmode]);
     719              : #endif
     720              : 
     721              :     /*
     722              :      * Find the LOCALLOCK entry for this lock and lockmode
     723              :      */
     724            0 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     725            0 :     localtag.lock = *locktag;
     726            0 :     localtag.mode = lockmode;
     727              : 
     728            0 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     729              :                                           &localtag,
     730              :                                           HASH_FIND, NULL);
     731              : 
     732              :     /*
     733              :      * let the caller print its own error message, too. Do not ereport(ERROR).
     734              :      */
     735            0 :     if (!locallock || locallock->nLocks <= 0)
     736              :     {
     737            0 :         elog(WARNING, "you don't own a lock of type %s",
     738              :              lockMethodTable->lockModeNames[lockmode]);
     739            0 :         return false;
     740              :     }
     741              : 
     742              :     /*
     743              :      * Check the shared lock table.
     744              :      */
     745            0 :     partitionLock = LockHashPartitionLock(locallock->hashcode);
     746              : 
     747            0 :     LWLockAcquire(partitionLock, LW_SHARED);
     748              : 
     749              :     /*
     750              :      * We don't need to re-find the lock or proclock, since we kept their
     751              :      * addresses in the locallock table, and they couldn't have been removed
     752              :      * while we were holding a lock on them.
     753              :      */
     754            0 :     lock = locallock->lock;
     755              :     LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
     756            0 :     proclock = locallock->proclock;
     757              :     PROCLOCK_PRINT("LockHasWaiters: found", proclock);
     758              : 
     759              :     /*
     760              :      * Double-check that we are actually holding a lock of the type we want to
     761              :      * release.
     762              :      */
     763            0 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
     764              :     {
     765              :         PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
     766            0 :         LWLockRelease(partitionLock);
     767            0 :         elog(WARNING, "you don't own a lock of type %s",
     768              :              lockMethodTable->lockModeNames[lockmode]);
     769            0 :         RemoveLocalLock(locallock);
     770            0 :         return false;
     771              :     }
     772              : 
     773              :     /*
     774              :      * Do the checking.
     775              :      */
     776            0 :     if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
     777            0 :         hasWaiters = true;
     778              : 
     779            0 :     LWLockRelease(partitionLock);
     780              : 
     781            0 :     return hasWaiters;
     782              : }
     783              : 
     784              : /*
     785              :  * LockAcquire -- Check for lock conflicts, sleep if conflict found,
     786              :  *      set lock if/when no conflicts.
     787              :  *
     788              :  * Inputs:
     789              :  *  locktag: unique identifier for the lockable object
     790              :  *  lockmode: lock mode to acquire
     791              :  *  sessionLock: if true, acquire lock for session not current transaction
     792              :  *  dontWait: if true, don't wait to acquire lock
     793              :  *
     794              :  * Returns one of:
     795              :  *      LOCKACQUIRE_NOT_AVAIL       lock not available, and dontWait=true
     796              :  *      LOCKACQUIRE_OK              lock successfully acquired
     797              :  *      LOCKACQUIRE_ALREADY_HELD    incremented count for lock already held
     798              :  *      LOCKACQUIRE_ALREADY_CLEAR   incremented count for lock already clear
     799              :  *
     800              :  * In the normal case where dontWait=false and the caller doesn't need to
     801              :  * distinguish a freshly acquired lock from one already taken earlier in
     802              :  * this same transaction, there is no need to examine the return value.
     803              :  *
     804              :  * Side Effects: The lock is acquired and recorded in lock tables.
     805              :  *
     806              :  * NOTE: if we wait for the lock, there is no way to abort the wait
     807              :  * short of aborting the transaction.
     808              :  */
     809              : LockAcquireResult
     810      1026616 : LockAcquire(const LOCKTAG *locktag,
     811              :             LOCKMODE lockmode,
     812              :             bool sessionLock,
     813              :             bool dontWait)
     814              : {
     815      1026616 :     return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
     816              :                                true, NULL, false);
     817              : }
     818              : 
     819              : /*
     820              :  * LockAcquireExtended - allows us to specify additional options
     821              :  *
     822              :  * reportMemoryError specifies whether a lock request that fills the lock
     823              :  * table should generate an ERROR or not.  Passing "false" allows the caller
     824              :  * to attempt to recover from lock-table-full situations, perhaps by forcibly
     825              :  * canceling other lock holders and then retrying.  Note, however, that the
     826              :  * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
     827              :  * in combination with dontWait = true, as the cause of failure couldn't be
     828              :  * distinguished.
     829              :  *
     830              :  * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
     831              :  * table entry if a lock is successfully acquired, or NULL if not.
     832              :  *
     833              :  * logLockFailure indicates whether to log details when a lock acquisition
     834              :  * fails with dontWait = true.
     835              :  */
     836              : LockAcquireResult
     837     27607603 : LockAcquireExtended(const LOCKTAG *locktag,
     838              :                     LOCKMODE lockmode,
     839              :                     bool sessionLock,
     840              :                     bool dontWait,
     841              :                     bool reportMemoryError,
     842              :                     LOCALLOCK **locallockp,
     843              :                     bool logLockFailure)
     844              : {
     845     27607603 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
     846              :     LockMethod  lockMethodTable;
     847              :     LOCALLOCKTAG localtag;
     848              :     LOCALLOCK  *locallock;
     849              :     LOCK       *lock;
     850              :     PROCLOCK   *proclock;
     851              :     bool        found;
     852              :     ResourceOwner owner;
     853              :     uint32      hashcode;
     854              :     LWLock     *partitionLock;
     855              :     bool        found_conflict;
     856              :     ProcWaitStatus waitResult;
     857     27607603 :     bool        log_lock = false;
     858              : 
     859     27607603 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
     860            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
     861     27607603 :     lockMethodTable = LockMethods[lockmethodid];
     862     27607603 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
     863            0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
     864              : 
     865     27607603 :     if (RecoveryInProgress() && !InRecovery &&
     866       330409 :         (locktag->locktag_type == LOCKTAG_OBJECT ||
     867       330409 :          locktag->locktag_type == LOCKTAG_RELATION) &&
     868              :         lockmode > RowExclusiveLock)
     869            0 :         ereport(ERROR,
     870              :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
     871              :                  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
     872              :                         lockMethodTable->lockModeNames[lockmode]),
     873              :                  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
     874              : 
     875              : #ifdef LOCK_DEBUG
     876              :     if (LOCK_DEBUG_ENABLED(locktag))
     877              :         elog(LOG, "LockAcquire: lock [%u,%u] %s",
     878              :              locktag->locktag_field1, locktag->locktag_field2,
     879              :              lockMethodTable->lockModeNames[lockmode]);
     880              : #endif
     881              : 
     882              :     /* Identify owner for lock */
     883     27607603 :     if (sessionLock)
     884       146492 :         owner = NULL;
     885              :     else
     886     27461111 :         owner = CurrentResourceOwner;
     887              : 
     888              :     /*
     889              :      * Find or create a LOCALLOCK entry for this lock and lockmode
     890              :      */
     891     27607603 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     892     27607603 :     localtag.lock = *locktag;
     893     27607603 :     localtag.mode = lockmode;
     894              : 
     895     27607603 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     896              :                                           &localtag,
     897              :                                           HASH_ENTER, &found);
     898              : 
     899              :     /*
     900              :      * if it's a new locallock object, initialize it
     901              :      */
     902     27607603 :     if (!found)
     903              :     {
     904     23116965 :         locallock->lock = NULL;
     905     23116965 :         locallock->proclock = NULL;
     906     23116965 :         locallock->hashcode = LockTagHashCode(&(localtag.lock));
     907     23116965 :         locallock->nLocks = 0;
     908     23116965 :         locallock->holdsStrongLockCount = false;
     909     23116965 :         locallock->lockCleared = false;
     910     23116965 :         locallock->numLockOwners = 0;
     911     23116965 :         locallock->maxLockOwners = 8;
     912     23116965 :         locallock->lockOwners = NULL;    /* in case next line fails */
     913     23116965 :         locallock->lockOwners = (LOCALLOCKOWNER *)
     914     23116965 :             MemoryContextAlloc(TopMemoryContext,
     915     23116965 :                                locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
     916              :     }
     917              :     else
     918              :     {
     919              :         /* Make sure there will be room to remember the lock */
     920      4490638 :         if (locallock->numLockOwners >= locallock->maxLockOwners)
     921              :         {
     922           21 :             int         newsize = locallock->maxLockOwners * 2;
     923              : 
     924           21 :             locallock->lockOwners = (LOCALLOCKOWNER *)
     925           21 :                 repalloc(locallock->lockOwners,
     926              :                          newsize * sizeof(LOCALLOCKOWNER));
     927           21 :             locallock->maxLockOwners = newsize;
     928              :         }
     929              :     }
     930     27607603 :     hashcode = locallock->hashcode;
     931              : 
     932     27607603 :     if (locallockp)
     933     26580893 :         *locallockp = locallock;
     934              : 
     935              :     /*
     936              :      * If we already hold the lock, we can just increase the count locally.
     937              :      *
     938              :      * If lockCleared is already set, caller need not worry about absorbing
     939              :      * sinval messages related to the lock's object.
     940              :      */
     941     27607603 :     if (locallock->nLocks > 0)
     942              :     {
     943      4490638 :         GrantLockLocal(locallock, owner);
     944      4490638 :         if (locallock->lockCleared)
     945      4395985 :             return LOCKACQUIRE_ALREADY_CLEAR;
     946              :         else
     947        94653 :             return LOCKACQUIRE_ALREADY_HELD;
     948              :     }
     949              : 
     950              :     /*
     951              :      * We don't acquire any other heavyweight lock while holding the relation
     952              :      * extension lock.  We do allow to acquire the same relation extension
     953              :      * lock more than once but that case won't reach here.
     954              :      */
     955              :     Assert(!IsRelationExtensionLockHeld);
     956              : 
     957              :     /*
     958              :      * Prepare to emit a WAL record if acquisition of this lock needs to be
     959              :      * replayed in a standby server.
     960              :      *
     961              :      * Here we prepare to log; after lock is acquired we'll issue log record.
     962              :      * This arrangement simplifies error recovery in case the preparation step
     963              :      * fails.
     964              :      *
     965              :      * Only AccessExclusiveLocks can conflict with lock types that read-only
     966              :      * transactions can acquire in a standby server. Make sure this definition
     967              :      * matches the one in GetRunningTransactionLocks().
     968              :      */
     969     23116965 :     if (lockmode >= AccessExclusiveLock &&
     970       301986 :         locktag->locktag_type == LOCKTAG_RELATION &&
     971       198442 :         !RecoveryInProgress() &&
     972       171738 :         XLogStandbyInfoActive())
     973              :     {
     974       137685 :         LogAccessExclusiveLockPrepare();
     975       137685 :         log_lock = true;
     976              :     }
     977              : 
     978              :     /*
     979              :      * Attempt to take lock via fast path, if eligible.  But if we remember
     980              :      * having filled up the fast path array, we don't attempt to make any
     981              :      * further use of it until we release some locks.  It's possible that some
     982              :      * other backend has transferred some of those locks to the shared hash
     983              :      * table, leaving space free, but it's not worth acquiring the LWLock just
     984              :      * to check.  It's also possible that we're acquiring a second or third
     985              :      * lock type on a relation we have already locked using the fast-path, but
     986              :      * for now we don't worry about that case either.
     987              :      */
     988     23116965 :     if (EligibleForRelationFastPath(locktag, lockmode))
     989              :     {
     990     20822275 :         if (FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] <
     991              :             FP_LOCK_SLOTS_PER_GROUP)
     992              :         {
     993     20562054 :             uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
     994              :             bool        acquired;
     995              : 
     996              :             /*
     997              :              * LWLockAcquire acts as a memory sequencing point, so it's safe
     998              :              * to assume that any strong locker whose increment to
     999              :              * FastPathStrongRelationLocks->counts becomes visible after we
    1000              :              * test it has yet to begin to transfer fast-path locks.
    1001              :              */
    1002     20562054 :             LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    1003     20562054 :             if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
    1004       306591 :                 acquired = false;
    1005              :             else
    1006     20255463 :                 acquired = FastPathGrantRelationLock(locktag->locktag_field2,
    1007              :                                                      lockmode);
    1008     20562054 :             LWLockRelease(&MyProc->fpInfoLock);
    1009     20562054 :             if (acquired)
    1010              :             {
    1011              :                 /*
    1012              :                  * The locallock might contain stale pointers to some old
    1013              :                  * shared objects; we MUST reset these to null before
    1014              :                  * considering the lock to be acquired via fast-path.
    1015              :                  */
    1016     20255463 :                 locallock->lock = NULL;
    1017     20255463 :                 locallock->proclock = NULL;
    1018     20255463 :                 GrantLockLocal(locallock, owner);
    1019     20255463 :                 return LOCKACQUIRE_OK;
    1020              :             }
    1021              :         }
    1022              :         else
    1023              :         {
    1024              :             /*
    1025              :              * Increment the lock statistics counter if lock could not be
    1026              :              * acquired via the fast-path.
    1027              :              */
    1028       260221 :             pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type);
    1029              :         }
    1030              :     }
    1031              : 
    1032              :     /*
    1033              :      * If this lock could potentially have been taken via the fast-path by
    1034              :      * some other backend, we must (temporarily) disable further use of the
    1035              :      * fast-path for this lock tag, and migrate any locks already taken via
    1036              :      * this method to the main lock table.
    1037              :      */
    1038      2861502 :     if (ConflictsWithRelationFastPath(locktag, lockmode))
    1039              :     {
    1040       236635 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
    1041              : 
    1042       236635 :         BeginStrongLockAcquire(locallock, fasthashcode);
    1043       236635 :         if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
    1044              :                                            hashcode))
    1045              :         {
    1046            0 :             AbortStrongLockAcquire();
    1047            0 :             if (locallock->nLocks == 0)
    1048            0 :                 RemoveLocalLock(locallock);
    1049            0 :             if (locallockp)
    1050            0 :                 *locallockp = NULL;
    1051            0 :             if (reportMemoryError)
    1052            0 :                 ereport(ERROR,
    1053              :                         (errcode(ERRCODE_OUT_OF_MEMORY),
    1054              :                          errmsg("out of shared memory"),
    1055              :                          errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    1056              :             else
    1057            0 :                 return LOCKACQUIRE_NOT_AVAIL;
    1058              :         }
    1059              :     }
    1060              : 
    1061              :     /*
    1062              :      * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
    1063              :      * take it via the fast-path, either, so we've got to mess with the shared
    1064              :      * lock table.
    1065              :      */
    1066      2861502 :     partitionLock = LockHashPartitionLock(hashcode);
    1067              : 
    1068      2861502 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    1069              : 
    1070              :     /*
    1071              :      * Find or create lock and proclock entries with this tag
    1072              :      *
    1073              :      * Note: if the locallock object already existed, it might have a pointer
    1074              :      * to the lock already ... but we should not assume that that pointer is
    1075              :      * valid, since a lock object with zero hold and request counts can go
    1076              :      * away anytime.  So we have to use SetupLockInTable() to recompute the
    1077              :      * lock and proclock pointers, even if they're already set.
    1078              :      */
    1079      2861502 :     proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
    1080              :                                 hashcode, lockmode);
    1081      2861502 :     if (!proclock)
    1082              :     {
    1083            0 :         AbortStrongLockAcquire();
    1084            0 :         LWLockRelease(partitionLock);
    1085            0 :         if (locallock->nLocks == 0)
    1086            0 :             RemoveLocalLock(locallock);
    1087            0 :         if (locallockp)
    1088            0 :             *locallockp = NULL;
    1089            0 :         if (reportMemoryError)
    1090            0 :             ereport(ERROR,
    1091              :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    1092              :                      errmsg("out of shared memory"),
    1093              :                      errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    1094              :         else
    1095            0 :             return LOCKACQUIRE_NOT_AVAIL;
    1096              :     }
    1097      2861502 :     locallock->proclock = proclock;
    1098      2861502 :     lock = proclock->tag.myLock;
    1099      2861502 :     locallock->lock = lock;
    1100              : 
    1101              :     /*
    1102              :      * If lock requested conflicts with locks requested by waiters, must join
    1103              :      * wait queue.  Otherwise, check for conflict with already-held locks.
    1104              :      * (That's last because most complex check.)
    1105              :      */
    1106      2861502 :     if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
    1107          247 :         found_conflict = true;
    1108              :     else
    1109      2861255 :         found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
    1110              :                                             lock, proclock);
    1111              : 
    1112      2861502 :     if (!found_conflict)
    1113              :     {
    1114              :         /* No conflict with held or previously requested locks */
    1115      2859277 :         GrantLock(lock, proclock, lockmode);
    1116      2859277 :         waitResult = PROC_WAIT_STATUS_OK;
    1117              :     }
    1118              :     else
    1119              :     {
    1120              :         /*
    1121              :          * Join the lock's wait queue.  We call this even in the dontWait
    1122              :          * case, because JoinWaitQueue() may discover that we can acquire the
    1123              :          * lock immediately after all.
    1124              :          */
    1125         2225 :         waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
    1126              :     }
    1127              : 
    1128      2861502 :     if (waitResult == PROC_WAIT_STATUS_ERROR)
    1129              :     {
    1130              :         /*
    1131              :          * We're not getting the lock because a deadlock was detected already
    1132              :          * while trying to join the wait queue, or because we would have to
    1133              :          * wait but the caller requested no blocking.
    1134              :          *
    1135              :          * Undo the changes to shared entries before releasing the partition
    1136              :          * lock.
    1137              :          */
    1138          738 :         AbortStrongLockAcquire();
    1139              : 
    1140          738 :         if (proclock->holdMask == 0)
    1141              :         {
    1142              :             uint32      proclock_hashcode;
    1143              : 
    1144          535 :             proclock_hashcode = ProcLockHashCode(&proclock->tag,
    1145              :                                                  hashcode);
    1146          535 :             dlist_delete(&proclock->lockLink);
    1147          535 :             dlist_delete(&proclock->procLink);
    1148          535 :             if (!hash_search_with_hash_value(LockMethodProcLockHash,
    1149          535 :                                              &(proclock->tag),
    1150              :                                              proclock_hashcode,
    1151              :                                              HASH_REMOVE,
    1152              :                                              NULL))
    1153            0 :                 elog(PANIC, "proclock table corrupted");
    1154              :         }
    1155              :         else
    1156              :             PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
    1157          738 :         lock->nRequested--;
    1158          738 :         lock->requested[lockmode]--;
    1159              :         LOCK_PRINT("LockAcquire: did not join wait queue",
    1160              :                    lock, lockmode);
    1161              :         Assert((lock->nRequested > 0) &&
    1162              :                (lock->requested[lockmode] >= 0));
    1163              :         Assert(lock->nGranted <= lock->nRequested);
    1164          738 :         LWLockRelease(partitionLock);
    1165          738 :         if (locallock->nLocks == 0)
    1166          738 :             RemoveLocalLock(locallock);
    1167              : 
    1168          738 :         if (dontWait)
    1169              :         {
    1170              :             /*
    1171              :              * Log lock holders and waiters as a detail log message if
    1172              :              * logLockFailure = true and lock acquisition fails with dontWait
    1173              :              * = true
    1174              :              */
    1175          737 :             if (logLockFailure)
    1176              :             {
    1177              :                 StringInfoData buf,
    1178              :                             lock_waiters_sbuf,
    1179              :                             lock_holders_sbuf;
    1180              :                 const char *modename;
    1181            0 :                 int         lockHoldersNum = 0;
    1182              : 
    1183            0 :                 initStringInfo(&buf);
    1184            0 :                 initStringInfo(&lock_waiters_sbuf);
    1185            0 :                 initStringInfo(&lock_holders_sbuf);
    1186              : 
    1187            0 :                 DescribeLockTag(&buf, &locallock->tag.lock);
    1188            0 :                 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
    1189              :                                            lockmode);
    1190              : 
    1191              :                 /* Gather a list of all lock holders and waiters */
    1192            0 :                 LWLockAcquire(partitionLock, LW_SHARED);
    1193            0 :                 GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
    1194              :                                          &lock_waiters_sbuf, &lockHoldersNum);
    1195            0 :                 LWLockRelease(partitionLock);
    1196              : 
    1197            0 :                 ereport(LOG,
    1198              :                         (errmsg("process %d could not obtain %s on %s",
    1199              :                                 MyProcPid, modename, buf.data),
    1200              :                          errdetail_log_plural(
    1201              :                                               "Process holding the lock: %s, Wait queue: %s.",
    1202              :                                               "Processes holding the lock: %s, Wait queue: %s.",
    1203              :                                               lockHoldersNum,
    1204              :                                               lock_holders_sbuf.data,
    1205              :                                               lock_waiters_sbuf.data)));
    1206              : 
    1207            0 :                 pfree(buf.data);
    1208            0 :                 pfree(lock_holders_sbuf.data);
    1209            0 :                 pfree(lock_waiters_sbuf.data);
    1210              :             }
    1211          737 :             if (locallockp)
    1212          223 :                 *locallockp = NULL;
    1213          737 :             return LOCKACQUIRE_NOT_AVAIL;
    1214              :         }
    1215              :         else
    1216              :         {
    1217            1 :             DeadLockReport();
    1218              :             /* DeadLockReport() will not return */
    1219              :         }
    1220              :     }
    1221              : 
    1222              :     /*
    1223              :      * We are now in the lock queue, or the lock was already granted.  If
    1224              :      * queued, go to sleep.
    1225              :      */
    1226      2860764 :     if (waitResult == PROC_WAIT_STATUS_WAITING)
    1227              :     {
    1228              :         Assert(!dontWait);
    1229              :         PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
    1230              :         LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
    1231         1481 :         LWLockRelease(partitionLock);
    1232              : 
    1233         1481 :         waitResult = WaitOnLock(locallock, owner);
    1234              : 
    1235              :         /*
    1236              :          * NOTE: do not do any material change of state between here and
    1237              :          * return.  All required changes in locktable state must have been
    1238              :          * done when the lock was granted to us --- see notes in WaitOnLock.
    1239              :          */
    1240              : 
    1241         1436 :         if (waitResult == PROC_WAIT_STATUS_ERROR)
    1242              :         {
    1243              :             /*
    1244              :              * We failed as a result of a deadlock, see CheckDeadLock(). Quit
    1245              :              * now.
    1246              :              */
    1247              :             Assert(!dontWait);
    1248            5 :             DeadLockReport();
    1249              :             /* DeadLockReport() will not return */
    1250              :         }
    1251              :     }
    1252              :     else
    1253      2859283 :         LWLockRelease(partitionLock);
    1254              :     Assert(waitResult == PROC_WAIT_STATUS_OK);
    1255              : 
    1256              :     /* The lock was granted to us.  Update the local lock entry accordingly */
    1257              :     Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
    1258      2860714 :     GrantLockLocal(locallock, owner);
    1259              : 
    1260              :     /*
    1261              :      * Lock state is fully up-to-date now; if we error out after this, no
    1262              :      * special error cleanup is required.
    1263              :      */
    1264      2860714 :     FinishStrongLockAcquire();
    1265              : 
    1266              :     /*
    1267              :      * Emit a WAL record if acquisition of this lock needs to be replayed in a
    1268              :      * standby server.
    1269              :      */
    1270      2860714 :     if (log_lock)
    1271              :     {
    1272              :         /*
    1273              :          * Decode the locktag back to the original values, to avoid sending
    1274              :          * lots of empty bytes with every message.  See lock.h to check how a
    1275              :          * locktag is defined for LOCKTAG_RELATION
    1276              :          */
    1277       137472 :         LogAccessExclusiveLock(locktag->locktag_field1,
    1278       137472 :                                locktag->locktag_field2);
    1279              :     }
    1280              : 
    1281      2860714 :     return LOCKACQUIRE_OK;
    1282              : }
    1283              : 
    1284              : /*
    1285              :  * Find or create LOCK and PROCLOCK objects as needed for a new lock
    1286              :  * request.
    1287              :  *
    1288              :  * Returns the PROCLOCK object, or NULL if we failed to create the objects
    1289              :  * for lack of shared memory.
    1290              :  *
    1291              :  * The appropriate partition lock must be held at entry, and will be
    1292              :  * held at exit.
    1293              :  */
    1294              : static PROCLOCK *
    1295      2863673 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
    1296              :                  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
    1297              : {
    1298              :     LOCK       *lock;
    1299              :     PROCLOCK   *proclock;
    1300              :     PROCLOCKTAG proclocktag;
    1301              :     uint32      proclock_hashcode;
    1302              :     bool        found;
    1303              : 
    1304              :     /*
    1305              :      * Find or create a lock with this tag.
    1306              :      */
    1307      2863673 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    1308              :                                                 locktag,
    1309              :                                                 hashcode,
    1310              :                                                 HASH_ENTER_NULL,
    1311              :                                                 &found);
    1312      2863673 :     if (!lock)
    1313            0 :         return NULL;
    1314              : 
    1315              :     /*
    1316              :      * if it's a new lock object, initialize it
    1317              :      */
    1318      2863673 :     if (!found)
    1319              :     {
    1320      2592965 :         lock->grantMask = 0;
    1321      2592965 :         lock->waitMask = 0;
    1322      2592965 :         dlist_init(&lock->procLocks);
    1323      2592965 :         dclist_init(&lock->waitProcs);
    1324      2592965 :         lock->nRequested = 0;
    1325      2592965 :         lock->nGranted = 0;
    1326     15557790 :         MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
    1327      2592965 :         MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
    1328              :         LOCK_PRINT("LockAcquire: new", lock, lockmode);
    1329              :     }
    1330              :     else
    1331              :     {
    1332              :         LOCK_PRINT("LockAcquire: found", lock, lockmode);
    1333              :         Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
    1334              :         Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
    1335              :         Assert(lock->nGranted <= lock->nRequested);
    1336              :     }
    1337              : 
    1338              :     /*
    1339              :      * Create the hash key for the proclock table.
    1340              :      */
    1341      2863673 :     proclocktag.myLock = lock;
    1342      2863673 :     proclocktag.myProc = proc;
    1343              : 
    1344      2863673 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    1345              : 
    1346              :     /*
    1347              :      * Find or create a proclock entry with this tag
    1348              :      */
    1349      2863673 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    1350              :                                                         &proclocktag,
    1351              :                                                         proclock_hashcode,
    1352              :                                                         HASH_ENTER_NULL,
    1353              :                                                         &found);
    1354      2863673 :     if (!proclock)
    1355              :     {
    1356              :         /* Oops, not enough shmem for the proclock */
    1357            0 :         if (lock->nRequested == 0)
    1358              :         {
    1359              :             /*
    1360              :              * There are no other requestors of this lock, so garbage-collect
    1361              :              * the lock object.  We *must* do this to avoid a permanent leak
    1362              :              * of shared memory, because there won't be anything to cause
    1363              :              * anyone to release the lock object later.
    1364              :              */
    1365              :             Assert(dlist_is_empty(&(lock->procLocks)));
    1366            0 :             if (!hash_search_with_hash_value(LockMethodLockHash,
    1367            0 :                                              &(lock->tag),
    1368              :                                              hashcode,
    1369              :                                              HASH_REMOVE,
    1370              :                                              NULL))
    1371            0 :                 elog(PANIC, "lock table corrupted");
    1372              :         }
    1373            0 :         return NULL;
    1374              :     }
    1375              : 
    1376              :     /*
    1377              :      * If new, initialize the new entry
    1378              :      */
    1379      2863673 :     if (!found)
    1380              :     {
    1381      2617006 :         uint32      partition = LockHashPartition(hashcode);
    1382              : 
    1383              :         /*
    1384              :          * It might seem unsafe to access proclock->groupLeader without a
    1385              :          * lock, but it's not really.  Either we are initializing a proclock
    1386              :          * on our own behalf, in which case our group leader isn't changing
    1387              :          * because the group leader for a process can only ever be changed by
    1388              :          * the process itself; or else we are transferring a fast-path lock to
    1389              :          * the main lock table, in which case that process can't change its
    1390              :          * lock group leader without first releasing all of its locks (and in
    1391              :          * particular the one we are currently transferring).
    1392              :          */
    1393      5234012 :         proclock->groupLeader = proc->lockGroupLeader != NULL ?
    1394      2617006 :             proc->lockGroupLeader : proc;
    1395      2617006 :         proclock->holdMask = 0;
    1396      2617006 :         proclock->releaseMask = 0;
    1397              :         /* Add proclock to appropriate lists */
    1398      2617006 :         dlist_push_tail(&lock->procLocks, &proclock->lockLink);
    1399      2617006 :         dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
    1400              :         PROCLOCK_PRINT("LockAcquire: new", proclock);
    1401              :     }
    1402              :     else
    1403              :     {
    1404              :         PROCLOCK_PRINT("LockAcquire: found", proclock);
    1405              :         Assert((proclock->holdMask & ~lock->grantMask) == 0);
    1406              : 
    1407              : #ifdef CHECK_DEADLOCK_RISK
    1408              : 
    1409              :         /*
    1410              :          * Issue warning if we already hold a lower-level lock on this object
    1411              :          * and do not hold a lock of the requested level or higher. This
    1412              :          * indicates a deadlock-prone coding practice (eg, we'd have a
    1413              :          * deadlock if another backend were following the same code path at
    1414              :          * about the same time).
    1415              :          *
    1416              :          * This is not enabled by default, because it may generate log entries
    1417              :          * about user-level coding practices that are in fact safe in context.
    1418              :          * It can be enabled to help find system-level problems.
    1419              :          *
    1420              :          * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
    1421              :          * better to use a table.  For now, though, this works.
    1422              :          */
    1423              :         {
    1424              :             int         i;
    1425              : 
    1426              :             for (i = lockMethodTable->numLockModes; i > 0; i--)
    1427              :             {
    1428              :                 if (proclock->holdMask & LOCKBIT_ON(i))
    1429              :                 {
    1430              :                     if (i >= (int) lockmode)
    1431              :                         break;  /* safe: we have a lock >= req level */
    1432              :                     elog(LOG, "deadlock risk: raising lock level"
    1433              :                          " from %s to %s on object %u/%u/%u",
    1434              :                          lockMethodTable->lockModeNames[i],
    1435              :                          lockMethodTable->lockModeNames[lockmode],
    1436              :                          lock->tag.locktag_field1, lock->tag.locktag_field2,
    1437              :                          lock->tag.locktag_field3);
    1438              :                     break;
    1439              :                 }
    1440              :             }
    1441              :         }
    1442              : #endif                          /* CHECK_DEADLOCK_RISK */
    1443              :     }
    1444              : 
    1445              :     /*
    1446              :      * lock->nRequested and lock->requested[] count the total number of
    1447              :      * requests, whether granted or waiting, so increment those immediately.
    1448              :      * The other counts don't increment till we get the lock.
    1449              :      */
    1450      2863673 :     lock->nRequested++;
    1451      2863673 :     lock->requested[lockmode]++;
    1452              :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    1453              : 
    1454              :     /*
    1455              :      * We shouldn't already hold the desired lock; else locallock table is
    1456              :      * broken.
    1457              :      */
    1458      2863673 :     if (proclock->holdMask & LOCKBIT_ON(lockmode))
    1459            0 :         elog(ERROR, "lock %s on object %u/%u/%u is already held",
    1460              :              lockMethodTable->lockModeNames[lockmode],
    1461              :              lock->tag.locktag_field1, lock->tag.locktag_field2,
    1462              :              lock->tag.locktag_field3);
    1463              : 
    1464      2863673 :     return proclock;
    1465              : }
    1466              : 
    1467              : /*
    1468              :  * Check and set/reset the flag that we hold the relation extension lock.
    1469              :  *
    1470              :  * It is callers responsibility that this function is called after
    1471              :  * acquiring/releasing the relation extension lock.
    1472              :  *
    1473              :  * Pass acquired as true if lock is acquired, false otherwise.
    1474              :  */
    1475              : static inline void
    1476     47368657 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
    1477              : {
    1478              : #ifdef USE_ASSERT_CHECKING
    1479              :     if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
    1480              :         IsRelationExtensionLockHeld = acquired;
    1481              : #endif
    1482     47368657 : }
    1483              : 
    1484              : /*
    1485              :  * Subroutine to free a locallock entry
    1486              :  */
    1487              : static void
    1488     23116965 : RemoveLocalLock(LOCALLOCK *locallock)
    1489              : {
    1490              :     int         i;
    1491              : 
    1492     23229184 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    1493              :     {
    1494       112219 :         if (locallock->lockOwners[i].owner != NULL)
    1495       112172 :             ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
    1496              :     }
    1497     23116965 :     locallock->numLockOwners = 0;
    1498     23116965 :     if (locallock->lockOwners != NULL)
    1499     23116965 :         pfree(locallock->lockOwners);
    1500     23116965 :     locallock->lockOwners = NULL;
    1501              : 
    1502     23116965 :     if (locallock->holdsStrongLockCount)
    1503              :     {
    1504              :         uint32      fasthashcode;
    1505              : 
    1506       236299 :         fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    1507              : 
    1508       236299 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1509              :         Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    1510       236299 :         FastPathStrongRelationLocks->count[fasthashcode]--;
    1511       236299 :         locallock->holdsStrongLockCount = false;
    1512       236299 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1513              :     }
    1514              : 
    1515     23116965 :     if (!hash_search(LockMethodLocalHash,
    1516     23116965 :                      &(locallock->tag),
    1517              :                      HASH_REMOVE, NULL))
    1518            0 :         elog(WARNING, "locallock table corrupted");
    1519              : 
    1520              :     /*
    1521              :      * Indicate that the lock is released for certain types of locks
    1522              :      */
    1523     23116965 :     CheckAndSetLockHeld(locallock, false);
    1524     23116965 : }
    1525              : 
    1526              : /*
    1527              :  * LockCheckConflicts -- test whether requested lock conflicts
    1528              :  *      with those already granted
    1529              :  *
    1530              :  * Returns true if conflict, false if no conflict.
    1531              :  *
    1532              :  * NOTES:
    1533              :  *      Here's what makes this complicated: one process's locks don't
    1534              :  * conflict with one another, no matter what purpose they are held for
    1535              :  * (eg, session and transaction locks do not conflict).  Nor do the locks
    1536              :  * of one process in a lock group conflict with those of another process in
    1537              :  * the same group.  So, we must subtract off these locks when determining
    1538              :  * whether the requested new lock conflicts with those already held.
    1539              :  */
    1540              : bool
    1541      2862976 : LockCheckConflicts(LockMethod lockMethodTable,
    1542              :                    LOCKMODE lockmode,
    1543              :                    LOCK *lock,
    1544              :                    PROCLOCK *proclock)
    1545              : {
    1546      2862976 :     int         numLockModes = lockMethodTable->numLockModes;
    1547              :     LOCKMASK    myLocks;
    1548      2862976 :     int         conflictMask = lockMethodTable->conflictTab[lockmode];
    1549              :     int         conflictsRemaining[MAX_LOCKMODES];
    1550      2862976 :     int         totalConflictsRemaining = 0;
    1551              :     dlist_iter  proclock_iter;
    1552              :     int         i;
    1553              : 
    1554              :     /*
    1555              :      * first check for global conflicts: If no locks conflict with my request,
    1556              :      * then I get the lock.
    1557              :      *
    1558              :      * Checking for conflict: lock->grantMask represents the types of
    1559              :      * currently held locks.  conflictTable[lockmode] has a bit set for each
    1560              :      * type of lock that conflicts with request.   Bitwise compare tells if
    1561              :      * there is a conflict.
    1562              :      */
    1563      2862976 :     if (!(conflictMask & lock->grantMask))
    1564              :     {
    1565              :         PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
    1566      2750567 :         return false;
    1567              :     }
    1568              : 
    1569              :     /*
    1570              :      * Rats.  Something conflicts.  But it could still be my own lock, or a
    1571              :      * lock held by another member of my locking group.  First, figure out how
    1572              :      * many conflicts remain after subtracting out any locks I hold myself.
    1573              :      */
    1574       112409 :     myLocks = proclock->holdMask;
    1575      1011681 :     for (i = 1; i <= numLockModes; i++)
    1576              :     {
    1577       899272 :         if ((conflictMask & LOCKBIT_ON(i)) == 0)
    1578              :         {
    1579       477256 :             conflictsRemaining[i] = 0;
    1580       477256 :             continue;
    1581              :         }
    1582       422016 :         conflictsRemaining[i] = lock->granted[i];
    1583       422016 :         if (myLocks & LOCKBIT_ON(i))
    1584       121492 :             --conflictsRemaining[i];
    1585       422016 :         totalConflictsRemaining += conflictsRemaining[i];
    1586              :     }
    1587              : 
    1588              :     /* If no conflicts remain, we get the lock. */
    1589       112409 :     if (totalConflictsRemaining == 0)
    1590              :     {
    1591              :         PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
    1592       109366 :         return false;
    1593              :     }
    1594              : 
    1595              :     /* If no group locking, it's definitely a conflict. */
    1596         3043 :     if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
    1597              :     {
    1598              :         Assert(proclock->tag.myProc == MyProc);
    1599              :         PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
    1600              :                        proclock);
    1601         1974 :         return true;
    1602              :     }
    1603              : 
    1604              :     /*
    1605              :      * The relation extension lock conflict even between the group members.
    1606              :      */
    1607         1069 :     if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
    1608              :     {
    1609              :         PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
    1610              :                        proclock);
    1611           11 :         return true;
    1612              :     }
    1613              : 
    1614              :     /*
    1615              :      * Locks held in conflicting modes by members of our own lock group are
    1616              :      * not real conflicts; we can subtract those out and see if we still have
    1617              :      * a conflict.  This is O(N) in the number of processes holding or
    1618              :      * awaiting locks on this object.  We could improve that by making the
    1619              :      * shared memory state more complex (and larger) but it doesn't seem worth
    1620              :      * it.
    1621              :      */
    1622         2168 :     dlist_foreach(proclock_iter, &lock->procLocks)
    1623              :     {
    1624         1894 :         PROCLOCK   *otherproclock =
    1625         1894 :             dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
    1626              : 
    1627         1894 :         if (proclock != otherproclock &&
    1628         1620 :             proclock->groupLeader == otherproclock->groupLeader &&
    1629          793 :             (otherproclock->holdMask & conflictMask) != 0)
    1630              :         {
    1631          791 :             int         intersectMask = otherproclock->holdMask & conflictMask;
    1632              : 
    1633         7119 :             for (i = 1; i <= numLockModes; i++)
    1634              :             {
    1635         6328 :                 if ((intersectMask & LOCKBIT_ON(i)) != 0)
    1636              :                 {
    1637          804 :                     if (conflictsRemaining[i] <= 0)
    1638            0 :                         elog(PANIC, "proclocks held do not match lock");
    1639          804 :                     conflictsRemaining[i]--;
    1640          804 :                     totalConflictsRemaining--;
    1641              :                 }
    1642              :             }
    1643              : 
    1644          791 :             if (totalConflictsRemaining == 0)
    1645              :             {
    1646              :                 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
    1647              :                                proclock);
    1648          784 :                 return false;
    1649              :             }
    1650              :         }
    1651              :     }
    1652              : 
    1653              :     /* Nope, it's a real conflict. */
    1654              :     PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
    1655          274 :     return true;
    1656              : }
    1657              : 
    1658              : /*
    1659              :  * GrantLock -- update the lock and proclock data structures to show
    1660              :  *      the lock request has been granted.
    1661              :  *
    1662              :  * NOTE: if proc was blocked, it also needs to be removed from the wait list
    1663              :  * and have its waitLock/waitProcLock fields cleared.  That's not done here.
    1664              :  *
    1665              :  * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
    1666              :  * table entry; but since we may be awaking some other process, we can't do
    1667              :  * that here; it's done by GrantLockLocal, instead.
    1668              :  */
    1669              : void
    1670      2862978 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
    1671              : {
    1672      2862978 :     lock->nGranted++;
    1673      2862978 :     lock->granted[lockmode]++;
    1674      2862978 :     lock->grantMask |= LOCKBIT_ON(lockmode);
    1675      2862978 :     if (lock->granted[lockmode] == lock->requested[lockmode])
    1676      2862550 :         lock->waitMask &= LOCKBIT_OFF(lockmode);
    1677      2862978 :     proclock->holdMask |= LOCKBIT_ON(lockmode);
    1678              :     LOCK_PRINT("GrantLock", lock, lockmode);
    1679              :     Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    1680              :     Assert(lock->nGranted <= lock->nRequested);
    1681      2862978 : }
    1682              : 
    1683              : /*
    1684              :  * UnGrantLock -- opposite of GrantLock.
    1685              :  *
    1686              :  * Updates the lock and proclock data structures to show that the lock
    1687              :  * is no longer held nor requested by the current holder.
    1688              :  *
    1689              :  * Returns true if there were any waiters waiting on the lock that
    1690              :  * should now be woken up with ProcLockWakeup.
    1691              :  */
    1692              : static bool
    1693      2862894 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
    1694              :             PROCLOCK *proclock, LockMethod lockMethodTable)
    1695              : {
    1696      2862894 :     bool        wakeupNeeded = false;
    1697              : 
    1698              :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    1699              :     Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    1700              :     Assert(lock->nGranted <= lock->nRequested);
    1701              : 
    1702              :     /*
    1703              :      * fix the general lock stats
    1704              :      */
    1705      2862894 :     lock->nRequested--;
    1706      2862894 :     lock->requested[lockmode]--;
    1707      2862894 :     lock->nGranted--;
    1708      2862894 :     lock->granted[lockmode]--;
    1709              : 
    1710      2862894 :     if (lock->granted[lockmode] == 0)
    1711              :     {
    1712              :         /* change the conflict mask.  No more of this lock type. */
    1713      2845659 :         lock->grantMask &= LOCKBIT_OFF(lockmode);
    1714              :     }
    1715              : 
    1716              :     LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
    1717              : 
    1718              :     /*
    1719              :      * We need only run ProcLockWakeup if the released lock conflicts with at
    1720              :      * least one of the lock types requested by waiter(s).  Otherwise whatever
    1721              :      * conflict made them wait must still exist.  NOTE: before MVCC, we could
    1722              :      * skip wakeup if lock->granted[lockmode] was still positive. But that's
    1723              :      * not true anymore, because the remaining granted locks might belong to
    1724              :      * some waiter, who could now be awakened because he doesn't conflict with
    1725              :      * his own locks.
    1726              :      */
    1727      2862894 :     if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
    1728         1394 :         wakeupNeeded = true;
    1729              : 
    1730              :     /*
    1731              :      * Now fix the per-proclock state.
    1732              :      */
    1733      2862894 :     proclock->holdMask &= LOCKBIT_OFF(lockmode);
    1734              :     PROCLOCK_PRINT("UnGrantLock: updated", proclock);
    1735              : 
    1736      2862894 :     return wakeupNeeded;
    1737              : }
    1738              : 
    1739              : /*
    1740              :  * CleanUpLock -- clean up after releasing a lock.  We garbage-collect the
    1741              :  * proclock and lock objects if possible, and call ProcLockWakeup if there
    1742              :  * are remaining requests and the caller says it's OK.  (Normally, this
    1743              :  * should be called after UnGrantLock, and wakeupNeeded is the result from
    1744              :  * UnGrantLock.)
    1745              :  *
    1746              :  * The appropriate partition lock must be held at entry, and will be
    1747              :  * held at exit.
    1748              :  */
    1749              : static void
    1750      2819010 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
    1751              :             LockMethod lockMethodTable, uint32 hashcode,
    1752              :             bool wakeupNeeded)
    1753              : {
    1754              :     /*
    1755              :      * If this was my last hold on this lock, delete my entry in the proclock
    1756              :      * table.
    1757              :      */
    1758      2819010 :     if (proclock->holdMask == 0)
    1759              :     {
    1760              :         uint32      proclock_hashcode;
    1761              : 
    1762              :         PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
    1763      2616477 :         dlist_delete(&proclock->lockLink);
    1764      2616477 :         dlist_delete(&proclock->procLink);
    1765      2616477 :         proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
    1766      2616477 :         if (!hash_search_with_hash_value(LockMethodProcLockHash,
    1767      2616477 :                                          &(proclock->tag),
    1768              :                                          proclock_hashcode,
    1769              :                                          HASH_REMOVE,
    1770              :                                          NULL))
    1771            0 :             elog(PANIC, "proclock table corrupted");
    1772              :     }
    1773              : 
    1774      2819010 :     if (lock->nRequested == 0)
    1775              :     {
    1776              :         /*
    1777              :          * The caller just released the last lock, so garbage-collect the lock
    1778              :          * object.
    1779              :          */
    1780              :         LOCK_PRINT("CleanUpLock: deleting", lock, 0);
    1781              :         Assert(dlist_is_empty(&lock->procLocks));
    1782      2592970 :         if (!hash_search_with_hash_value(LockMethodLockHash,
    1783      2592970 :                                          &(lock->tag),
    1784              :                                          hashcode,
    1785              :                                          HASH_REMOVE,
    1786              :                                          NULL))
    1787            0 :             elog(PANIC, "lock table corrupted");
    1788              :     }
    1789       226040 :     else if (wakeupNeeded)
    1790              :     {
    1791              :         /* There are waiters on this lock, so wake them up. */
    1792         1440 :         ProcLockWakeup(lockMethodTable, lock);
    1793              :     }
    1794      2819010 : }
    1795              : 
    1796              : /*
    1797              :  * GrantLockLocal -- update the locallock data structures to show
    1798              :  *      the lock request has been granted.
    1799              :  *
    1800              :  * We expect that LockAcquire made sure there is room to add a new
    1801              :  * ResourceOwner entry.
    1802              :  */
    1803              : static void
    1804     27606816 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
    1805              : {
    1806     27606816 :     LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    1807              :     int         i;
    1808              : 
    1809              :     Assert(locallock->numLockOwners < locallock->maxLockOwners);
    1810              :     /* Count the total */
    1811     27606816 :     locallock->nLocks++;
    1812              :     /* Count the per-owner lock */
    1813     29416639 :     for (i = 0; i < locallock->numLockOwners; i++)
    1814              :     {
    1815      5164947 :         if (lockOwners[i].owner == owner)
    1816              :         {
    1817      3355124 :             lockOwners[i].nLocks++;
    1818      3355124 :             return;
    1819              :         }
    1820              :     }
    1821     24251692 :     lockOwners[i].owner = owner;
    1822     24251692 :     lockOwners[i].nLocks = 1;
    1823     24251692 :     locallock->numLockOwners++;
    1824     24251692 :     if (owner != NULL)
    1825     24105737 :         ResourceOwnerRememberLock(owner, locallock);
    1826              : 
    1827              :     /* Indicate that the lock is acquired for certain types of locks. */
    1828     24251692 :     CheckAndSetLockHeld(locallock, true);
    1829              : }
    1830              : 
    1831              : /*
    1832              :  * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
    1833              :  * and arrange for error cleanup if it fails
    1834              :  */
    1835              : static void
    1836       236635 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
    1837              : {
    1838              :     Assert(StrongLockInProgress == NULL);
    1839              :     Assert(locallock->holdsStrongLockCount == false);
    1840              : 
    1841              :     /*
    1842              :      * Adding to a memory location is not atomic, so we take a spinlock to
    1843              :      * ensure we don't collide with someone else trying to bump the count at
    1844              :      * the same time.
    1845              :      *
    1846              :      * XXX: It might be worth considering using an atomic fetch-and-add
    1847              :      * instruction here, on architectures where that is supported.
    1848              :      */
    1849              : 
    1850       236635 :     SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1851       236635 :     FastPathStrongRelationLocks->count[fasthashcode]++;
    1852       236635 :     locallock->holdsStrongLockCount = true;
    1853       236635 :     StrongLockInProgress = locallock;
    1854       236635 :     SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1855       236635 : }
    1856              : 
    1857              : /*
    1858              :  * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
    1859              :  * acquisition once it's no longer needed
    1860              :  */
    1861              : static void
    1862      2860714 : FinishStrongLockAcquire(void)
    1863              : {
    1864      2860714 :     StrongLockInProgress = NULL;
    1865      2860714 : }
    1866              : 
    1867              : /*
    1868              :  * AbortStrongLockAcquire - undo strong lock state changes performed by
    1869              :  * BeginStrongLockAcquire.
    1870              :  */
    1871              : void
    1872       658913 : AbortStrongLockAcquire(void)
    1873              : {
    1874              :     uint32      fasthashcode;
    1875       658913 :     LOCALLOCK  *locallock = StrongLockInProgress;
    1876              : 
    1877       658913 :     if (locallock == NULL)
    1878       658700 :         return;
    1879              : 
    1880          213 :     fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    1881              :     Assert(locallock->holdsStrongLockCount == true);
    1882          213 :     SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1883              :     Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    1884          213 :     FastPathStrongRelationLocks->count[fasthashcode]--;
    1885          213 :     locallock->holdsStrongLockCount = false;
    1886          213 :     StrongLockInProgress = NULL;
    1887          213 :     SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1888              : }
    1889              : 
    1890              : /*
    1891              :  * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
    1892              :  *      WaitOnLock on.
    1893              :  *
    1894              :  * proc.c needs this for the case where we are booted off the lock by
    1895              :  * timeout, but discover that someone granted us the lock anyway.
    1896              :  *
    1897              :  * We could just export GrantLockLocal, but that would require including
    1898              :  * resowner.h in lock.h, which creates circularity.
    1899              :  */
    1900              : void
    1901            1 : GrantAwaitedLock(void)
    1902              : {
    1903            1 :     GrantLockLocal(awaitedLock, awaitedOwner);
    1904            1 : }
    1905              : 
    1906              : /*
    1907              :  * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
    1908              :  */
    1909              : LOCALLOCK *
    1910       658182 : GetAwaitedLock(void)
    1911              : {
    1912       658182 :     return awaitedLock;
    1913              : }
    1914              : 
    1915              : /*
    1916              :  * ResetAwaitedLock -- Forget that we are waiting on a lock.
    1917              :  */
    1918              : void
    1919           43 : ResetAwaitedLock(void)
    1920              : {
    1921           43 :     awaitedLock = NULL;
    1922           43 : }
    1923              : 
    1924              : /*
    1925              :  * MarkLockClear -- mark an acquired lock as "clear"
    1926              :  *
    1927              :  * This means that we know we have absorbed all sinval messages that other
    1928              :  * sessions generated before we acquired this lock, and so we can confidently
    1929              :  * assume we know about any catalog changes protected by this lock.
    1930              :  */
    1931              : void
    1932     22303240 : MarkLockClear(LOCALLOCK *locallock)
    1933              : {
    1934              :     Assert(locallock->nLocks > 0);
    1935     22303240 :     locallock->lockCleared = true;
    1936     22303240 : }
    1937              : 
    1938              : /*
    1939              :  * WaitOnLock -- wait to acquire a lock
    1940              :  *
    1941              :  * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
    1942              :  */
    1943              : static ProcWaitStatus
    1944         1481 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
    1945              : {
    1946              :     ProcWaitStatus result;
    1947              :     ErrorContextCallback waiterrcontext;
    1948              : 
    1949              :     TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
    1950              :                                      locallock->tag.lock.locktag_field2,
    1951              :                                      locallock->tag.lock.locktag_field3,
    1952              :                                      locallock->tag.lock.locktag_field4,
    1953              :                                      locallock->tag.lock.locktag_type,
    1954              :                                      locallock->tag.mode);
    1955              : 
    1956              :     /* Setup error traceback support for ereport() */
    1957         1481 :     waiterrcontext.callback = waitonlock_error_callback;
    1958         1481 :     waiterrcontext.arg = locallock;
    1959         1481 :     waiterrcontext.previous = error_context_stack;
    1960         1481 :     error_context_stack = &waiterrcontext;
    1961              : 
    1962              :     /* adjust the process title to indicate that it's waiting */
    1963         1481 :     set_ps_display_suffix("waiting");
    1964              : 
    1965              :     /*
    1966              :      * Record the fact that we are waiting for a lock, so that
    1967              :      * LockErrorCleanup will clean up if cancel/die happens.
    1968              :      */
    1969         1481 :     awaitedLock = locallock;
    1970         1481 :     awaitedOwner = owner;
    1971              : 
    1972              :     /*
    1973              :      * NOTE: Think not to put any shared-state cleanup after the call to
    1974              :      * ProcSleep, in either the normal or failure path.  The lock state must
    1975              :      * be fully set by the lock grantor, or by CheckDeadLock if we give up
    1976              :      * waiting for the lock.  This is necessary because of the possibility
    1977              :      * that a cancel/die interrupt will interrupt ProcSleep after someone else
    1978              :      * grants us the lock, but before we've noticed it. Hence, after granting,
    1979              :      * the locktable state must fully reflect the fact that we own the lock;
    1980              :      * we can't do additional work on return.
    1981              :      *
    1982              :      * We can and do use a PG_TRY block to try to clean up after failure, but
    1983              :      * this still has a major limitation: elog(FATAL) can occur while waiting
    1984              :      * (eg, a "die" interrupt), and then control won't come back here. So all
    1985              :      * cleanup of essential state should happen in LockErrorCleanup, not here.
    1986              :      * We can use PG_TRY to clear the "waiting" status flags, since doing that
    1987              :      * is unimportant if the process exits.
    1988              :      */
    1989         1481 :     PG_TRY();
    1990              :     {
    1991         1481 :         result = ProcSleep(locallock);
    1992              :     }
    1993           39 :     PG_CATCH();
    1994              :     {
    1995              :         /* In this path, awaitedLock remains set until LockErrorCleanup */
    1996              : 
    1997              :         /* reset ps display to remove the suffix */
    1998           39 :         set_ps_display_remove_suffix();
    1999              : 
    2000              :         /* and propagate the error */
    2001           39 :         PG_RE_THROW();
    2002              :     }
    2003         1436 :     PG_END_TRY();
    2004              : 
    2005              :     /*
    2006              :      * We no longer want LockErrorCleanup to do anything.
    2007              :      */
    2008         1436 :     awaitedLock = NULL;
    2009              : 
    2010              :     /* reset ps display to remove the suffix */
    2011         1436 :     set_ps_display_remove_suffix();
    2012              : 
    2013         1436 :     error_context_stack = waiterrcontext.previous;
    2014              : 
    2015              :     TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
    2016              :                                     locallock->tag.lock.locktag_field2,
    2017              :                                     locallock->tag.lock.locktag_field3,
    2018              :                                     locallock->tag.lock.locktag_field4,
    2019              :                                     locallock->tag.lock.locktag_type,
    2020              :                                     locallock->tag.mode);
    2021              : 
    2022         1436 :     return result;
    2023              : }
    2024              : 
    2025              : /*
    2026              :  * error context callback for failures in WaitOnLock
    2027              :  *
    2028              :  * We report which lock was being waited on, in the same style used in
    2029              :  * deadlock reports.  This helps with lock timeout errors in particular.
    2030              :  */
    2031              : static void
    2032          102 : waitonlock_error_callback(void *arg)
    2033              : {
    2034          102 :     LOCALLOCK  *locallock = (LOCALLOCK *) arg;
    2035          102 :     const LOCKTAG *tag = &locallock->tag.lock;
    2036          102 :     LOCKMODE    mode = locallock->tag.mode;
    2037              :     StringInfoData locktagbuf;
    2038              : 
    2039          102 :     initStringInfo(&locktagbuf);
    2040          102 :     DescribeLockTag(&locktagbuf, tag);
    2041              : 
    2042          204 :     errcontext("waiting for %s on %s",
    2043          102 :                GetLockmodeName(tag->locktag_lockmethodid, mode),
    2044              :                locktagbuf.data);
    2045          102 : }
    2046              : 
    2047              : /*
    2048              :  * Remove a proc from the wait-queue it is on (caller must know it is on one).
    2049              :  * This is only used when the proc has failed to get the lock, so we set its
    2050              :  * waitStatus to PROC_WAIT_STATUS_ERROR.
    2051              :  *
    2052              :  * Appropriate partition lock must be held by caller.  Also, caller is
    2053              :  * responsible for signaling the proc if needed.
    2054              :  *
    2055              :  * NB: this does not clean up any locallock object that may exist for the lock.
    2056              :  */
    2057              : void
    2058           47 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
    2059              : {
    2060           47 :     LOCK       *waitLock = proc->waitLock;
    2061           47 :     PROCLOCK   *proclock = proc->waitProcLock;
    2062           47 :     LOCKMODE    lockmode = proc->waitLockMode;
    2063           47 :     LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
    2064              : 
    2065              :     /* Make sure proc is waiting */
    2066              :     Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
    2067              :     Assert(!dlist_node_is_detached(&proc->waitLink));
    2068              :     Assert(waitLock);
    2069              :     Assert(!dclist_is_empty(&waitLock->waitProcs));
    2070              :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
    2071              : 
    2072              :     /* Remove proc from lock's wait queue */
    2073           47 :     dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->waitLink);
    2074              : 
    2075              :     /* Undo increments of request counts by waiting process */
    2076              :     Assert(waitLock->nRequested > 0);
    2077              :     Assert(waitLock->nRequested > proc->waitLock->nGranted);
    2078           47 :     waitLock->nRequested--;
    2079              :     Assert(waitLock->requested[lockmode] > 0);
    2080           47 :     waitLock->requested[lockmode]--;
    2081              :     /* don't forget to clear waitMask bit if appropriate */
    2082           47 :     if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
    2083           47 :         waitLock->waitMask &= LOCKBIT_OFF(lockmode);
    2084              : 
    2085              :     /* Clean up the proc's own state, and pass it the ok/fail signal */
    2086           47 :     proc->waitLock = NULL;
    2087           47 :     proc->waitProcLock = NULL;
    2088           47 :     proc->waitStatus = PROC_WAIT_STATUS_ERROR;
    2089              : 
    2090              :     /*
    2091              :      * Delete the proclock immediately if it represents no already-held locks.
    2092              :      * (This must happen now because if the owner of the lock decides to
    2093              :      * release it, and the requested/granted counts then go to zero,
    2094              :      * LockRelease expects there to be no remaining proclocks.) Then see if
    2095              :      * any other waiters for the lock can be woken up now.
    2096              :      */
    2097           47 :     CleanUpLock(waitLock, proclock,
    2098           47 :                 LockMethods[lockmethodid], hashcode,
    2099              :                 true);
    2100           47 : }
    2101              : 
    2102              : /*
    2103              :  * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
    2104              :  *      Release a session lock if 'sessionLock' is true, else release a
    2105              :  *      regular transaction lock.
    2106              :  *
    2107              :  * Side Effects: find any waiting processes that are now wakable,
    2108              :  *      grant them their requested locks and awaken them.
    2109              :  *      (We have to grant the lock here to avoid a race between
    2110              :  *      the waking process and any new process to
    2111              :  *      come along and request the lock.)
    2112              :  */
    2113              : bool
    2114     23773589 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
    2115              : {
    2116     23773589 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    2117              :     LockMethod  lockMethodTable;
    2118              :     LOCALLOCKTAG localtag;
    2119              :     LOCALLOCK  *locallock;
    2120              :     LOCK       *lock;
    2121              :     PROCLOCK   *proclock;
    2122              :     LWLock     *partitionLock;
    2123              :     bool        wakeupNeeded;
    2124              : 
    2125     23773589 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2126            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2127     23773589 :     lockMethodTable = LockMethods[lockmethodid];
    2128     23773589 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
    2129            0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
    2130              : 
    2131              : #ifdef LOCK_DEBUG
    2132              :     if (LOCK_DEBUG_ENABLED(locktag))
    2133              :         elog(LOG, "LockRelease: lock [%u,%u] %s",
    2134              :              locktag->locktag_field1, locktag->locktag_field2,
    2135              :              lockMethodTable->lockModeNames[lockmode]);
    2136              : #endif
    2137              : 
    2138              :     /*
    2139              :      * Find the LOCALLOCK entry for this lock and lockmode
    2140              :      */
    2141     23773589 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
    2142     23773589 :     localtag.lock = *locktag;
    2143     23773589 :     localtag.mode = lockmode;
    2144              : 
    2145     23773589 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
    2146              :                                           &localtag,
    2147              :                                           HASH_FIND, NULL);
    2148              : 
    2149              :     /*
    2150              :      * let the caller print its own error message, too. Do not ereport(ERROR).
    2151              :      */
    2152     23773589 :     if (!locallock || locallock->nLocks <= 0)
    2153              :     {
    2154           17 :         elog(WARNING, "you don't own a lock of type %s",
    2155              :              lockMethodTable->lockModeNames[lockmode]);
    2156           17 :         return false;
    2157              :     }
    2158              : 
    2159              :     /*
    2160              :      * Decrease the count for the resource owner.
    2161              :      */
    2162              :     {
    2163     23773572 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    2164              :         ResourceOwner owner;
    2165              :         int         i;
    2166              : 
    2167              :         /* Identify owner for lock */
    2168     23773572 :         if (sessionLock)
    2169       145950 :             owner = NULL;
    2170              :         else
    2171     23627622 :             owner = CurrentResourceOwner;
    2172              : 
    2173     23774808 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2174              :         {
    2175     23774792 :             if (lockOwners[i].owner == owner)
    2176              :             {
    2177              :                 Assert(lockOwners[i].nLocks > 0);
    2178     23773556 :                 if (--lockOwners[i].nLocks == 0)
    2179              :                 {
    2180     22510522 :                     if (owner != NULL)
    2181     22364614 :                         ResourceOwnerForgetLock(owner, locallock);
    2182              :                     /* compact out unused slot */
    2183     22510522 :                     locallock->numLockOwners--;
    2184     22510522 :                     if (i < locallock->numLockOwners)
    2185           79 :                         lockOwners[i] = lockOwners[locallock->numLockOwners];
    2186              :                 }
    2187     23773556 :                 break;
    2188              :             }
    2189              :         }
    2190     23773572 :         if (i < 0)
    2191              :         {
    2192              :             /* don't release a lock belonging to another owner */
    2193           16 :             elog(WARNING, "you don't own a lock of type %s",
    2194              :                  lockMethodTable->lockModeNames[lockmode]);
    2195           16 :             return false;
    2196              :         }
    2197              :     }
    2198              : 
    2199              :     /*
    2200              :      * Decrease the total local count.  If we're still holding the lock, we're
    2201              :      * done.
    2202              :      */
    2203     23773556 :     locallock->nLocks--;
    2204              : 
    2205     23773556 :     if (locallock->nLocks > 0)
    2206      2126136 :         return true;
    2207              : 
    2208              :     /*
    2209              :      * At this point we can no longer suppose we are clear of invalidation
    2210              :      * messages related to this lock.  Although we'll delete the LOCALLOCK
    2211              :      * object before any intentional return from this routine, it seems worth
    2212              :      * the trouble to explicitly reset lockCleared right now, just in case
    2213              :      * some error prevents us from deleting the LOCALLOCK.
    2214              :      */
    2215     21647420 :     locallock->lockCleared = false;
    2216              : 
    2217              :     /* Attempt fast release of any lock eligible for the fast path. */
    2218     21647420 :     if (EligibleForRelationFastPath(locktag, lockmode) &&
    2219     19988899 :         FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] > 0)
    2220              :     {
    2221              :         bool        released;
    2222              : 
    2223              :         /*
    2224              :          * We might not find the lock here, even if we originally entered it
    2225              :          * here.  Another backend may have moved it to the main table.
    2226              :          */
    2227     19736619 :         LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2228     19736619 :         released = FastPathUnGrantRelationLock(locktag->locktag_field2,
    2229              :                                                lockmode);
    2230     19736619 :         LWLockRelease(&MyProc->fpInfoLock);
    2231     19736619 :         if (released)
    2232              :         {
    2233     19464108 :             RemoveLocalLock(locallock);
    2234     19464108 :             return true;
    2235              :         }
    2236              :     }
    2237              : 
    2238              :     /*
    2239              :      * Otherwise we've got to mess with the shared lock table.
    2240              :      */
    2241      2183312 :     partitionLock = LockHashPartitionLock(locallock->hashcode);
    2242              : 
    2243      2183312 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2244              : 
    2245              :     /*
    2246              :      * Normally, we don't need to re-find the lock or proclock, since we kept
    2247              :      * their addresses in the locallock table, and they couldn't have been
    2248              :      * removed while we were holding a lock on them.  But it's possible that
    2249              :      * the lock was taken fast-path and has since been moved to the main hash
    2250              :      * table by another backend, in which case we will need to look up the
    2251              :      * objects here.  We assume the lock field is NULL if so.
    2252              :      */
    2253      2183312 :     lock = locallock->lock;
    2254      2183312 :     if (!lock)
    2255              :     {
    2256              :         PROCLOCKTAG proclocktag;
    2257              : 
    2258              :         Assert(EligibleForRelationFastPath(locktag, lockmode));
    2259            6 :         lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    2260              :                                                     locktag,
    2261              :                                                     locallock->hashcode,
    2262              :                                                     HASH_FIND,
    2263              :                                                     NULL);
    2264            6 :         if (!lock)
    2265            0 :             elog(ERROR, "failed to re-find shared lock object");
    2266            6 :         locallock->lock = lock;
    2267              : 
    2268            6 :         proclocktag.myLock = lock;
    2269            6 :         proclocktag.myProc = MyProc;
    2270            6 :         locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
    2271              :                                                        &proclocktag,
    2272              :                                                        HASH_FIND,
    2273              :                                                        NULL);
    2274            6 :         if (!locallock->proclock)
    2275            0 :             elog(ERROR, "failed to re-find shared proclock object");
    2276              :     }
    2277              :     LOCK_PRINT("LockRelease: found", lock, lockmode);
    2278      2183312 :     proclock = locallock->proclock;
    2279              :     PROCLOCK_PRINT("LockRelease: found", proclock);
    2280              : 
    2281              :     /*
    2282              :      * Double-check that we are actually holding a lock of the type we want to
    2283              :      * release.
    2284              :      */
    2285      2183312 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    2286              :     {
    2287              :         PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
    2288            0 :         LWLockRelease(partitionLock);
    2289            0 :         elog(WARNING, "you don't own a lock of type %s",
    2290              :              lockMethodTable->lockModeNames[lockmode]);
    2291            0 :         RemoveLocalLock(locallock);
    2292            0 :         return false;
    2293              :     }
    2294              : 
    2295              :     /*
    2296              :      * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
    2297              :      */
    2298      2183312 :     wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
    2299              : 
    2300      2183312 :     CleanUpLock(lock, proclock,
    2301              :                 lockMethodTable, locallock->hashcode,
    2302              :                 wakeupNeeded);
    2303              : 
    2304      2183312 :     LWLockRelease(partitionLock);
    2305              : 
    2306      2183312 :     RemoveLocalLock(locallock);
    2307      2183312 :     return true;
    2308              : }
    2309              : 
    2310              : /*
    2311              :  * LockReleaseAll -- Release all locks of the specified lock method that
    2312              :  *      are held by the current process.
    2313              :  *
    2314              :  * Well, not necessarily *all* locks.  The available behaviors are:
    2315              :  *      allLocks == true: release all locks including session locks.
    2316              :  *      allLocks == false: release all non-session locks.
    2317              :  */
    2318              : void
    2319      1253666 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
    2320              : {
    2321              :     HASH_SEQ_STATUS status;
    2322              :     LockMethod  lockMethodTable;
    2323              :     int         i,
    2324              :                 numLockModes;
    2325              :     LOCALLOCK  *locallock;
    2326              :     LOCK       *lock;
    2327              :     int         partition;
    2328      1253666 :     bool        have_fast_path_lwlock = false;
    2329              : 
    2330      1253666 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2331            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2332      1253666 :     lockMethodTable = LockMethods[lockmethodid];
    2333              : 
    2334              : #ifdef LOCK_DEBUG
    2335              :     if (*(lockMethodTable->trace_flag))
    2336              :         elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
    2337              : #endif
    2338              : 
    2339              :     /*
    2340              :      * Get rid of our fast-path VXID lock, if appropriate.  Note that this is
    2341              :      * the only way that the lock we hold on our own VXID can ever get
    2342              :      * released: it is always and only released when a toplevel transaction
    2343              :      * ends.
    2344              :      */
    2345      1253666 :     if (lockmethodid == DEFAULT_LOCKMETHOD)
    2346       617398 :         VirtualXactLockTableCleanup();
    2347              : 
    2348      1253666 :     numLockModes = lockMethodTable->numLockModes;
    2349              : 
    2350              :     /*
    2351              :      * First we run through the locallock table and get rid of unwanted
    2352              :      * entries, then we scan the process's proclocks and get rid of those. We
    2353              :      * do this separately because we may have multiple locallock entries
    2354              :      * pointing to the same proclock, and we daren't end up with any dangling
    2355              :      * pointers.  Fast-path locks are cleaned up during the locallock table
    2356              :      * scan, though.
    2357              :      */
    2358      1253666 :     hash_seq_init(&status, LockMethodLocalHash);
    2359              : 
    2360      2991959 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2361              :     {
    2362              :         /*
    2363              :          * If the LOCALLOCK entry is unused, something must've gone wrong
    2364              :          * while trying to acquire this lock.  Just forget the local entry.
    2365              :          */
    2366      1738293 :         if (locallock->nLocks == 0)
    2367              :         {
    2368           49 :             RemoveLocalLock(locallock);
    2369           49 :             continue;
    2370              :         }
    2371              : 
    2372              :         /* Ignore items that are not of the lockmethod to be removed */
    2373      1738244 :         if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
    2374       135338 :             continue;
    2375              : 
    2376              :         /*
    2377              :          * If we are asked to release all locks, we can just zap the entry.
    2378              :          * Otherwise, must scan to see if there are session locks. We assume
    2379              :          * there is at most one lockOwners entry for session locks.
    2380              :          */
    2381      1602906 :         if (!allLocks)
    2382              :         {
    2383      1492531 :             LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    2384              : 
    2385              :             /* If session lock is above array position 0, move it down to 0 */
    2386      3107358 :             for (i = 0; i < locallock->numLockOwners; i++)
    2387              :             {
    2388      1614827 :                 if (lockOwners[i].owner == NULL)
    2389       135013 :                     lockOwners[0] = lockOwners[i];
    2390              :                 else
    2391      1479814 :                     ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
    2392              :             }
    2393              : 
    2394      1492531 :             if (locallock->numLockOwners > 0 &&
    2395      1492531 :                 lockOwners[0].owner == NULL &&
    2396       135013 :                 lockOwners[0].nLocks > 0)
    2397              :             {
    2398              :                 /* Fix the locallock to show just the session locks */
    2399       135013 :                 locallock->nLocks = lockOwners[0].nLocks;
    2400       135013 :                 locallock->numLockOwners = 1;
    2401              :                 /* We aren't deleting this locallock, so done */
    2402       135013 :                 continue;
    2403              :             }
    2404              :             else
    2405      1357518 :                 locallock->numLockOwners = 0;
    2406              :         }
    2407              : 
    2408              : #ifdef USE_ASSERT_CHECKING
    2409              : 
    2410              :         /*
    2411              :          * Tuple locks are currently held only for short durations within a
    2412              :          * transaction. Check that we didn't forget to release one.
    2413              :          */
    2414              :         if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
    2415              :             elog(WARNING, "tuple lock held at commit");
    2416              : #endif
    2417              : 
    2418              :         /*
    2419              :          * If the lock or proclock pointers are NULL, this lock was taken via
    2420              :          * the relation fast-path (and is not known to have been transferred).
    2421              :          */
    2422      1467893 :         if (locallock->proclock == NULL || locallock->lock == NULL)
    2423         1514 :         {
    2424       791008 :             LOCKMODE    lockmode = locallock->tag.mode;
    2425              :             Oid         relid;
    2426              : 
    2427              :             /* Verify that a fast-path lock is what we've got. */
    2428       791008 :             if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
    2429            0 :                 elog(PANIC, "locallock table corrupted");
    2430              : 
    2431              :             /*
    2432              :              * If we don't currently hold the LWLock that protects our
    2433              :              * fast-path data structures, we must acquire it before attempting
    2434              :              * to release the lock via the fast-path.  We will continue to
    2435              :              * hold the LWLock until we're done scanning the locallock table,
    2436              :              * unless we hit a transferred fast-path lock.  (XXX is this
    2437              :              * really such a good idea?  There could be a lot of entries ...)
    2438              :              */
    2439       791008 :             if (!have_fast_path_lwlock)
    2440              :             {
    2441       271911 :                 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2442       271911 :                 have_fast_path_lwlock = true;
    2443              :             }
    2444              : 
    2445              :             /* Attempt fast-path release. */
    2446       791008 :             relid = locallock->tag.lock.locktag_field2;
    2447       791008 :             if (FastPathUnGrantRelationLock(relid, lockmode))
    2448              :             {
    2449       789494 :                 RemoveLocalLock(locallock);
    2450       789494 :                 continue;
    2451              :             }
    2452              : 
    2453              :             /*
    2454              :              * Our lock, originally taken via the fast path, has been
    2455              :              * transferred to the main lock table.  That's going to require
    2456              :              * some extra work, so release our fast-path lock before starting.
    2457              :              */
    2458         1514 :             LWLockRelease(&MyProc->fpInfoLock);
    2459         1514 :             have_fast_path_lwlock = false;
    2460              : 
    2461              :             /*
    2462              :              * Now dump the lock.  We haven't got a pointer to the LOCK or
    2463              :              * PROCLOCK in this case, so we have to handle this a bit
    2464              :              * differently than a normal lock release.  Unfortunately, this
    2465              :              * requires an extra LWLock acquire-and-release cycle on the
    2466              :              * partitionLock, but hopefully it shouldn't happen often.
    2467              :              */
    2468         1514 :             LockRefindAndRelease(lockMethodTable, MyProc,
    2469              :                                  &locallock->tag.lock, lockmode, false);
    2470         1514 :             RemoveLocalLock(locallock);
    2471         1514 :             continue;
    2472              :         }
    2473              : 
    2474              :         /* Mark the proclock to show we need to release this lockmode */
    2475       676885 :         if (locallock->nLocks > 0)
    2476       676885 :             locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
    2477              : 
    2478              :         /* And remove the locallock hashtable entry */
    2479       676885 :         RemoveLocalLock(locallock);
    2480              :     }
    2481              : 
    2482              :     /* Done with the fast-path data structures */
    2483      1253666 :     if (have_fast_path_lwlock)
    2484       270397 :         LWLockRelease(&MyProc->fpInfoLock);
    2485              : 
    2486              :     /*
    2487              :      * Now, scan each lock partition separately.
    2488              :      */
    2489     21312322 :     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
    2490              :     {
    2491              :         LWLock     *partitionLock;
    2492     20058656 :         dlist_head *procLocks = &MyProc->myProcLocks[partition];
    2493              :         dlist_mutable_iter proclock_iter;
    2494              : 
    2495     20058656 :         partitionLock = LockHashPartitionLockByIndex(partition);
    2496              : 
    2497              :         /*
    2498              :          * If the proclock list for this partition is empty, we can skip
    2499              :          * acquiring the partition lock.  This optimization is trickier than
    2500              :          * it looks, because another backend could be in process of adding
    2501              :          * something to our proclock list due to promoting one of our
    2502              :          * fast-path locks.  However, any such lock must be one that we
    2503              :          * decided not to delete above, so it's okay to skip it again now;
    2504              :          * we'd just decide not to delete it again.  We must, however, be
    2505              :          * careful to re-fetch the list header once we've acquired the
    2506              :          * partition lock, to be sure we have a valid, up-to-date pointer.
    2507              :          * (There is probably no significant risk if pointer fetch/store is
    2508              :          * atomic, but we don't wish to assume that.)
    2509              :          *
    2510              :          * XXX This argument assumes that the locallock table correctly
    2511              :          * represents all of our fast-path locks.  While allLocks mode
    2512              :          * guarantees to clean up all of our normal locks regardless of the
    2513              :          * locallock situation, we lose that guarantee for fast-path locks.
    2514              :          * This is not ideal.
    2515              :          */
    2516     20058656 :         if (dlist_is_empty(procLocks))
    2517     19302255 :             continue;           /* needn't examine this partition */
    2518              : 
    2519       756401 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2520              : 
    2521      1658831 :         dlist_foreach_modify(proclock_iter, procLocks)
    2522              :         {
    2523       902430 :             PROCLOCK   *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
    2524       902430 :             bool        wakeupNeeded = false;
    2525              : 
    2526              :             Assert(proclock->tag.myProc == MyProc);
    2527              : 
    2528       902430 :             lock = proclock->tag.myLock;
    2529              : 
    2530              :             /* Ignore items that are not of the lockmethod to be removed */
    2531       902430 :             if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
    2532       135338 :                 continue;
    2533              : 
    2534              :             /*
    2535              :              * In allLocks mode, force release of all locks even if locallock
    2536              :              * table had problems
    2537              :              */
    2538       767092 :             if (allLocks)
    2539        59697 :                 proclock->releaseMask = proclock->holdMask;
    2540              :             else
    2541              :                 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
    2542              : 
    2543              :             /*
    2544              :              * Ignore items that have nothing to be released, unless they have
    2545              :              * holdMask == 0 and are therefore recyclable
    2546              :              */
    2547       767092 :             if (proclock->releaseMask == 0 && proclock->holdMask != 0)
    2548       134136 :                 continue;
    2549              : 
    2550              :             PROCLOCK_PRINT("LockReleaseAll", proclock);
    2551              :             LOCK_PRINT("LockReleaseAll", lock, 0);
    2552              :             Assert(lock->nRequested >= 0);
    2553              :             Assert(lock->nGranted >= 0);
    2554              :             Assert(lock->nGranted <= lock->nRequested);
    2555              :             Assert((proclock->holdMask & ~lock->grantMask) == 0);
    2556              : 
    2557              :             /*
    2558              :              * Release the previously-marked lock modes
    2559              :              */
    2560      5696604 :             for (i = 1; i <= numLockModes; i++)
    2561              :             {
    2562      5063648 :                 if (proclock->releaseMask & LOCKBIT_ON(i))
    2563       676887 :                     wakeupNeeded |= UnGrantLock(lock, i, proclock,
    2564              :                                                 lockMethodTable);
    2565              :             }
    2566              :             Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
    2567              :             Assert(lock->nGranted <= lock->nRequested);
    2568              :             LOCK_PRINT("LockReleaseAll: updated", lock, 0);
    2569              : 
    2570       632956 :             proclock->releaseMask = 0;
    2571              : 
    2572              :             /* CleanUpLock will wake up waiters if needed. */
    2573       632956 :             CleanUpLock(lock, proclock,
    2574              :                         lockMethodTable,
    2575       632956 :                         LockTagHashCode(&lock->tag),
    2576              :                         wakeupNeeded);
    2577              :         }                       /* loop over PROCLOCKs within this partition */
    2578              : 
    2579       756401 :         LWLockRelease(partitionLock);
    2580              :     }                           /* loop over partitions */
    2581              : 
    2582              : #ifdef LOCK_DEBUG
    2583              :     if (*(lockMethodTable->trace_flag))
    2584              :         elog(LOG, "LockReleaseAll done");
    2585              : #endif
    2586      1253666 : }
    2587              : 
    2588              : /*
    2589              :  * LockReleaseSession -- Release all session locks of the specified lock method
    2590              :  *      that are held by the current process.
    2591              :  */
    2592              : void
    2593          122 : LockReleaseSession(LOCKMETHODID lockmethodid)
    2594              : {
    2595              :     HASH_SEQ_STATUS status;
    2596              :     LOCALLOCK  *locallock;
    2597              : 
    2598          122 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2599            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2600              : 
    2601          122 :     hash_seq_init(&status, LockMethodLocalHash);
    2602              : 
    2603          242 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2604              :     {
    2605              :         /* Ignore items that are not of the specified lock method */
    2606          120 :         if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
    2607           11 :             continue;
    2608              : 
    2609          109 :         ReleaseLockIfHeld(locallock, true);
    2610              :     }
    2611          122 : }
    2612              : 
    2613              : /*
    2614              :  * LockReleaseCurrentOwner
    2615              :  *      Release all locks belonging to CurrentResourceOwner
    2616              :  *
    2617              :  * If the caller knows what those locks are, it can pass them as an array.
    2618              :  * That speeds up the call significantly, when a lot of locks are held.
    2619              :  * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
    2620              :  * table to find them.
    2621              :  */
    2622              : void
    2623         6827 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
    2624              : {
    2625         6827 :     if (locallocks == NULL)
    2626              :     {
    2627              :         HASH_SEQ_STATUS status;
    2628              :         LOCALLOCK  *locallock;
    2629              : 
    2630            5 :         hash_seq_init(&status, LockMethodLocalHash);
    2631              : 
    2632          357 :         while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2633          352 :             ReleaseLockIfHeld(locallock, false);
    2634              :     }
    2635              :     else
    2636              :     {
    2637              :         int         i;
    2638              : 
    2639        10247 :         for (i = nlocks - 1; i >= 0; i--)
    2640         3425 :             ReleaseLockIfHeld(locallocks[i], false);
    2641              :     }
    2642         6827 : }
    2643              : 
    2644              : /*
    2645              :  * ReleaseLockIfHeld
    2646              :  *      Release any session-level locks on this lockable object if sessionLock
    2647              :  *      is true; else, release any locks held by CurrentResourceOwner.
    2648              :  *
    2649              :  * It is tempting to pass this a ResourceOwner pointer (or NULL for session
    2650              :  * locks), but without refactoring LockRelease() we cannot support releasing
    2651              :  * locks belonging to resource owners other than CurrentResourceOwner.
    2652              :  * If we were to refactor, it'd be a good idea to fix it so we don't have to
    2653              :  * do a hashtable lookup of the locallock, too.  However, currently this
    2654              :  * function isn't used heavily enough to justify refactoring for its
    2655              :  * convenience.
    2656              :  */
    2657              : static void
    2658         3886 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
    2659              : {
    2660              :     ResourceOwner owner;
    2661              :     LOCALLOCKOWNER *lockOwners;
    2662              :     int         i;
    2663              : 
    2664              :     /* Identify owner for lock (must match LockRelease!) */
    2665         3886 :     if (sessionLock)
    2666          109 :         owner = NULL;
    2667              :     else
    2668         3777 :         owner = CurrentResourceOwner;
    2669              : 
    2670              :     /* Scan to see if there are any locks belonging to the target owner */
    2671         3886 :     lockOwners = locallock->lockOwners;
    2672         4143 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2673              :     {
    2674         3886 :         if (lockOwners[i].owner == owner)
    2675              :         {
    2676              :             Assert(lockOwners[i].nLocks > 0);
    2677         3629 :             if (lockOwners[i].nLocks < locallock->nLocks)
    2678              :             {
    2679              :                 /*
    2680              :                  * We will still hold this lock after forgetting this
    2681              :                  * ResourceOwner.
    2682              :                  */
    2683          963 :                 locallock->nLocks -= lockOwners[i].nLocks;
    2684              :                 /* compact out unused slot */
    2685          963 :                 locallock->numLockOwners--;
    2686          963 :                 if (owner != NULL)
    2687          963 :                     ResourceOwnerForgetLock(owner, locallock);
    2688          963 :                 if (i < locallock->numLockOwners)
    2689            0 :                     lockOwners[i] = lockOwners[locallock->numLockOwners];
    2690              :             }
    2691              :             else
    2692              :             {
    2693              :                 Assert(lockOwners[i].nLocks == locallock->nLocks);
    2694              :                 /* We want to call LockRelease just once */
    2695         2666 :                 lockOwners[i].nLocks = 1;
    2696         2666 :                 locallock->nLocks = 1;
    2697         2666 :                 if (!LockRelease(&locallock->tag.lock,
    2698              :                                  locallock->tag.mode,
    2699              :                                  sessionLock))
    2700            0 :                     elog(WARNING, "ReleaseLockIfHeld: failed??");
    2701              :             }
    2702         3629 :             break;
    2703              :         }
    2704              :     }
    2705         3886 : }
    2706              : 
    2707              : /*
    2708              :  * LockReassignCurrentOwner
    2709              :  *      Reassign all locks belonging to CurrentResourceOwner to belong
    2710              :  *      to its parent resource owner.
    2711              :  *
    2712              :  * If the caller knows what those locks are, it can pass them as an array.
    2713              :  * That speeds up the call significantly, when a lot of locks are held
    2714              :  * (e.g pg_dump with a large schema).  Otherwise, pass NULL for locallocks,
    2715              :  * and we'll traverse through our hash table to find them.
    2716              :  */
    2717              : void
    2718       435514 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
    2719              : {
    2720       435514 :     ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
    2721              : 
    2722              :     Assert(parent != NULL);
    2723              : 
    2724       435514 :     if (locallocks == NULL)
    2725              :     {
    2726              :         HASH_SEQ_STATUS status;
    2727              :         LOCALLOCK  *locallock;
    2728              : 
    2729         4948 :         hash_seq_init(&status, LockMethodLocalHash);
    2730              : 
    2731       172026 :         while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2732       167078 :             LockReassignOwner(locallock, parent);
    2733              :     }
    2734              :     else
    2735              :     {
    2736              :         int         i;
    2737              : 
    2738       928680 :         for (i = nlocks - 1; i >= 0; i--)
    2739       498114 :             LockReassignOwner(locallocks[i], parent);
    2740              :     }
    2741       435514 : }
    2742              : 
    2743              : /*
    2744              :  * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
    2745              :  * CurrentResourceOwner to its parent.
    2746              :  */
    2747              : static void
    2748       665192 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
    2749              : {
    2750              :     LOCALLOCKOWNER *lockOwners;
    2751              :     int         i;
    2752       665192 :     int         ic = -1;
    2753       665192 :     int         ip = -1;
    2754              : 
    2755              :     /*
    2756              :      * Scan to see if there are any locks belonging to current owner or its
    2757              :      * parent
    2758              :      */
    2759       665192 :     lockOwners = locallock->lockOwners;
    2760      1522473 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2761              :     {
    2762       857281 :         if (lockOwners[i].owner == CurrentResourceOwner)
    2763       616583 :             ic = i;
    2764       240698 :         else if (lockOwners[i].owner == parent)
    2765       196741 :             ip = i;
    2766              :     }
    2767              : 
    2768       665192 :     if (ic < 0)
    2769        48609 :         return;                 /* no current locks */
    2770              : 
    2771       616583 :     if (ip < 0)
    2772              :     {
    2773              :         /* Parent has no slot, so just give it the child's slot */
    2774       468409 :         lockOwners[ic].owner = parent;
    2775       468409 :         ResourceOwnerRememberLock(parent, locallock);
    2776              :     }
    2777              :     else
    2778              :     {
    2779              :         /* Merge child's count with parent's */
    2780       148174 :         lockOwners[ip].nLocks += lockOwners[ic].nLocks;
    2781              :         /* compact out unused slot */
    2782       148174 :         locallock->numLockOwners--;
    2783       148174 :         if (ic < locallock->numLockOwners)
    2784          906 :             lockOwners[ic] = lockOwners[locallock->numLockOwners];
    2785              :     }
    2786       616583 :     ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
    2787              : }
    2788              : 
    2789              : /*
    2790              :  * FastPathGrantRelationLock
    2791              :  *      Grant lock using per-backend fast-path array, if there is space.
    2792              :  */
    2793              : static bool
    2794     20255463 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
    2795              : {
    2796              :     uint32      i;
    2797     20255463 :     uint32      unused_slot = FastPathLockSlotsPerBackend();
    2798              : 
    2799              :     /* fast-path group the lock belongs to */
    2800     20255463 :     uint32      group = FAST_PATH_REL_GROUP(relid);
    2801              : 
    2802              :     /* Scan for existing entry for this relid, remembering empty slot. */
    2803    343587900 :     for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
    2804              :     {
    2805              :         /* index into the whole per-backend array */
    2806    323854476 :         uint32      f = FAST_PATH_SLOT(group, i);
    2807              : 
    2808    323854476 :         if (FAST_PATH_GET_BITS(MyProc, f) == 0)
    2809    312633824 :             unused_slot = f;
    2810     11220652 :         else if (MyProc->fpRelId[f] == relid)
    2811              :         {
    2812              :             Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
    2813       522039 :             FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
    2814       522039 :             return true;
    2815              :         }
    2816              :     }
    2817              : 
    2818              :     /* If no existing entry, use any empty slot. */
    2819     19733424 :     if (unused_slot < FastPathLockSlotsPerBackend())
    2820              :     {
    2821     19733424 :         MyProc->fpRelId[unused_slot] = relid;
    2822     19733424 :         FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
    2823     19733424 :         ++FastPathLocalUseCounts[group];
    2824     19733424 :         return true;
    2825              :     }
    2826              : 
    2827              :     /* No existing entry, and no empty slot. */
    2828            0 :     return false;
    2829              : }
    2830              : 
    2831              : /*
    2832              :  * FastPathUnGrantRelationLock
    2833              :  *      Release fast-path lock, if present.  Update backend-private local
    2834              :  *      use count, while we're at it.
    2835              :  */
    2836              : static bool
    2837     20527627 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
    2838              : {
    2839              :     uint32      i;
    2840     20527627 :     bool        result = false;
    2841              : 
    2842              :     /* fast-path group the lock belongs to */
    2843     20527627 :     uint32      group = FAST_PATH_REL_GROUP(relid);
    2844              : 
    2845     20527627 :     FastPathLocalUseCounts[group] = 0;
    2846    348969659 :     for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
    2847              :     {
    2848              :         /* index into the whole per-backend array */
    2849    328442032 :         uint32      f = FAST_PATH_SLOT(group, i);
    2850              : 
    2851    328442032 :         if (MyProc->fpRelId[f] == relid
    2852     28214634 :             && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
    2853              :         {
    2854              :             Assert(!result);
    2855     20253602 :             FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
    2856     20253602 :             result = true;
    2857              :             /* we continue iterating so as to update FastPathLocalUseCount */
    2858              :         }
    2859    328442032 :         if (FAST_PATH_GET_BITS(MyProc, f) != 0)
    2860     15304611 :             ++FastPathLocalUseCounts[group];
    2861              :     }
    2862     20527627 :     return result;
    2863              : }
    2864              : 
    2865              : /*
    2866              :  * FastPathTransferRelationLocks
    2867              :  *      Transfer locks matching the given lock tag from per-backend fast-path
    2868              :  *      arrays to the shared hash table.
    2869              :  *
    2870              :  * Returns true if successful, false if ran out of shared memory.
    2871              :  */
    2872              : static bool
    2873       236635 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
    2874              :                               uint32 hashcode)
    2875              : {
    2876       236635 :     LWLock     *partitionLock = LockHashPartitionLock(hashcode);
    2877       236635 :     Oid         relid = locktag->locktag_field2;
    2878              :     uint32      i;
    2879              : 
    2880              :     /* fast-path group the lock belongs to */
    2881       236635 :     uint32      group = FAST_PATH_REL_GROUP(relid);
    2882              : 
    2883              :     /*
    2884              :      * Every PGPROC that can potentially hold a fast-path lock is present in
    2885              :      * ProcGlobal->allProcs.  Prepared transactions are not, but any
    2886              :      * outstanding fast-path locks held by prepared transactions are
    2887              :      * transferred to the main lock table.
    2888              :      */
    2889     35158108 :     for (i = 0; i < ProcGlobal->allProcCount; i++)
    2890              :     {
    2891     34921473 :         PGPROC     *proc = GetPGProcByNumber(i);
    2892              :         uint32      j;
    2893              : 
    2894     34921473 :         LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
    2895              : 
    2896              :         /*
    2897              :          * If the target backend isn't referencing the same database as the
    2898              :          * lock, then we needn't examine the individual relation IDs at all;
    2899              :          * none of them can be relevant.
    2900              :          *
    2901              :          * proc->databaseId is set at backend startup time and never changes
    2902              :          * thereafter, so it might be safe to perform this test before
    2903              :          * acquiring &proc->fpInfoLock.  In particular, it's certainly safe to
    2904              :          * assume that if the target backend holds any fast-path locks, it
    2905              :          * must have performed a memory-fencing operation (in particular, an
    2906              :          * LWLock acquisition) since setting proc->databaseId.  However, it's
    2907              :          * less clear that our backend is certain to have performed a memory
    2908              :          * fencing operation since the other backend set proc->databaseId.  So
    2909              :          * for now, we test it after acquiring the LWLock just to be safe.
    2910              :          *
    2911              :          * Also skip groups without any registered fast-path locks.
    2912              :          */
    2913     34921473 :         if (proc->databaseId != locktag->locktag_field1 ||
    2914     13774639 :             proc->fpLockBits[group] == 0)
    2915              :         {
    2916     34688322 :             LWLockRelease(&proc->fpInfoLock);
    2917     34688322 :             continue;
    2918              :         }
    2919              : 
    2920      3961824 :         for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
    2921              :         {
    2922              :             uint32      lockmode;
    2923              : 
    2924              :             /* index into the whole per-backend array */
    2925      3730146 :             uint32      f = FAST_PATH_SLOT(group, j);
    2926              : 
    2927              :             /* Look for an allocated slot matching the given relid. */
    2928      3730146 :             if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
    2929      3728673 :                 continue;
    2930              : 
    2931              :             /* Find or create lock object. */
    2932         1473 :             LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2933         1473 :             for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
    2934         5892 :                  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
    2935         4419 :                  ++lockmode)
    2936              :             {
    2937              :                 PROCLOCK   *proclock;
    2938              : 
    2939         4419 :                 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
    2940         2883 :                     continue;
    2941         1536 :                 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
    2942              :                                             hashcode, lockmode);
    2943         1536 :                 if (!proclock)
    2944              :                 {
    2945            0 :                     LWLockRelease(partitionLock);
    2946            0 :                     LWLockRelease(&proc->fpInfoLock);
    2947            0 :                     return false;
    2948              :                 }
    2949         1536 :                 GrantLock(proclock->tag.myLock, proclock, lockmode);
    2950         1536 :                 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
    2951              :             }
    2952         1473 :             LWLockRelease(partitionLock);
    2953              : 
    2954              :             /* No need to examine remaining slots. */
    2955         1473 :             break;
    2956              :         }
    2957       233151 :         LWLockRelease(&proc->fpInfoLock);
    2958              :     }
    2959       236635 :     return true;
    2960              : }
    2961              : 
    2962              : /*
    2963              :  * FastPathGetRelationLockEntry
    2964              :  *      Return the PROCLOCK for a lock originally taken via the fast-path,
    2965              :  *      transferring it to the primary lock table if necessary.
    2966              :  *
    2967              :  * Note: caller takes care of updating the locallock object.
    2968              :  */
    2969              : static PROCLOCK *
    2970          341 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
    2971              : {
    2972          341 :     LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
    2973          341 :     LOCKTAG    *locktag = &locallock->tag.lock;
    2974          341 :     PROCLOCK   *proclock = NULL;
    2975          341 :     LWLock     *partitionLock = LockHashPartitionLock(locallock->hashcode);
    2976          341 :     Oid         relid = locktag->locktag_field2;
    2977              :     uint32      i,
    2978              :                 group;
    2979              : 
    2980              :     /* fast-path group the lock belongs to */
    2981          341 :     group = FAST_PATH_REL_GROUP(relid);
    2982              : 
    2983          341 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2984              : 
    2985         5465 :     for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
    2986              :     {
    2987              :         uint32      lockmode;
    2988              : 
    2989              :         /* index into the whole per-backend array */
    2990         5449 :         uint32      f = FAST_PATH_SLOT(group, i);
    2991              : 
    2992              :         /* Look for an allocated slot matching the given relid. */
    2993         5449 :         if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
    2994         5124 :             continue;
    2995              : 
    2996              :         /* If we don't have a lock of the given mode, forget it! */
    2997          325 :         lockmode = locallock->tag.mode;
    2998          325 :         if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
    2999            0 :             break;
    3000              : 
    3001              :         /* Find or create lock object. */
    3002          325 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    3003              : 
    3004          325 :         proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
    3005              :                                     locallock->hashcode, lockmode);
    3006          325 :         if (!proclock)
    3007              :         {
    3008            0 :             LWLockRelease(partitionLock);
    3009            0 :             LWLockRelease(&MyProc->fpInfoLock);
    3010            0 :             ereport(ERROR,
    3011              :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    3012              :                      errmsg("out of shared memory"),
    3013              :                      errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    3014              :         }
    3015          325 :         GrantLock(proclock->tag.myLock, proclock, lockmode);
    3016          325 :         FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
    3017              : 
    3018          325 :         LWLockRelease(partitionLock);
    3019              : 
    3020              :         /* No need to examine remaining slots. */
    3021          325 :         break;
    3022              :     }
    3023              : 
    3024          341 :     LWLockRelease(&MyProc->fpInfoLock);
    3025              : 
    3026              :     /* Lock may have already been transferred by some other backend. */
    3027          341 :     if (proclock == NULL)
    3028              :     {
    3029              :         LOCK       *lock;
    3030              :         PROCLOCKTAG proclocktag;
    3031              :         uint32      proclock_hashcode;
    3032              : 
    3033           16 :         LWLockAcquire(partitionLock, LW_SHARED);
    3034              : 
    3035           16 :         lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    3036              :                                                     locktag,
    3037              :                                                     locallock->hashcode,
    3038              :                                                     HASH_FIND,
    3039              :                                                     NULL);
    3040           16 :         if (!lock)
    3041            0 :             elog(ERROR, "failed to re-find shared lock object");
    3042              : 
    3043           16 :         proclocktag.myLock = lock;
    3044           16 :         proclocktag.myProc = MyProc;
    3045              : 
    3046           16 :         proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
    3047              :         proclock = (PROCLOCK *)
    3048           16 :             hash_search_with_hash_value(LockMethodProcLockHash,
    3049              :                                         &proclocktag,
    3050              :                                         proclock_hashcode,
    3051              :                                         HASH_FIND,
    3052              :                                         NULL);
    3053           16 :         if (!proclock)
    3054            0 :             elog(ERROR, "failed to re-find shared proclock object");
    3055           16 :         LWLockRelease(partitionLock);
    3056              :     }
    3057              : 
    3058          341 :     return proclock;
    3059              : }
    3060              : 
    3061              : /*
    3062              :  * GetLockConflicts
    3063              :  *      Get an array of VirtualTransactionIds of xacts currently holding locks
    3064              :  *      that would conflict with the specified lock/lockmode.
    3065              :  *      xacts merely awaiting such a lock are NOT reported.
    3066              :  *
    3067              :  * The result array is palloc'd and is terminated with an invalid VXID.
    3068              :  * *countp, if not null, is updated to the number of items set.
    3069              :  *
    3070              :  * Of course, the result could be out of date by the time it's returned, so
    3071              :  * use of this function has to be thought about carefully.  Similarly, a
    3072              :  * PGPROC with no "lxid" will be considered non-conflicting regardless of any
    3073              :  * lock it holds.  Existing callers don't care about a locker after that
    3074              :  * locker's pg_xact updates complete.  CommitTransaction() clears "lxid" after
    3075              :  * pg_xact updates and before releasing locks.
    3076              :  *
    3077              :  * Note we never include the current xact's vxid in the result array,
    3078              :  * since an xact never blocks itself.
    3079              :  */
    3080              : VirtualTransactionId *
    3081         1718 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
    3082              : {
    3083              :     static VirtualTransactionId *vxids;
    3084         1718 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    3085              :     LockMethod  lockMethodTable;
    3086              :     LOCK       *lock;
    3087              :     LOCKMASK    conflictMask;
    3088              :     dlist_iter  proclock_iter;
    3089              :     PROCLOCK   *proclock;
    3090              :     uint32      hashcode;
    3091              :     LWLock     *partitionLock;
    3092         1718 :     int         count = 0;
    3093         1718 :     int         fast_count = 0;
    3094              : 
    3095         1718 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    3096            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    3097         1718 :     lockMethodTable = LockMethods[lockmethodid];
    3098         1718 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
    3099            0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
    3100              : 
    3101              :     /*
    3102              :      * Allocate memory to store results, and fill with InvalidVXID.  We only
    3103              :      * need enough space for MaxBackends + max_prepared_xacts + a terminator.
    3104              :      * InHotStandby allocate once in TopMemoryContext.
    3105              :      */
    3106         1718 :     if (InHotStandby)
    3107              :     {
    3108            4 :         if (vxids == NULL)
    3109            1 :             vxids = (VirtualTransactionId *)
    3110            1 :                 MemoryContextAlloc(TopMemoryContext,
    3111              :                                    sizeof(VirtualTransactionId) *
    3112            1 :                                    (MaxBackends + max_prepared_xacts + 1));
    3113              :     }
    3114              :     else
    3115         1714 :         vxids = palloc0_array(VirtualTransactionId, (MaxBackends + max_prepared_xacts + 1));
    3116              : 
    3117              :     /* Compute hash code and partition lock, and look up conflicting modes. */
    3118         1718 :     hashcode = LockTagHashCode(locktag);
    3119         1718 :     partitionLock = LockHashPartitionLock(hashcode);
    3120         1718 :     conflictMask = lockMethodTable->conflictTab[lockmode];
    3121              : 
    3122              :     /*
    3123              :      * Fast path locks might not have been entered in the primary lock table.
    3124              :      * If the lock we're dealing with could conflict with such a lock, we must
    3125              :      * examine each backend's fast-path array for conflicts.
    3126              :      */
    3127         1718 :     if (ConflictsWithRelationFastPath(locktag, lockmode))
    3128              :     {
    3129              :         int         i;
    3130         1718 :         Oid         relid = locktag->locktag_field2;
    3131              :         VirtualTransactionId vxid;
    3132              : 
    3133              :         /* fast-path group the lock belongs to */
    3134         1718 :         uint32      group = FAST_PATH_REL_GROUP(relid);
    3135              : 
    3136              :         /*
    3137              :          * Iterate over relevant PGPROCs.  Anything held by a prepared
    3138              :          * transaction will have been transferred to the primary lock table,
    3139              :          * so we need not worry about those.  This is all a bit fuzzy, because
    3140              :          * new locks could be taken after we've visited a particular
    3141              :          * partition, but the callers had better be prepared to deal with that
    3142              :          * anyway, since the locks could equally well be taken between the
    3143              :          * time we return the value and the time the caller does something
    3144              :          * with it.
    3145              :          */
    3146       271970 :         for (i = 0; i < ProcGlobal->allProcCount; i++)
    3147              :         {
    3148       270252 :             PGPROC     *proc = GetPGProcByNumber(i);
    3149              :             uint32      j;
    3150              : 
    3151              :             /* A backend never blocks itself */
    3152       270252 :             if (proc == MyProc)
    3153         1718 :                 continue;
    3154              : 
    3155       268534 :             LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
    3156              : 
    3157              :             /*
    3158              :              * If the target backend isn't referencing the same database as
    3159              :              * the lock, then we needn't examine the individual relation IDs
    3160              :              * at all; none of them can be relevant.
    3161              :              *
    3162              :              * See FastPathTransferRelationLocks() for discussion of why we do
    3163              :              * this test after acquiring the lock.
    3164              :              *
    3165              :              * Also skip groups without any registered fast-path locks.
    3166              :              */
    3167       268534 :             if (proc->databaseId != locktag->locktag_field1 ||
    3168       112079 :                 proc->fpLockBits[group] == 0)
    3169              :             {
    3170       268110 :                 LWLockRelease(&proc->fpInfoLock);
    3171       268110 :                 continue;
    3172              :             }
    3173              : 
    3174         6940 :             for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
    3175              :             {
    3176              :                 uint32      lockmask;
    3177              : 
    3178              :                 /* index into the whole per-backend array */
    3179         6772 :                 uint32      f = FAST_PATH_SLOT(group, j);
    3180              : 
    3181              :                 /* Look for an allocated slot matching the given relid. */
    3182         6772 :                 if (relid != proc->fpRelId[f])
    3183         6516 :                     continue;
    3184          256 :                 lockmask = FAST_PATH_GET_BITS(proc, f);
    3185          256 :                 if (!lockmask)
    3186            0 :                     continue;
    3187          256 :                 lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
    3188              : 
    3189              :                 /*
    3190              :                  * There can only be one entry per relation, so if we found it
    3191              :                  * and it doesn't conflict, we can skip the rest of the slots.
    3192              :                  */
    3193          256 :                 if ((lockmask & conflictMask) == 0)
    3194            5 :                     break;
    3195              : 
    3196              :                 /* Conflict! */
    3197          251 :                 GET_VXID_FROM_PGPROC(vxid, *proc);
    3198              : 
    3199          251 :                 if (VirtualTransactionIdIsValid(vxid))
    3200          250 :                     vxids[count++] = vxid;
    3201              :                 /* else, xact already committed or aborted */
    3202              : 
    3203              :                 /* No need to examine remaining slots. */
    3204          251 :                 break;
    3205              :             }
    3206              : 
    3207          424 :             LWLockRelease(&proc->fpInfoLock);
    3208              :         }
    3209              :     }
    3210              : 
    3211              :     /* Remember how many fast-path conflicts we found. */
    3212         1718 :     fast_count = count;
    3213              : 
    3214              :     /*
    3215              :      * Look up the lock object matching the tag.
    3216              :      */
    3217         1718 :     LWLockAcquire(partitionLock, LW_SHARED);
    3218              : 
    3219         1718 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    3220              :                                                 locktag,
    3221              :                                                 hashcode,
    3222              :                                                 HASH_FIND,
    3223              :                                                 NULL);
    3224         1718 :     if (!lock)
    3225              :     {
    3226              :         /*
    3227              :          * If the lock object doesn't exist, there is nothing holding a lock
    3228              :          * on this lockable object.
    3229              :          */
    3230           72 :         LWLockRelease(partitionLock);
    3231           72 :         vxids[count].procNumber = INVALID_PROC_NUMBER;
    3232           72 :         vxids[count].localTransactionId = InvalidLocalTransactionId;
    3233           72 :         if (countp)
    3234            0 :             *countp = count;
    3235           72 :         return vxids;
    3236              :     }
    3237              : 
    3238              :     /*
    3239              :      * Examine each existing holder (or awaiter) of the lock.
    3240              :      */
    3241         3314 :     dlist_foreach(proclock_iter, &lock->procLocks)
    3242              :     {
    3243         1668 :         proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
    3244              : 
    3245         1668 :         if (conflictMask & proclock->holdMask)
    3246              :         {
    3247         1664 :             PGPROC     *proc = proclock->tag.myProc;
    3248              : 
    3249              :             /* A backend never blocks itself */
    3250         1664 :             if (proc != MyProc)
    3251              :             {
    3252              :                 VirtualTransactionId vxid;
    3253              : 
    3254           22 :                 GET_VXID_FROM_PGPROC(vxid, *proc);
    3255              : 
    3256           22 :                 if (VirtualTransactionIdIsValid(vxid))
    3257              :                 {
    3258              :                     int         i;
    3259              : 
    3260              :                     /* Avoid duplicate entries. */
    3261           33 :                     for (i = 0; i < fast_count; ++i)
    3262           11 :                         if (VirtualTransactionIdEquals(vxids[i], vxid))
    3263            0 :                             break;
    3264           22 :                     if (i >= fast_count)
    3265           22 :                         vxids[count++] = vxid;
    3266              :                 }
    3267              :                 /* else, xact already committed or aborted */
    3268              :             }
    3269              :         }
    3270              :     }
    3271              : 
    3272         1646 :     LWLockRelease(partitionLock);
    3273              : 
    3274         1646 :     if (count > MaxBackends + max_prepared_xacts)    /* should never happen */
    3275            0 :         elog(PANIC, "too many conflicting locks found");
    3276              : 
    3277         1646 :     vxids[count].procNumber = INVALID_PROC_NUMBER;
    3278         1646 :     vxids[count].localTransactionId = InvalidLocalTransactionId;
    3279         1646 :     if (countp)
    3280         1643 :         *countp = count;
    3281         1646 :     return vxids;
    3282              : }
    3283              : 
    3284              : /*
    3285              :  * Find a lock in the shared lock table and release it.  It is the caller's
    3286              :  * responsibility to verify that this is a sane thing to do.  (For example, it
    3287              :  * would be bad to release a lock here if there might still be a LOCALLOCK
    3288              :  * object with pointers to it.)
    3289              :  *
    3290              :  * We currently use this in two situations: first, to release locks held by
    3291              :  * prepared transactions on commit (see lock_twophase_postcommit); and second,
    3292              :  * to release locks taken via the fast-path, transferred to the main hash
    3293              :  * table, and then released (see LockReleaseAll).
    3294              :  */
    3295              : static void
    3296         2695 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
    3297              :                      LOCKTAG *locktag, LOCKMODE lockmode,
    3298              :                      bool decrement_strong_lock_count)
    3299              : {
    3300              :     LOCK       *lock;
    3301              :     PROCLOCK   *proclock;
    3302              :     PROCLOCKTAG proclocktag;
    3303              :     uint32      hashcode;
    3304              :     uint32      proclock_hashcode;
    3305              :     LWLock     *partitionLock;
    3306              :     bool        wakeupNeeded;
    3307              : 
    3308         2695 :     hashcode = LockTagHashCode(locktag);
    3309         2695 :     partitionLock = LockHashPartitionLock(hashcode);
    3310              : 
    3311         2695 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    3312              : 
    3313              :     /*
    3314              :      * Re-find the lock object (it had better be there).
    3315              :      */
    3316         2695 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    3317              :                                                 locktag,
    3318              :                                                 hashcode,
    3319              :                                                 HASH_FIND,
    3320              :                                                 NULL);
    3321         2695 :     if (!lock)
    3322            0 :         elog(PANIC, "failed to re-find shared lock object");
    3323              : 
    3324              :     /*
    3325              :      * Re-find the proclock object (ditto).
    3326              :      */
    3327         2695 :     proclocktag.myLock = lock;
    3328         2695 :     proclocktag.myProc = proc;
    3329              : 
    3330         2695 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    3331              : 
    3332         2695 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    3333              :                                                         &proclocktag,
    3334              :                                                         proclock_hashcode,
    3335              :                                                         HASH_FIND,
    3336              :                                                         NULL);
    3337         2695 :     if (!proclock)
    3338            0 :         elog(PANIC, "failed to re-find shared proclock object");
    3339              : 
    3340              :     /*
    3341              :      * Double-check that we are actually holding a lock of the type we want to
    3342              :      * release.
    3343              :      */
    3344         2695 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    3345              :     {
    3346              :         PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
    3347            0 :         LWLockRelease(partitionLock);
    3348            0 :         elog(WARNING, "you don't own a lock of type %s",
    3349              :              lockMethodTable->lockModeNames[lockmode]);
    3350            0 :         return;
    3351              :     }
    3352              : 
    3353              :     /*
    3354              :      * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
    3355              :      */
    3356         2695 :     wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
    3357              : 
    3358         2695 :     CleanUpLock(lock, proclock,
    3359              :                 lockMethodTable, hashcode,
    3360              :                 wakeupNeeded);
    3361              : 
    3362         2695 :     LWLockRelease(partitionLock);
    3363              : 
    3364              :     /*
    3365              :      * Decrement strong lock count.  This logic is needed only for 2PC.
    3366              :      */
    3367         2695 :     if (decrement_strong_lock_count
    3368          871 :         && ConflictsWithRelationFastPath(locktag, lockmode))
    3369              :     {
    3370          121 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
    3371              : 
    3372          121 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    3373              :         Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    3374          121 :         FastPathStrongRelationLocks->count[fasthashcode]--;
    3375          121 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    3376              :     }
    3377              : }
    3378              : 
    3379              : /*
    3380              :  * CheckForSessionAndXactLocks
    3381              :  *      Check to see if transaction holds both session-level and xact-level
    3382              :  *      locks on the same object; if so, throw an error.
    3383              :  *
    3384              :  * If we have both session- and transaction-level locks on the same object,
    3385              :  * PREPARE TRANSACTION must fail.  This should never happen with regular
    3386              :  * locks, since we only take those at session level in some special operations
    3387              :  * like VACUUM.  It's possible to hit this with advisory locks, though.
    3388              :  *
    3389              :  * It would be nice if we could keep the session hold and give away the
    3390              :  * transactional hold to the prepared xact.  However, that would require two
    3391              :  * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
    3392              :  * available when it comes time for PostPrepare_Locks to do the deed.
    3393              :  * So for now, we error out while we can still do so safely.
    3394              :  *
    3395              :  * Since the LOCALLOCK table stores a separate entry for each lockmode,
    3396              :  * we can't implement this check by examining LOCALLOCK entries in isolation.
    3397              :  * We must build a transient hashtable that is indexed by locktag only.
    3398              :  */
    3399              : static void
    3400          324 : CheckForSessionAndXactLocks(void)
    3401              : {
    3402              :     typedef struct
    3403              :     {
    3404              :         LOCKTAG     lock;       /* identifies the lockable object */
    3405              :         bool        sessLock;   /* is any lockmode held at session level? */
    3406              :         bool        xactLock;   /* is any lockmode held at xact level? */
    3407              :     } PerLockTagEntry;
    3408              : 
    3409              :     HASHCTL     hash_ctl;
    3410              :     HTAB       *lockhtab;
    3411              :     HASH_SEQ_STATUS status;
    3412              :     LOCALLOCK  *locallock;
    3413              : 
    3414              :     /* Create a local hash table keyed by LOCKTAG only */
    3415          324 :     hash_ctl.keysize = sizeof(LOCKTAG);
    3416          324 :     hash_ctl.entrysize = sizeof(PerLockTagEntry);
    3417          324 :     hash_ctl.hcxt = CurrentMemoryContext;
    3418              : 
    3419          324 :     lockhtab = hash_create("CheckForSessionAndXactLocks table",
    3420              :                            256, /* arbitrary initial size */
    3421              :                            &hash_ctl,
    3422              :                            HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
    3423              : 
    3424              :     /* Scan local lock table to find entries for each LOCKTAG */
    3425          324 :     hash_seq_init(&status, LockMethodLocalHash);
    3426              : 
    3427         1201 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3428              :     {
    3429          879 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3430              :         PerLockTagEntry *hentry;
    3431              :         bool        found;
    3432              :         int         i;
    3433              : 
    3434              :         /*
    3435              :          * Ignore VXID locks.  We don't want those to be held by prepared
    3436              :          * transactions, since they aren't meaningful after a restart.
    3437              :          */
    3438          879 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3439            0 :             continue;
    3440              : 
    3441              :         /* Ignore it if we don't actually hold the lock */
    3442          879 :         if (locallock->nLocks <= 0)
    3443            0 :             continue;
    3444              : 
    3445              :         /* Otherwise, find or make an entry in lockhtab */
    3446          879 :         hentry = (PerLockTagEntry *) hash_search(lockhtab,
    3447          879 :                                                  &locallock->tag.lock,
    3448              :                                                  HASH_ENTER, &found);
    3449          879 :         if (!found)             /* initialize, if newly created */
    3450          796 :             hentry->sessLock = hentry->xactLock = false;
    3451              : 
    3452              :         /* Scan to see if we hold lock at session or xact level or both */
    3453         1758 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3454              :         {
    3455          879 :             if (lockOwners[i].owner == NULL)
    3456           10 :                 hentry->sessLock = true;
    3457              :             else
    3458          869 :                 hentry->xactLock = true;
    3459              :         }
    3460              : 
    3461              :         /*
    3462              :          * We can throw error immediately when we see both types of locks; no
    3463              :          * need to wait around to see if there are more violations.
    3464              :          */
    3465          879 :         if (hentry->sessLock && hentry->xactLock)
    3466            2 :             ereport(ERROR,
    3467              :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3468              :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3469              :     }
    3470              : 
    3471              :     /* Success, so clean up */
    3472          322 :     hash_destroy(lockhtab);
    3473          322 : }
    3474              : 
    3475              : /*
    3476              :  * AtPrepare_Locks
    3477              :  *      Do the preparatory work for a PREPARE: make 2PC state file records
    3478              :  *      for all locks currently held.
    3479              :  *
    3480              :  * Session-level locks are ignored, as are VXID locks.
    3481              :  *
    3482              :  * For the most part, we don't need to touch shared memory for this ---
    3483              :  * all the necessary state information is in the locallock table.
    3484              :  * Fast-path locks are an exception, however: we move any such locks to
    3485              :  * the main table before allowing PREPARE TRANSACTION to succeed.
    3486              :  */
    3487              : void
    3488          324 : AtPrepare_Locks(void)
    3489              : {
    3490              :     HASH_SEQ_STATUS status;
    3491              :     LOCALLOCK  *locallock;
    3492              : 
    3493              :     /* First, verify there aren't locks of both xact and session level */
    3494          324 :     CheckForSessionAndXactLocks();
    3495              : 
    3496              :     /* Now do the per-locallock cleanup work */
    3497          322 :     hash_seq_init(&status, LockMethodLocalHash);
    3498              : 
    3499         1195 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3500              :     {
    3501              :         TwoPhaseLockRecord record;
    3502          873 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3503              :         bool        haveSessionLock;
    3504              :         bool        haveXactLock;
    3505              :         int         i;
    3506              : 
    3507              :         /*
    3508              :          * Ignore VXID locks.  We don't want those to be held by prepared
    3509              :          * transactions, since they aren't meaningful after a restart.
    3510              :          */
    3511          873 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3512            8 :             continue;
    3513              : 
    3514              :         /* Ignore it if we don't actually hold the lock */
    3515          873 :         if (locallock->nLocks <= 0)
    3516            0 :             continue;
    3517              : 
    3518              :         /* Scan to see whether we hold it at session or transaction level */
    3519          873 :         haveSessionLock = haveXactLock = false;
    3520         1746 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3521              :         {
    3522          873 :             if (lockOwners[i].owner == NULL)
    3523            8 :                 haveSessionLock = true;
    3524              :             else
    3525          865 :                 haveXactLock = true;
    3526              :         }
    3527              : 
    3528              :         /* Ignore it if we have only session lock */
    3529          873 :         if (!haveXactLock)
    3530            8 :             continue;
    3531              : 
    3532              :         /* This can't happen, because we already checked it */
    3533          865 :         if (haveSessionLock)
    3534            0 :             ereport(ERROR,
    3535              :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3536              :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3537              : 
    3538              :         /*
    3539              :          * If the local lock was taken via the fast-path, we need to move it
    3540              :          * to the primary lock table, or just get a pointer to the existing
    3541              :          * primary lock table entry if by chance it's already been
    3542              :          * transferred.
    3543              :          */
    3544          865 :         if (locallock->proclock == NULL)
    3545              :         {
    3546          341 :             locallock->proclock = FastPathGetRelationLockEntry(locallock);
    3547          341 :             locallock->lock = locallock->proclock->tag.myLock;
    3548              :         }
    3549              : 
    3550              :         /*
    3551              :          * Arrange to not release any strong lock count held by this lock
    3552              :          * entry.  We must retain the count until the prepared transaction is
    3553              :          * committed or rolled back.
    3554              :          */
    3555          865 :         locallock->holdsStrongLockCount = false;
    3556              : 
    3557              :         /*
    3558              :          * Create a 2PC record.
    3559              :          */
    3560          865 :         memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
    3561          865 :         record.lockmode = locallock->tag.mode;
    3562              : 
    3563          865 :         RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
    3564              :                                &record, sizeof(TwoPhaseLockRecord));
    3565              :     }
    3566          322 : }
    3567              : 
    3568              : /*
    3569              :  * PostPrepare_Locks
    3570              :  *      Clean up after successful PREPARE
    3571              :  *
    3572              :  * Here, we want to transfer ownership of our locks to a dummy PGPROC
    3573              :  * that's now associated with the prepared transaction, and we want to
    3574              :  * clean out the corresponding entries in the LOCALLOCK table.
    3575              :  *
    3576              :  * Note: by removing the LOCALLOCK entries, we are leaving dangling
    3577              :  * pointers in the transaction's resource owner.  This is OK at the
    3578              :  * moment since resowner.c doesn't try to free locks retail at a toplevel
    3579              :  * transaction commit or abort.  We could alternatively zero out nLocks
    3580              :  * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
    3581              :  * but that probably costs more cycles.
    3582              :  */
    3583              : void
    3584          322 : PostPrepare_Locks(FullTransactionId fxid)
    3585              : {
    3586          322 :     PGPROC     *newproc = TwoPhaseGetDummyProc(fxid, false);
    3587              :     HASH_SEQ_STATUS status;
    3588              :     LOCALLOCK  *locallock;
    3589              :     LOCK       *lock;
    3590              :     PROCLOCK   *proclock;
    3591              :     PROCLOCKTAG proclocktag;
    3592              :     int         partition;
    3593              : 
    3594              :     /* Can't prepare a lock group follower. */
    3595              :     Assert(MyProc->lockGroupLeader == NULL ||
    3596              :            MyProc->lockGroupLeader == MyProc);
    3597              : 
    3598              :     /* This is a critical section: any error means big trouble */
    3599          322 :     START_CRIT_SECTION();
    3600              : 
    3601              :     /*
    3602              :      * First we run through the locallock table and get rid of unwanted
    3603              :      * entries, then we scan the process's proclocks and transfer them to the
    3604              :      * target proc.
    3605              :      *
    3606              :      * We do this separately because we may have multiple locallock entries
    3607              :      * pointing to the same proclock, and we daren't end up with any dangling
    3608              :      * pointers.
    3609              :      */
    3610          322 :     hash_seq_init(&status, LockMethodLocalHash);
    3611              : 
    3612         1195 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3613              :     {
    3614          873 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3615              :         bool        haveSessionLock;
    3616              :         bool        haveXactLock;
    3617              :         int         i;
    3618              : 
    3619          873 :         if (locallock->proclock == NULL || locallock->lock == NULL)
    3620              :         {
    3621              :             /*
    3622              :              * We must've run out of shared memory while trying to set up this
    3623              :              * lock.  Just forget the local entry.
    3624              :              */
    3625              :             Assert(locallock->nLocks == 0);
    3626            0 :             RemoveLocalLock(locallock);
    3627            0 :             continue;
    3628              :         }
    3629              : 
    3630              :         /* Ignore VXID locks */
    3631          873 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3632            0 :             continue;
    3633              : 
    3634              :         /* Scan to see whether we hold it at session or transaction level */
    3635          873 :         haveSessionLock = haveXactLock = false;
    3636         1746 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3637              :         {
    3638          873 :             if (lockOwners[i].owner == NULL)
    3639            8 :                 haveSessionLock = true;
    3640              :             else
    3641          865 :                 haveXactLock = true;
    3642              :         }
    3643              : 
    3644              :         /* Ignore it if we have only session lock */
    3645          873 :         if (!haveXactLock)
    3646            8 :             continue;
    3647              : 
    3648              :         /* This can't happen, because we already checked it */
    3649          865 :         if (haveSessionLock)
    3650            0 :             ereport(PANIC,
    3651              :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3652              :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3653              : 
    3654              :         /* Mark the proclock to show we need to release this lockmode */
    3655          865 :         if (locallock->nLocks > 0)
    3656          865 :             locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
    3657              : 
    3658              :         /* And remove the locallock hashtable entry */
    3659          865 :         RemoveLocalLock(locallock);
    3660              :     }
    3661              : 
    3662              :     /*
    3663              :      * Now, scan each lock partition separately.
    3664              :      */
    3665         5474 :     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
    3666              :     {
    3667              :         LWLock     *partitionLock;
    3668         5152 :         dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
    3669              :         dlist_mutable_iter proclock_iter;
    3670              : 
    3671         5152 :         partitionLock = LockHashPartitionLockByIndex(partition);
    3672              : 
    3673              :         /*
    3674              :          * If the proclock list for this partition is empty, we can skip
    3675              :          * acquiring the partition lock.  This optimization is safer than the
    3676              :          * situation in LockReleaseAll, because we got rid of any fast-path
    3677              :          * locks during AtPrepare_Locks, so there cannot be any case where
    3678              :          * another backend is adding something to our lists now.  For safety,
    3679              :          * though, we code this the same way as in LockReleaseAll.
    3680              :          */
    3681         5152 :         if (dlist_is_empty(procLocks))
    3682         4385 :             continue;           /* needn't examine this partition */
    3683              : 
    3684          767 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    3685              : 
    3686         1599 :         dlist_foreach_modify(proclock_iter, procLocks)
    3687              :         {
    3688          832 :             proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
    3689              : 
    3690              :             Assert(proclock->tag.myProc == MyProc);
    3691              : 
    3692          832 :             lock = proclock->tag.myLock;
    3693              : 
    3694              :             /* Ignore VXID locks */
    3695          832 :             if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3696           40 :                 continue;
    3697              : 
    3698              :             PROCLOCK_PRINT("PostPrepare_Locks", proclock);
    3699              :             LOCK_PRINT("PostPrepare_Locks", lock, 0);
    3700              :             Assert(lock->nRequested >= 0);
    3701              :             Assert(lock->nGranted >= 0);
    3702              :             Assert(lock->nGranted <= lock->nRequested);
    3703              :             Assert((proclock->holdMask & ~lock->grantMask) == 0);
    3704              : 
    3705              :             /* Ignore it if nothing to release (must be a session lock) */
    3706          792 :             if (proclock->releaseMask == 0)
    3707            8 :                 continue;
    3708              : 
    3709              :             /* Else we should be releasing all locks */
    3710          784 :             if (proclock->releaseMask != proclock->holdMask)
    3711            0 :                 elog(PANIC, "we seem to have dropped a bit somewhere");
    3712              : 
    3713              :             /*
    3714              :              * We cannot simply modify proclock->tag.myProc to reassign
    3715              :              * ownership of the lock, because that's part of the hash key and
    3716              :              * the proclock would then be in the wrong hash chain.  Instead
    3717              :              * use hash_update_hash_key.  (We used to create a new hash entry,
    3718              :              * but that risks out-of-memory failure if other processes are
    3719              :              * busy making proclocks too.)  We must unlink the proclock from
    3720              :              * our procLink chain and put it into the new proc's chain, too.
    3721              :              *
    3722              :              * Note: the updated proclock hash key will still belong to the
    3723              :              * same hash partition, cf proclock_hash().  So the partition lock
    3724              :              * we already hold is sufficient for this.
    3725              :              */
    3726          784 :             dlist_delete(&proclock->procLink);
    3727              : 
    3728              :             /*
    3729              :              * Create the new hash key for the proclock.
    3730              :              */
    3731          784 :             proclocktag.myLock = lock;
    3732          784 :             proclocktag.myProc = newproc;
    3733              : 
    3734              :             /*
    3735              :              * Update groupLeader pointer to point to the new proc.  (We'd
    3736              :              * better not be a member of somebody else's lock group!)
    3737              :              */
    3738              :             Assert(proclock->groupLeader == proclock->tag.myProc);
    3739          784 :             proclock->groupLeader = newproc;
    3740              : 
    3741              :             /*
    3742              :              * Update the proclock.  We should not find any existing entry for
    3743              :              * the same hash key, since there can be only one entry for any
    3744              :              * given lock with my own proc.
    3745              :              */
    3746          784 :             if (!hash_update_hash_key(LockMethodProcLockHash,
    3747              :                                       proclock,
    3748              :                                       &proclocktag))
    3749            0 :                 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
    3750              : 
    3751              :             /* Re-link into the new proc's proclock list */
    3752          784 :             dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
    3753              : 
    3754              :             PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
    3755              :         }                       /* loop over PROCLOCKs within this partition */
    3756              : 
    3757          767 :         LWLockRelease(partitionLock);
    3758              :     }                           /* loop over partitions */
    3759              : 
    3760          322 :     END_CRIT_SECTION();
    3761          322 : }
    3762              : 
    3763              : 
    3764              : /*
    3765              :  * Estimate shared-memory space used for lock tables
    3766              :  */
    3767              : Size
    3768         2207 : LockManagerShmemSize(void)
    3769              : {
    3770         2207 :     Size        size = 0;
    3771              :     long        max_table_size;
    3772              : 
    3773              :     /* lock hash table */
    3774         2207 :     max_table_size = NLOCKENTS();
    3775         2207 :     size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
    3776              : 
    3777              :     /* proclock hash table */
    3778         2207 :     max_table_size *= 2;
    3779         2207 :     size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
    3780              : 
    3781              :     /*
    3782              :      * Since NLOCKENTS is only an estimate, add 10% safety margin.
    3783              :      */
    3784         2207 :     size = add_size(size, size / 10);
    3785              : 
    3786         2207 :     return size;
    3787              : }
    3788              : 
    3789              : /*
    3790              :  * GetLockStatusData - Return a summary of the lock manager's internal
    3791              :  * status, for use in a user-level reporting function.
    3792              :  *
    3793              :  * The return data consists of an array of LockInstanceData objects,
    3794              :  * which are a lightly abstracted version of the PROCLOCK data structures,
    3795              :  * i.e. there is one entry for each unique lock and interested PGPROC.
    3796              :  * It is the caller's responsibility to match up related items (such as
    3797              :  * references to the same lockable object or PGPROC) if wanted.
    3798              :  *
    3799              :  * The design goal is to hold the LWLocks for as short a time as possible;
    3800              :  * thus, this function simply makes a copy of the necessary data and releases
    3801              :  * the locks, allowing the caller to contemplate and format the data for as
    3802              :  * long as it pleases.
    3803              :  */
    3804              : LockData *
    3805          311 : GetLockStatusData(void)
    3806              : {
    3807              :     LockData   *data;
    3808              :     PROCLOCK   *proclock;
    3809              :     HASH_SEQ_STATUS seqstat;
    3810              :     int         els;
    3811              :     int         el;
    3812              :     int         i;
    3813              : 
    3814          311 :     data = palloc_object(LockData);
    3815              : 
    3816              :     /* Guess how much space we'll need. */
    3817          311 :     els = MaxBackends;
    3818          311 :     el = 0;
    3819          311 :     data->locks = palloc_array(LockInstanceData, els);
    3820              : 
    3821              :     /*
    3822              :      * First, we iterate through the per-backend fast-path arrays, locking
    3823              :      * them one at a time.  This might produce an inconsistent picture of the
    3824              :      * system state, but taking all of those LWLocks at the same time seems
    3825              :      * impractical (in particular, note MAX_SIMUL_LWLOCKS).  It shouldn't
    3826              :      * matter too much, because none of these locks can be involved in lock
    3827              :      * conflicts anyway - anything that might must be present in the main lock
    3828              :      * table.  (For the same reason, we don't sweat about making leaderPid
    3829              :      * completely valid.  We cannot safely dereference another backend's
    3830              :      * lockGroupLeader field without holding all lock partition locks, and
    3831              :      * it's not worth that.)
    3832              :      */
    3833        46562 :     for (i = 0; i < ProcGlobal->allProcCount; ++i)
    3834              :     {
    3835        46251 :         PGPROC     *proc = GetPGProcByNumber(i);
    3836              : 
    3837              :         /* Skip backends with pid=0, as they don't hold fast-path locks */
    3838        46251 :         if (proc->pid == 0)
    3839        41447 :             continue;
    3840              : 
    3841         4804 :         LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
    3842              : 
    3843        24020 :         for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
    3844              :         {
    3845              :             /* Skip groups without registered fast-path locks */
    3846        19216 :             if (proc->fpLockBits[g] == 0)
    3847        16474 :                 continue;
    3848              : 
    3849        46614 :             for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
    3850              :             {
    3851              :                 LockInstanceData *instance;
    3852        43872 :                 uint32      f = FAST_PATH_SLOT(g, j);
    3853        43872 :                 uint32      lockbits = FAST_PATH_GET_BITS(proc, f);
    3854              : 
    3855              :                 /* Skip unallocated slots */
    3856        43872 :                 if (!lockbits)
    3857        39055 :                     continue;
    3858              : 
    3859         4817 :                 if (el >= els)
    3860              :                 {
    3861            9 :                     els += MaxBackends;
    3862            9 :                     data->locks = (LockInstanceData *)
    3863            9 :                         repalloc(data->locks, sizeof(LockInstanceData) * els);
    3864              :                 }
    3865              : 
    3866         4817 :                 instance = &data->locks[el];
    3867         4817 :                 SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
    3868              :                                      proc->fpRelId[f]);
    3869         4817 :                 instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
    3870         4817 :                 instance->waitLockMode = NoLock;
    3871         4817 :                 instance->vxid.procNumber = proc->vxid.procNumber;
    3872         4817 :                 instance->vxid.localTransactionId = proc->vxid.lxid;
    3873         4817 :                 instance->pid = proc->pid;
    3874         4817 :                 instance->leaderPid = proc->pid;
    3875         4817 :                 instance->fastpath = true;
    3876              : 
    3877              :                 /*
    3878              :                  * Successfully taking fast path lock means there were no
    3879              :                  * conflicting locks.
    3880              :                  */
    3881         4817 :                 instance->waitStart = 0;
    3882              : 
    3883         4817 :                 el++;
    3884              :             }
    3885              :         }
    3886              : 
    3887         4804 :         if (proc->fpVXIDLock)
    3888              :         {
    3889              :             VirtualTransactionId vxid;
    3890              :             LockInstanceData *instance;
    3891              : 
    3892         1483 :             if (el >= els)
    3893              :             {
    3894            7 :                 els += MaxBackends;
    3895            7 :                 data->locks = (LockInstanceData *)
    3896            7 :                     repalloc(data->locks, sizeof(LockInstanceData) * els);
    3897              :             }
    3898              : 
    3899         1483 :             vxid.procNumber = proc->vxid.procNumber;
    3900         1483 :             vxid.localTransactionId = proc->fpLocalTransactionId;
    3901              : 
    3902         1483 :             instance = &data->locks[el];
    3903         1483 :             SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
    3904         1483 :             instance->holdMask = LOCKBIT_ON(ExclusiveLock);
    3905         1483 :             instance->waitLockMode = NoLock;
    3906         1483 :             instance->vxid.procNumber = proc->vxid.procNumber;
    3907         1483 :             instance->vxid.localTransactionId = proc->vxid.lxid;
    3908         1483 :             instance->pid = proc->pid;
    3909         1483 :             instance->leaderPid = proc->pid;
    3910         1483 :             instance->fastpath = true;
    3911         1483 :             instance->waitStart = 0;
    3912              : 
    3913         1483 :             el++;
    3914              :         }
    3915              : 
    3916         4804 :         LWLockRelease(&proc->fpInfoLock);
    3917              :     }
    3918              : 
    3919              :     /*
    3920              :      * Next, acquire lock on the entire shared lock data structure.  We do
    3921              :      * this so that, at least for locks in the primary lock table, the state
    3922              :      * will be self-consistent.
    3923              :      *
    3924              :      * Since this is a read-only operation, we take shared instead of
    3925              :      * exclusive lock.  There's not a whole lot of point to this, because all
    3926              :      * the normal operations require exclusive lock, but it doesn't hurt
    3927              :      * anything either. It will at least allow two backends to do
    3928              :      * GetLockStatusData in parallel.
    3929              :      *
    3930              :      * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
    3931              :      */
    3932         5287 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3933         4976 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3934              : 
    3935              :     /* Now we can safely count the number of proclocks */
    3936          311 :     data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
    3937          311 :     if (data->nelements > els)
    3938              :     {
    3939           19 :         els = data->nelements;
    3940           19 :         data->locks = (LockInstanceData *)
    3941           19 :             repalloc(data->locks, sizeof(LockInstanceData) * els);
    3942              :     }
    3943              : 
    3944              :     /* Now scan the tables to copy the data */
    3945          311 :     hash_seq_init(&seqstat, LockMethodProcLockHash);
    3946              : 
    3947         3486 :     while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
    3948              :     {
    3949         3175 :         PGPROC     *proc = proclock->tag.myProc;
    3950         3175 :         LOCK       *lock = proclock->tag.myLock;
    3951         3175 :         LockInstanceData *instance = &data->locks[el];
    3952              : 
    3953         3175 :         memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
    3954         3175 :         instance->holdMask = proclock->holdMask;
    3955         3175 :         if (proc->waitLock == proclock->tag.myLock)
    3956            9 :             instance->waitLockMode = proc->waitLockMode;
    3957              :         else
    3958         3166 :             instance->waitLockMode = NoLock;
    3959         3175 :         instance->vxid.procNumber = proc->vxid.procNumber;
    3960         3175 :         instance->vxid.localTransactionId = proc->vxid.lxid;
    3961         3175 :         instance->pid = proc->pid;
    3962         3175 :         instance->leaderPid = proclock->groupLeader->pid;
    3963         3175 :         instance->fastpath = false;
    3964         3175 :         instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
    3965              : 
    3966         3175 :         el++;
    3967              :     }
    3968              : 
    3969              :     /*
    3970              :      * And release locks.  We do this in reverse order for two reasons: (1)
    3971              :      * Anyone else who needs more than one of the locks will be trying to lock
    3972              :      * them in increasing order; we don't want to release the other process
    3973              :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    3974              :      * behavior inside LWLockRelease.
    3975              :      */
    3976         5287 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    3977         4976 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    3978              : 
    3979              :     Assert(el == data->nelements);
    3980              : 
    3981          311 :     return data;
    3982              : }
    3983              : 
    3984              : /*
    3985              :  * GetBlockerStatusData - Return a summary of the lock manager's state
    3986              :  * concerning locks that are blocking the specified PID or any member of
    3987              :  * the PID's lock group, for use in a user-level reporting function.
    3988              :  *
    3989              :  * For each PID within the lock group that is awaiting some heavyweight lock,
    3990              :  * the return data includes an array of LockInstanceData objects, which are
    3991              :  * the same data structure used by GetLockStatusData; but unlike that function,
    3992              :  * this one reports only the PROCLOCKs associated with the lock that that PID
    3993              :  * is blocked on.  (Hence, all the locktags should be the same for any one
    3994              :  * blocked PID.)  In addition, we return an array of the PIDs of those backends
    3995              :  * that are ahead of the blocked PID in the lock's wait queue.  These can be
    3996              :  * compared with the PIDs in the LockInstanceData objects to determine which
    3997              :  * waiters are ahead of or behind the blocked PID in the queue.
    3998              :  *
    3999              :  * If blocked_pid isn't a valid backend PID or nothing in its lock group is
    4000              :  * waiting on any heavyweight lock, return empty arrays.
    4001              :  *
    4002              :  * The design goal is to hold the LWLocks for as short a time as possible;
    4003              :  * thus, this function simply makes a copy of the necessary data and releases
    4004              :  * the locks, allowing the caller to contemplate and format the data for as
    4005              :  * long as it pleases.
    4006              :  */
    4007              : BlockedProcsData *
    4008         1593 : GetBlockerStatusData(int blocked_pid)
    4009              : {
    4010              :     BlockedProcsData *data;
    4011              :     PGPROC     *proc;
    4012              :     int         i;
    4013              : 
    4014         1593 :     data = palloc_object(BlockedProcsData);
    4015              : 
    4016              :     /*
    4017              :      * Guess how much space we'll need, and preallocate.  Most of the time
    4018              :      * this will avoid needing to do repalloc while holding the LWLocks.  (We
    4019              :      * assume, but check with an Assert, that MaxBackends is enough entries
    4020              :      * for the procs[] array; the other two could need enlargement, though.)
    4021              :      */
    4022         1593 :     data->nprocs = data->nlocks = data->npids = 0;
    4023         1593 :     data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
    4024         1593 :     data->procs = palloc_array(BlockedProcData, data->maxprocs);
    4025         1593 :     data->locks = palloc_array(LockInstanceData, data->maxlocks);
    4026         1593 :     data->waiter_pids = palloc_array(int, data->maxpids);
    4027              : 
    4028              :     /*
    4029              :      * In order to search the ProcArray for blocked_pid and assume that that
    4030              :      * entry won't immediately disappear under us, we must hold ProcArrayLock.
    4031              :      * In addition, to examine the lock grouping fields of any other backend,
    4032              :      * we must hold all the hash partition locks.  (Only one of those locks is
    4033              :      * actually relevant for any one lock group, but we can't know which one
    4034              :      * ahead of time.)  It's fairly annoying to hold all those locks
    4035              :      * throughout this, but it's no worse than GetLockStatusData(), and it
    4036              :      * does have the advantage that we're guaranteed to return a
    4037              :      * self-consistent instantaneous state.
    4038              :      */
    4039         1593 :     LWLockAcquire(ProcArrayLock, LW_SHARED);
    4040              : 
    4041         1593 :     proc = BackendPidGetProcWithLock(blocked_pid);
    4042              : 
    4043              :     /* Nothing to do if it's gone */
    4044         1593 :     if (proc != NULL)
    4045              :     {
    4046              :         /*
    4047              :          * Acquire lock on the entire shared lock data structure.  See notes
    4048              :          * in GetLockStatusData().
    4049              :          */
    4050        27081 :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    4051        25488 :             LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    4052              : 
    4053         1593 :         if (proc->lockGroupLeader == NULL)
    4054              :         {
    4055              :             /* Easy case, proc is not a lock group member */
    4056         1501 :             GetSingleProcBlockerStatusData(proc, data);
    4057              :         }
    4058              :         else
    4059              :         {
    4060              :             /* Examine all procs in proc's lock group */
    4061              :             dlist_iter  iter;
    4062              : 
    4063          207 :             dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
    4064              :             {
    4065              :                 PGPROC     *memberProc;
    4066              : 
    4067          115 :                 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
    4068          115 :                 GetSingleProcBlockerStatusData(memberProc, data);
    4069              :             }
    4070              :         }
    4071              : 
    4072              :         /*
    4073              :          * And release locks.  See notes in GetLockStatusData().
    4074              :          */
    4075        27081 :         for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    4076        25488 :             LWLockRelease(LockHashPartitionLockByIndex(i));
    4077              : 
    4078              :         Assert(data->nprocs <= data->maxprocs);
    4079              :     }
    4080              : 
    4081         1593 :     LWLockRelease(ProcArrayLock);
    4082              : 
    4083         1593 :     return data;
    4084              : }
    4085              : 
    4086              : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
    4087              : static void
    4088         1616 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
    4089              : {
    4090         1616 :     LOCK       *theLock = blocked_proc->waitLock;
    4091              :     BlockedProcData *bproc;
    4092              :     dlist_iter  proclock_iter;
    4093              :     dlist_iter  proc_iter;
    4094              :     dclist_head *waitQueue;
    4095              :     int         queue_size;
    4096              : 
    4097              :     /* Nothing to do if this proc is not blocked */
    4098         1616 :     if (theLock == NULL)
    4099          401 :         return;
    4100              : 
    4101              :     /* Set up a procs[] element */
    4102         1215 :     bproc = &data->procs[data->nprocs++];
    4103         1215 :     bproc->pid = blocked_proc->pid;
    4104         1215 :     bproc->first_lock = data->nlocks;
    4105         1215 :     bproc->first_waiter = data->npids;
    4106              : 
    4107              :     /*
    4108              :      * We may ignore the proc's fast-path arrays, since nothing in those could
    4109              :      * be related to a contended lock.
    4110              :      */
    4111              : 
    4112              :     /* Collect all PROCLOCKs associated with theLock */
    4113         3688 :     dlist_foreach(proclock_iter, &theLock->procLocks)
    4114              :     {
    4115         2473 :         PROCLOCK   *proclock =
    4116         2473 :             dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
    4117         2473 :         PGPROC     *proc = proclock->tag.myProc;
    4118         2473 :         LOCK       *lock = proclock->tag.myLock;
    4119              :         LockInstanceData *instance;
    4120              : 
    4121         2473 :         if (data->nlocks >= data->maxlocks)
    4122              :         {
    4123            0 :             data->maxlocks += MaxBackends;
    4124            0 :             data->locks = (LockInstanceData *)
    4125            0 :                 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
    4126              :         }
    4127              : 
    4128         2473 :         instance = &data->locks[data->nlocks];
    4129         2473 :         memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
    4130         2473 :         instance->holdMask = proclock->holdMask;
    4131         2473 :         if (proc->waitLock == lock)
    4132         1253 :             instance->waitLockMode = proc->waitLockMode;
    4133              :         else
    4134         1220 :             instance->waitLockMode = NoLock;
    4135         2473 :         instance->vxid.procNumber = proc->vxid.procNumber;
    4136         2473 :         instance->vxid.localTransactionId = proc->vxid.lxid;
    4137         2473 :         instance->pid = proc->pid;
    4138         2473 :         instance->leaderPid = proclock->groupLeader->pid;
    4139         2473 :         instance->fastpath = false;
    4140         2473 :         data->nlocks++;
    4141              :     }
    4142              : 
    4143              :     /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
    4144         1215 :     waitQueue = &(theLock->waitProcs);
    4145         1215 :     queue_size = dclist_count(waitQueue);
    4146              : 
    4147         1215 :     if (queue_size > data->maxpids - data->npids)
    4148              :     {
    4149            0 :         data->maxpids = Max(data->maxpids + MaxBackends,
    4150              :                             data->npids + queue_size);
    4151            0 :         data->waiter_pids = (int *) repalloc(data->waiter_pids,
    4152            0 :                                              sizeof(int) * data->maxpids);
    4153              :     }
    4154              : 
    4155              :     /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
    4156         1233 :     dclist_foreach(proc_iter, waitQueue)
    4157              :     {
    4158         1233 :         PGPROC     *queued_proc = dlist_container(PGPROC, waitLink, proc_iter.cur);
    4159              : 
    4160         1233 :         if (queued_proc == blocked_proc)
    4161         1215 :             break;
    4162           18 :         data->waiter_pids[data->npids++] = queued_proc->pid;
    4163              :     }
    4164              : 
    4165         1215 :     bproc->num_locks = data->nlocks - bproc->first_lock;
    4166         1215 :     bproc->num_waiters = data->npids - bproc->first_waiter;
    4167              : }
    4168              : 
    4169              : /*
    4170              :  * Returns a list of currently held AccessExclusiveLocks, for use by
    4171              :  * LogStandbySnapshot().  The result is a palloc'd array,
    4172              :  * with the number of elements returned into *nlocks.
    4173              :  *
    4174              :  * XXX This currently takes a lock on all partitions of the lock table,
    4175              :  * but it's possible to do better.  By reference counting locks and storing
    4176              :  * the value in the ProcArray entry for each backend we could tell if any
    4177              :  * locks need recording without having to acquire the partition locks and
    4178              :  * scan the lock table.  Whether that's worth the additional overhead
    4179              :  * is pretty dubious though.
    4180              :  */
    4181              : xl_standby_lock *
    4182         1466 : GetRunningTransactionLocks(int *nlocks)
    4183              : {
    4184              :     xl_standby_lock *accessExclusiveLocks;
    4185              :     PROCLOCK   *proclock;
    4186              :     HASH_SEQ_STATUS seqstat;
    4187              :     int         i;
    4188              :     int         index;
    4189              :     int         els;
    4190              : 
    4191              :     /*
    4192              :      * Acquire lock on the entire shared lock data structure.
    4193              :      *
    4194              :      * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
    4195              :      */
    4196        24922 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    4197        23456 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    4198              : 
    4199              :     /* Now we can safely count the number of proclocks */
    4200         1466 :     els = hash_get_num_entries(LockMethodProcLockHash);
    4201              : 
    4202              :     /*
    4203              :      * Allocating enough space for all locks in the lock table is overkill,
    4204              :      * but it's more convenient and faster than having to enlarge the array.
    4205              :      */
    4206         1466 :     accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
    4207              : 
    4208              :     /* Now scan the tables to copy the data */
    4209         1466 :     hash_seq_init(&seqstat, LockMethodProcLockHash);
    4210              : 
    4211              :     /*
    4212              :      * If lock is a currently granted AccessExclusiveLock then it will have
    4213              :      * just one proclock holder, so locks are never accessed twice in this
    4214              :      * particular case. Don't copy this code for use elsewhere because in the
    4215              :      * general case this will give you duplicate locks when looking at
    4216              :      * non-exclusive lock types.
    4217              :      */
    4218         1466 :     index = 0;
    4219         5911 :     while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
    4220              :     {
    4221              :         /* make sure this definition matches the one used in LockAcquire */
    4222         4445 :         if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
    4223         2289 :             proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
    4224              :         {
    4225         1527 :             PGPROC     *proc = proclock->tag.myProc;
    4226         1527 :             LOCK       *lock = proclock->tag.myLock;
    4227         1527 :             TransactionId xid = proc->xid;
    4228              : 
    4229              :             /*
    4230              :              * Don't record locks for transactions if we know they have
    4231              :              * already issued their WAL record for commit but not yet released
    4232              :              * lock. It is still possible that we see locks held by already
    4233              :              * complete transactions, if they haven't yet zeroed their xids.
    4234              :              */
    4235         1527 :             if (!TransactionIdIsValid(xid))
    4236            8 :                 continue;
    4237              : 
    4238         1519 :             accessExclusiveLocks[index].xid = xid;
    4239         1519 :             accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
    4240         1519 :             accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
    4241              : 
    4242         1519 :             index++;
    4243              :         }
    4244              :     }
    4245              : 
    4246              :     Assert(index <= els);
    4247              : 
    4248              :     /*
    4249              :      * And release locks.  We do this in reverse order for two reasons: (1)
    4250              :      * Anyone else who needs more than one of the locks will be trying to lock
    4251              :      * them in increasing order; we don't want to release the other process
    4252              :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    4253              :      * behavior inside LWLockRelease.
    4254              :      */
    4255        24922 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    4256        23456 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    4257              : 
    4258         1466 :     *nlocks = index;
    4259         1466 :     return accessExclusiveLocks;
    4260              : }
    4261              : 
    4262              : /* Provide the textual name of any lock mode */
    4263              : const char *
    4264        11289 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
    4265              : {
    4266              :     Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
    4267              :     Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
    4268        11289 :     return LockMethods[lockmethodid]->lockModeNames[mode];
    4269              : }
    4270              : 
    4271              : #ifdef LOCK_DEBUG
    4272              : /*
    4273              :  * Dump all locks in the given proc's myProcLocks lists.
    4274              :  *
    4275              :  * Caller is responsible for having acquired appropriate LWLocks.
    4276              :  */
    4277              : void
    4278              : DumpLocks(PGPROC *proc)
    4279              : {
    4280              :     int         i;
    4281              : 
    4282              :     if (proc == NULL)
    4283              :         return;
    4284              : 
    4285              :     if (proc->waitLock)
    4286              :         LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
    4287              : 
    4288              :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    4289              :     {
    4290              :         dlist_head *procLocks = &proc->myProcLocks[i];
    4291              :         dlist_iter  iter;
    4292              : 
    4293              :         dlist_foreach(iter, procLocks)
    4294              :         {
    4295              :             PROCLOCK   *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
    4296              :             LOCK       *lock = proclock->tag.myLock;
    4297              : 
    4298              :             Assert(proclock->tag.myProc == proc);
    4299              :             PROCLOCK_PRINT("DumpLocks", proclock);
    4300              :             LOCK_PRINT("DumpLocks", lock, 0);
    4301              :         }
    4302              :     }
    4303              : }
    4304              : 
    4305              : /*
    4306              :  * Dump all lmgr locks.
    4307              :  *
    4308              :  * Caller is responsible for having acquired appropriate LWLocks.
    4309              :  */
    4310              : void
    4311              : DumpAllLocks(void)
    4312              : {
    4313              :     PGPROC     *proc;
    4314              :     PROCLOCK   *proclock;
    4315              :     LOCK       *lock;
    4316              :     HASH_SEQ_STATUS status;
    4317              : 
    4318              :     proc = MyProc;
    4319              : 
    4320              :     if (proc && proc->waitLock)
    4321              :         LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
    4322              : 
    4323              :     hash_seq_init(&status, LockMethodProcLockHash);
    4324              : 
    4325              :     while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
    4326              :     {
    4327              :         PROCLOCK_PRINT("DumpAllLocks", proclock);
    4328              : 
    4329              :         lock = proclock->tag.myLock;
    4330              :         if (lock)
    4331              :             LOCK_PRINT("DumpAllLocks", lock, 0);
    4332              :         else
    4333              :             elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
    4334              :     }
    4335              : }
    4336              : #endif                          /* LOCK_DEBUG */
    4337              : 
    4338              : /*
    4339              :  * LOCK 2PC resource manager's routines
    4340              :  */
    4341              : 
    4342              : /*
    4343              :  * Re-acquire a lock belonging to a transaction that was prepared.
    4344              :  *
    4345              :  * Because this function is run at db startup, re-acquiring the locks should
    4346              :  * never conflict with running transactions because there are none.  We
    4347              :  * assume that the lock state represented by the stored 2PC files is legal.
    4348              :  *
    4349              :  * When switching from Hot Standby mode to normal operation, the locks will
    4350              :  * be already held by the startup process. The locks are acquired for the new
    4351              :  * procs without checking for conflicts, so we don't get a conflict between the
    4352              :  * startup process and the dummy procs, even though we will momentarily have
    4353              :  * a situation where two procs are holding the same AccessExclusiveLock,
    4354              :  * which isn't normally possible because the conflict. If we're in standby
    4355              :  * mode, but a recovery snapshot hasn't been established yet, it's possible
    4356              :  * that some but not all of the locks are already held by the startup process.
    4357              :  *
    4358              :  * This approach is simple, but also a bit dangerous, because if there isn't
    4359              :  * enough shared memory to acquire the locks, an error will be thrown, which
    4360              :  * is promoted to FATAL and recovery will abort, bringing down postmaster.
    4361              :  * A safer approach would be to transfer the locks like we do in
    4362              :  * AtPrepare_Locks, but then again, in hot standby mode it's possible for
    4363              :  * read-only backends to use up all the shared lock memory anyway, so that
    4364              :  * replaying the WAL record that needs to acquire a lock will throw an error
    4365              :  * and PANIC anyway.
    4366              :  */
    4367              : void
    4368           90 : lock_twophase_recover(FullTransactionId fxid, uint16 info,
    4369              :                       void *recdata, uint32 len)
    4370              : {
    4371           90 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4372           90 :     PGPROC     *proc = TwoPhaseGetDummyProc(fxid, false);
    4373              :     LOCKTAG    *locktag;
    4374              :     LOCKMODE    lockmode;
    4375              :     LOCKMETHODID lockmethodid;
    4376              :     LOCK       *lock;
    4377              :     PROCLOCK   *proclock;
    4378              :     PROCLOCKTAG proclocktag;
    4379              :     bool        found;
    4380              :     uint32      hashcode;
    4381              :     uint32      proclock_hashcode;
    4382              :     int         partition;
    4383              :     LWLock     *partitionLock;
    4384              :     LockMethod  lockMethodTable;
    4385              : 
    4386              :     Assert(len == sizeof(TwoPhaseLockRecord));
    4387           90 :     locktag = &rec->locktag;
    4388           90 :     lockmode = rec->lockmode;
    4389           90 :     lockmethodid = locktag->locktag_lockmethodid;
    4390              : 
    4391           90 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4392            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4393           90 :     lockMethodTable = LockMethods[lockmethodid];
    4394              : 
    4395           90 :     hashcode = LockTagHashCode(locktag);
    4396           90 :     partition = LockHashPartition(hashcode);
    4397           90 :     partitionLock = LockHashPartitionLock(hashcode);
    4398              : 
    4399           90 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4400              : 
    4401              :     /*
    4402              :      * Find or create a lock with this tag.
    4403              :      */
    4404           90 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    4405              :                                                 locktag,
    4406              :                                                 hashcode,
    4407              :                                                 HASH_ENTER_NULL,
    4408              :                                                 &found);
    4409           90 :     if (!lock)
    4410              :     {
    4411            0 :         LWLockRelease(partitionLock);
    4412            0 :         ereport(ERROR,
    4413              :                 (errcode(ERRCODE_OUT_OF_MEMORY),
    4414              :                  errmsg("out of shared memory"),
    4415              :                  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    4416              :     }
    4417              : 
    4418              :     /*
    4419              :      * if it's a new lock object, initialize it
    4420              :      */
    4421           90 :     if (!found)
    4422              :     {
    4423           78 :         lock->grantMask = 0;
    4424           78 :         lock->waitMask = 0;
    4425           78 :         dlist_init(&lock->procLocks);
    4426           78 :         dclist_init(&lock->waitProcs);
    4427           78 :         lock->nRequested = 0;
    4428           78 :         lock->nGranted = 0;
    4429          468 :         MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
    4430           78 :         MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
    4431              :         LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
    4432              :     }
    4433              :     else
    4434              :     {
    4435              :         LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
    4436              :         Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
    4437              :         Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
    4438              :         Assert(lock->nGranted <= lock->nRequested);
    4439              :     }
    4440              : 
    4441              :     /*
    4442              :      * Create the hash key for the proclock table.
    4443              :      */
    4444           90 :     proclocktag.myLock = lock;
    4445           90 :     proclocktag.myProc = proc;
    4446              : 
    4447           90 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    4448              : 
    4449              :     /*
    4450              :      * Find or create a proclock entry with this tag
    4451              :      */
    4452           90 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    4453              :                                                         &proclocktag,
    4454              :                                                         proclock_hashcode,
    4455              :                                                         HASH_ENTER_NULL,
    4456              :                                                         &found);
    4457           90 :     if (!proclock)
    4458              :     {
    4459              :         /* Oops, not enough shmem for the proclock */
    4460            0 :         if (lock->nRequested == 0)
    4461              :         {
    4462              :             /*
    4463              :              * There are no other requestors of this lock, so garbage-collect
    4464              :              * the lock object.  We *must* do this to avoid a permanent leak
    4465              :              * of shared memory, because there won't be anything to cause
    4466              :              * anyone to release the lock object later.
    4467              :              */
    4468              :             Assert(dlist_is_empty(&lock->procLocks));
    4469            0 :             if (!hash_search_with_hash_value(LockMethodLockHash,
    4470            0 :                                              &(lock->tag),
    4471              :                                              hashcode,
    4472              :                                              HASH_REMOVE,
    4473              :                                              NULL))
    4474            0 :                 elog(PANIC, "lock table corrupted");
    4475              :         }
    4476            0 :         LWLockRelease(partitionLock);
    4477            0 :         ereport(ERROR,
    4478              :                 (errcode(ERRCODE_OUT_OF_MEMORY),
    4479              :                  errmsg("out of shared memory"),
    4480              :                  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    4481              :     }
    4482              : 
    4483              :     /*
    4484              :      * If new, initialize the new entry
    4485              :      */
    4486           90 :     if (!found)
    4487              :     {
    4488              :         Assert(proc->lockGroupLeader == NULL);
    4489           82 :         proclock->groupLeader = proc;
    4490           82 :         proclock->holdMask = 0;
    4491           82 :         proclock->releaseMask = 0;
    4492              :         /* Add proclock to appropriate lists */
    4493           82 :         dlist_push_tail(&lock->procLocks, &proclock->lockLink);
    4494           82 :         dlist_push_tail(&proc->myProcLocks[partition],
    4495              :                         &proclock->procLink);
    4496              :         PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
    4497              :     }
    4498              :     else
    4499              :     {
    4500              :         PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
    4501              :         Assert((proclock->holdMask & ~lock->grantMask) == 0);
    4502              :     }
    4503              : 
    4504              :     /*
    4505              :      * lock->nRequested and lock->requested[] count the total number of
    4506              :      * requests, whether granted or waiting, so increment those immediately.
    4507              :      */
    4508           90 :     lock->nRequested++;
    4509           90 :     lock->requested[lockmode]++;
    4510              :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    4511              : 
    4512              :     /*
    4513              :      * We shouldn't already hold the desired lock.
    4514              :      */
    4515           90 :     if (proclock->holdMask & LOCKBIT_ON(lockmode))
    4516            0 :         elog(ERROR, "lock %s on object %u/%u/%u is already held",
    4517              :              lockMethodTable->lockModeNames[lockmode],
    4518              :              lock->tag.locktag_field1, lock->tag.locktag_field2,
    4519              :              lock->tag.locktag_field3);
    4520              : 
    4521              :     /*
    4522              :      * We ignore any possible conflicts and just grant ourselves the lock. Not
    4523              :      * only because we don't bother, but also to avoid deadlocks when
    4524              :      * switching from standby to normal mode. See function comment.
    4525              :      */
    4526           90 :     GrantLock(lock, proclock, lockmode);
    4527              : 
    4528              :     /*
    4529              :      * Bump strong lock count, to make sure any fast-path lock requests won't
    4530              :      * be granted without consulting the primary lock table.
    4531              :      */
    4532           90 :     if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
    4533              :     {
    4534           18 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
    4535              : 
    4536           18 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    4537           18 :         FastPathStrongRelationLocks->count[fasthashcode]++;
    4538           18 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    4539              :     }
    4540              : 
    4541           90 :     LWLockRelease(partitionLock);
    4542           90 : }
    4543              : 
    4544              : /*
    4545              :  * Re-acquire a lock belonging to a transaction that was prepared, when
    4546              :  * starting up into hot standby mode.
    4547              :  */
    4548              : void
    4549            0 : lock_twophase_standby_recover(FullTransactionId fxid, uint16 info,
    4550              :                               void *recdata, uint32 len)
    4551              : {
    4552            0 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4553              :     LOCKTAG    *locktag;
    4554              :     LOCKMODE    lockmode;
    4555              :     LOCKMETHODID lockmethodid;
    4556              : 
    4557              :     Assert(len == sizeof(TwoPhaseLockRecord));
    4558            0 :     locktag = &rec->locktag;
    4559            0 :     lockmode = rec->lockmode;
    4560            0 :     lockmethodid = locktag->locktag_lockmethodid;
    4561              : 
    4562            0 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4563            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4564              : 
    4565            0 :     if (lockmode == AccessExclusiveLock &&
    4566            0 :         locktag->locktag_type == LOCKTAG_RELATION)
    4567              :     {
    4568            0 :         StandbyAcquireAccessExclusiveLock(XidFromFullTransactionId(fxid),
    4569              :                                           locktag->locktag_field1 /* dboid */ ,
    4570              :                                           locktag->locktag_field2 /* reloid */ );
    4571              :     }
    4572            0 : }
    4573              : 
    4574              : 
    4575              : /*
    4576              :  * 2PC processing routine for COMMIT PREPARED case.
    4577              :  *
    4578              :  * Find and release the lock indicated by the 2PC record.
    4579              :  */
    4580              : void
    4581          871 : lock_twophase_postcommit(FullTransactionId fxid, uint16 info,
    4582              :                          void *recdata, uint32 len)
    4583              : {
    4584          871 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4585          871 :     PGPROC     *proc = TwoPhaseGetDummyProc(fxid, true);
    4586              :     LOCKTAG    *locktag;
    4587              :     LOCKMETHODID lockmethodid;
    4588              :     LockMethod  lockMethodTable;
    4589              : 
    4590              :     Assert(len == sizeof(TwoPhaseLockRecord));
    4591          871 :     locktag = &rec->locktag;
    4592          871 :     lockmethodid = locktag->locktag_lockmethodid;
    4593              : 
    4594          871 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4595            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4596          871 :     lockMethodTable = LockMethods[lockmethodid];
    4597              : 
    4598          871 :     LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
    4599          871 : }
    4600              : 
    4601              : /*
    4602              :  * 2PC processing routine for ROLLBACK PREPARED case.
    4603              :  *
    4604              :  * This is actually just the same as the COMMIT case.
    4605              :  */
    4606              : void
    4607          170 : lock_twophase_postabort(FullTransactionId fxid, uint16 info,
    4608              :                         void *recdata, uint32 len)
    4609              : {
    4610          170 :     lock_twophase_postcommit(fxid, info, recdata, len);
    4611          170 : }
    4612              : 
    4613              : /*
    4614              :  *      VirtualXactLockTableInsert
    4615              :  *
    4616              :  *      Take vxid lock via the fast-path.  There can't be any pre-existing
    4617              :  *      lockers, as we haven't advertised this vxid via the ProcArray yet.
    4618              :  *
    4619              :  *      Since MyProc->fpLocalTransactionId will normally contain the same data
    4620              :  *      as MyProc->vxid.lxid, you might wonder if we really need both.  The
    4621              :  *      difference is that MyProc->vxid.lxid is set and cleared unlocked, and
    4622              :  *      examined by procarray.c, while fpLocalTransactionId is protected by
    4623              :  *      fpInfoLock and is used only by the locking subsystem.  Doing it this
    4624              :  *      way makes it easier to verify that there are no funny race conditions.
    4625              :  *
    4626              :  *      We don't bother recording this lock in the local lock table, since it's
    4627              :  *      only ever released at the end of a transaction.  Instead,
    4628              :  *      LockReleaseAll() calls VirtualXactLockTableCleanup().
    4629              :  */
    4630              : void
    4631       616959 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
    4632              : {
    4633              :     Assert(VirtualTransactionIdIsValid(vxid));
    4634              : 
    4635       616959 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    4636              : 
    4637              :     Assert(MyProc->vxid.procNumber == vxid.procNumber);
    4638              :     Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
    4639              :     Assert(MyProc->fpVXIDLock == false);
    4640              : 
    4641       616959 :     MyProc->fpVXIDLock = true;
    4642       616959 :     MyProc->fpLocalTransactionId = vxid.localTransactionId;
    4643              : 
    4644       616959 :     LWLockRelease(&MyProc->fpInfoLock);
    4645       616959 : }
    4646              : 
    4647              : /*
    4648              :  *      VirtualXactLockTableCleanup
    4649              :  *
    4650              :  *      Check whether a VXID lock has been materialized; if so, release it,
    4651              :  *      unblocking waiters.
    4652              :  */
    4653              : void
    4654       617512 : VirtualXactLockTableCleanup(void)
    4655              : {
    4656              :     bool        fastpath;
    4657              :     LocalTransactionId lxid;
    4658              : 
    4659              :     Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
    4660              : 
    4661              :     /*
    4662              :      * Clean up shared memory state.
    4663              :      */
    4664       617512 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    4665              : 
    4666       617512 :     fastpath = MyProc->fpVXIDLock;
    4667       617512 :     lxid = MyProc->fpLocalTransactionId;
    4668       617512 :     MyProc->fpVXIDLock = false;
    4669       617512 :     MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
    4670              : 
    4671       617512 :     LWLockRelease(&MyProc->fpInfoLock);
    4672              : 
    4673              :     /*
    4674              :      * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
    4675              :      * that means someone transferred the lock to the main lock table.
    4676              :      */
    4677       617512 :     if (!fastpath && LocalTransactionIdIsValid(lxid))
    4678              :     {
    4679              :         VirtualTransactionId vxid;
    4680              :         LOCKTAG     locktag;
    4681              : 
    4682          310 :         vxid.procNumber = MyProcNumber;
    4683          310 :         vxid.localTransactionId = lxid;
    4684          310 :         SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
    4685              : 
    4686          310 :         LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
    4687              :                              &locktag, ExclusiveLock, false);
    4688              :     }
    4689       617512 : }
    4690              : 
    4691              : /*
    4692              :  *      XactLockForVirtualXact
    4693              :  *
    4694              :  * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
    4695              :  * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid).  Unlike those
    4696              :  * functions, it assumes "xid" is never a subtransaction and that "xid" is
    4697              :  * prepared, committed, or aborted.
    4698              :  *
    4699              :  * If !TransactionIdIsValid(xid), this locks every prepared XID having been
    4700              :  * known as "vxid" before its PREPARE TRANSACTION.
    4701              :  */
    4702              : static bool
    4703          343 : XactLockForVirtualXact(VirtualTransactionId vxid,
    4704              :                        TransactionId xid, bool wait)
    4705              : {
    4706          343 :     bool        more = false;
    4707              : 
    4708              :     /* There is no point to wait for 2PCs if you have no 2PCs. */
    4709          343 :     if (max_prepared_xacts == 0)
    4710          151 :         return true;
    4711              : 
    4712              :     do
    4713              :     {
    4714              :         LockAcquireResult lar;
    4715              :         LOCKTAG     tag;
    4716              : 
    4717              :         /* Clear state from previous iterations. */
    4718          192 :         if (more)
    4719              :         {
    4720            0 :             xid = InvalidTransactionId;
    4721            0 :             more = false;
    4722              :         }
    4723              : 
    4724              :         /* If we have no xid, try to find one. */
    4725          192 :         if (!TransactionIdIsValid(xid))
    4726           95 :             xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
    4727          192 :         if (!TransactionIdIsValid(xid))
    4728              :         {
    4729              :             Assert(!more);
    4730           81 :             return true;
    4731              :         }
    4732              : 
    4733              :         /* Check or wait for XID completion. */
    4734          111 :         SET_LOCKTAG_TRANSACTION(tag, xid);
    4735          111 :         lar = LockAcquire(&tag, ShareLock, false, !wait);
    4736          111 :         if (lar == LOCKACQUIRE_NOT_AVAIL)
    4737            0 :             return false;
    4738          111 :         LockRelease(&tag, ShareLock, false);
    4739          111 :     } while (more);
    4740              : 
    4741          111 :     return true;
    4742              : }
    4743              : 
    4744              : /*
    4745              :  *      VirtualXactLock
    4746              :  *
    4747              :  * If wait = true, wait as long as the given VXID or any XID acquired by the
    4748              :  * same transaction is still running.  Then, return true.
    4749              :  *
    4750              :  * If wait = false, just check whether that VXID or one of those XIDs is still
    4751              :  * running, and return true or false.
    4752              :  */
    4753              : bool
    4754          384 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
    4755              : {
    4756              :     LOCKTAG     tag;
    4757              :     PGPROC     *proc;
    4758          384 :     TransactionId xid = InvalidTransactionId;
    4759              : 
    4760              :     Assert(VirtualTransactionIdIsValid(vxid));
    4761              : 
    4762          384 :     if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
    4763              :         /* no vxid lock; localTransactionId is a normal, locked XID */
    4764            1 :         return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
    4765              : 
    4766          383 :     SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
    4767              : 
    4768              :     /*
    4769              :      * If a lock table entry must be made, this is the PGPROC on whose behalf
    4770              :      * it must be done.  Note that the transaction might end or the PGPROC
    4771              :      * might be reassigned to a new backend before we get around to examining
    4772              :      * it, but it doesn't matter.  If we find upon examination that the
    4773              :      * relevant lxid is no longer running here, that's enough to prove that
    4774              :      * it's no longer running anywhere.
    4775              :      */
    4776          383 :     proc = ProcNumberGetProc(vxid.procNumber);
    4777          383 :     if (proc == NULL)
    4778            3 :         return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
    4779              : 
    4780              :     /*
    4781              :      * We must acquire this lock before checking the procNumber and lxid
    4782              :      * against the ones we're waiting for.  The target backend will only set
    4783              :      * or clear lxid while holding this lock.
    4784              :      */
    4785          380 :     LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
    4786              : 
    4787          380 :     if (proc->vxid.procNumber != vxid.procNumber
    4788          380 :         || proc->fpLocalTransactionId != vxid.localTransactionId)
    4789              :     {
    4790              :         /* VXID ended */
    4791           48 :         LWLockRelease(&proc->fpInfoLock);
    4792           48 :         return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
    4793              :     }
    4794              : 
    4795              :     /*
    4796              :      * If we aren't asked to wait, there's no need to set up a lock table
    4797              :      * entry.  The transaction is still in progress, so just return false.
    4798              :      */
    4799          332 :     if (!wait)
    4800              :     {
    4801           16 :         LWLockRelease(&proc->fpInfoLock);
    4802           16 :         return false;
    4803              :     }
    4804              : 
    4805              :     /*
    4806              :      * OK, we're going to need to sleep on the VXID.  But first, we must set
    4807              :      * up the primary lock table entry, if needed (ie, convert the proc's
    4808              :      * fast-path lock on its VXID to a regular lock).
    4809              :      */
    4810          316 :     if (proc->fpVXIDLock)
    4811              :     {
    4812              :         PROCLOCK   *proclock;
    4813              :         uint32      hashcode;
    4814              :         LWLock     *partitionLock;
    4815              : 
    4816          310 :         hashcode = LockTagHashCode(&tag);
    4817              : 
    4818          310 :         partitionLock = LockHashPartitionLock(hashcode);
    4819          310 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4820              : 
    4821          310 :         proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
    4822              :                                     &tag, hashcode, ExclusiveLock);
    4823          310 :         if (!proclock)
    4824              :         {
    4825            0 :             LWLockRelease(partitionLock);
    4826            0 :             LWLockRelease(&proc->fpInfoLock);
    4827            0 :             ereport(ERROR,
    4828              :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    4829              :                      errmsg("out of shared memory"),
    4830              :                      errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    4831              :         }
    4832          310 :         GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
    4833              : 
    4834          310 :         LWLockRelease(partitionLock);
    4835              : 
    4836          310 :         proc->fpVXIDLock = false;
    4837              :     }
    4838              : 
    4839              :     /*
    4840              :      * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
    4841              :      * search.  The proc might have assigned this XID but not yet locked it,
    4842              :      * in which case the proc will lock this XID before releasing the VXID.
    4843              :      * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
    4844              :      * so we won't save an XID of a different VXID.  It doesn't matter whether
    4845              :      * we save this before or after setting up the primary lock table entry.
    4846              :      */
    4847          316 :     xid = proc->xid;
    4848              : 
    4849              :     /* Done with proc->fpLockBits */
    4850          316 :     LWLockRelease(&proc->fpInfoLock);
    4851              : 
    4852              :     /* Time to wait. */
    4853          316 :     (void) LockAcquire(&tag, ShareLock, false, false);
    4854              : 
    4855          291 :     LockRelease(&tag, ShareLock, false);
    4856          291 :     return XactLockForVirtualXact(vxid, xid, wait);
    4857              : }
    4858              : 
    4859              : /*
    4860              :  * LockWaiterCount
    4861              :  *
    4862              :  * Find the number of lock requester on this locktag
    4863              :  */
    4864              : int
    4865        81937 : LockWaiterCount(const LOCKTAG *locktag)
    4866              : {
    4867        81937 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    4868              :     LOCK       *lock;
    4869              :     bool        found;
    4870              :     uint32      hashcode;
    4871              :     LWLock     *partitionLock;
    4872        81937 :     int         waiters = 0;
    4873              : 
    4874        81937 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4875            0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4876              : 
    4877        81937 :     hashcode = LockTagHashCode(locktag);
    4878        81937 :     partitionLock = LockHashPartitionLock(hashcode);
    4879        81937 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4880              : 
    4881        81937 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    4882              :                                                 locktag,
    4883              :                                                 hashcode,
    4884              :                                                 HASH_FIND,
    4885              :                                                 &found);
    4886        81937 :     if (found)
    4887              :     {
    4888              :         Assert(lock != NULL);
    4889           20 :         waiters = lock->nRequested;
    4890              :     }
    4891        81937 :     LWLockRelease(partitionLock);
    4892              : 
    4893        81937 :     return waiters;
    4894              : }
        

Generated by: LCOV version 2.0-1