LCOV - code coverage report
Current view: top level - src/backend/storage/lmgr - lock.c (source / functions) Hit Total Coverage
Test: PostgreSQL 18devel Lines: 1090 1242 87.8 %
Date: 2024-07-19 16:11:31 Functions: 54 57 94.7 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * lock.c
       4             :  *    POSTGRES primary lock mechanism
       5             :  *
       6             :  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/storage/lmgr/lock.c
      12             :  *
      13             :  * NOTES
      14             :  *    A lock table is a shared memory hash table.  When
      15             :  *    a process tries to acquire a lock of a type that conflicts
      16             :  *    with existing locks, it is put to sleep using the routines
      17             :  *    in storage/lmgr/proc.c.
      18             :  *
      19             :  *    For the most part, this code should be invoked via lmgr.c
      20             :  *    or another lock-management module, not directly.
      21             :  *
      22             :  *  Interface:
      23             :  *
      24             :  *  InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
      25             :  *  LockAcquire(), LockRelease(), LockReleaseAll(),
      26             :  *  LockCheckConflicts(), GrantLock()
      27             :  *
      28             :  *-------------------------------------------------------------------------
      29             :  */
      30             : #include "postgres.h"
      31             : 
      32             : #include <signal.h>
      33             : #include <unistd.h>
      34             : 
      35             : #include "access/transam.h"
      36             : #include "access/twophase.h"
      37             : #include "access/twophase_rmgr.h"
      38             : #include "access/xlog.h"
      39             : #include "access/xlogutils.h"
      40             : #include "miscadmin.h"
      41             : #include "pg_trace.h"
      42             : #include "storage/proc.h"
      43             : #include "storage/procarray.h"
      44             : #include "storage/sinvaladt.h"
      45             : #include "storage/spin.h"
      46             : #include "storage/standby.h"
      47             : #include "utils/memutils.h"
      48             : #include "utils/ps_status.h"
      49             : #include "utils/resowner.h"
      50             : 
      51             : 
      52             : /* This configuration variable is used to set the lock table size */
      53             : int         max_locks_per_xact; /* set by guc.c */
      54             : 
      55             : #define NLOCKENTS() \
      56             :     mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
      57             : 
      58             : 
      59             : /*
      60             :  * Data structures defining the semantics of the standard lock methods.
      61             :  *
      62             :  * The conflict table defines the semantics of the various lock modes.
      63             :  */
      64             : static const LOCKMASK LockConflicts[] = {
      65             :     0,
      66             : 
      67             :     /* AccessShareLock */
      68             :     LOCKBIT_ON(AccessExclusiveLock),
      69             : 
      70             :     /* RowShareLock */
      71             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      72             : 
      73             :     /* RowExclusiveLock */
      74             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      75             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      76             : 
      77             :     /* ShareUpdateExclusiveLock */
      78             :     LOCKBIT_ON(ShareUpdateExclusiveLock) |
      79             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      80             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      81             : 
      82             :     /* ShareLock */
      83             :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      84             :     LOCKBIT_ON(ShareRowExclusiveLock) |
      85             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      86             : 
      87             :     /* ShareRowExclusiveLock */
      88             :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      89             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      90             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      91             : 
      92             :     /* ExclusiveLock */
      93             :     LOCKBIT_ON(RowShareLock) |
      94             :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      95             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      96             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      97             : 
      98             :     /* AccessExclusiveLock */
      99             :     LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
     100             :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
     101             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
     102             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
     103             : 
     104             : };
     105             : 
     106             : /* Names of lock modes, for debug printouts */
     107             : static const char *const lock_mode_names[] =
     108             : {
     109             :     "INVALID",
     110             :     "AccessShareLock",
     111             :     "RowShareLock",
     112             :     "RowExclusiveLock",
     113             :     "ShareUpdateExclusiveLock",
     114             :     "ShareLock",
     115             :     "ShareRowExclusiveLock",
     116             :     "ExclusiveLock",
     117             :     "AccessExclusiveLock"
     118             : };
     119             : 
     120             : #ifndef LOCK_DEBUG
     121             : static bool Dummy_trace = false;
     122             : #endif
     123             : 
     124             : static const LockMethodData default_lockmethod = {
     125             :     MaxLockMode,
     126             :     LockConflicts,
     127             :     lock_mode_names,
     128             : #ifdef LOCK_DEBUG
     129             :     &Trace_locks
     130             : #else
     131             :     &Dummy_trace
     132             : #endif
     133             : };
     134             : 
     135             : static const LockMethodData user_lockmethod = {
     136             :     MaxLockMode,
     137             :     LockConflicts,
     138             :     lock_mode_names,
     139             : #ifdef LOCK_DEBUG
     140             :     &Trace_userlocks
     141             : #else
     142             :     &Dummy_trace
     143             : #endif
     144             : };
     145             : 
     146             : /*
     147             :  * map from lock method id to the lock table data structures
     148             :  */
     149             : static const LockMethod LockMethods[] = {
     150             :     NULL,
     151             :     &default_lockmethod,
     152             :     &user_lockmethod
     153             : };
     154             : 
     155             : 
     156             : /* Record that's written to 2PC state file when a lock is persisted */
     157             : typedef struct TwoPhaseLockRecord
     158             : {
     159             :     LOCKTAG     locktag;
     160             :     LOCKMODE    lockmode;
     161             : } TwoPhaseLockRecord;
     162             : 
     163             : 
     164             : /*
     165             :  * Count of the number of fast path lock slots we believe to be used.  This
     166             :  * might be higher than the real number if another backend has transferred
     167             :  * our locks to the primary lock table, but it can never be lower than the
     168             :  * real value, since only we can acquire locks on our own behalf.
     169             :  */
     170             : static int  FastPathLocalUseCount = 0;
     171             : 
     172             : /*
     173             :  * Flag to indicate if the relation extension lock is held by this backend.
     174             :  * This flag is used to ensure that while holding the relation extension lock
     175             :  * we don't try to acquire a heavyweight lock on any other object.  This
     176             :  * restriction implies that the relation extension lock won't ever participate
     177             :  * in the deadlock cycle because we can never wait for any other heavyweight
     178             :  * lock after acquiring this lock.
     179             :  *
     180             :  * Such a restriction is okay for relation extension locks as unlike other
     181             :  * heavyweight locks these are not held till the transaction end.  These are
     182             :  * taken for a short duration to extend a particular relation and then
     183             :  * released.
     184             :  */
     185             : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
     186             : 
     187             : /* Macros for manipulating proc->fpLockBits */
     188             : #define FAST_PATH_BITS_PER_SLOT         3
     189             : #define FAST_PATH_LOCKNUMBER_OFFSET     1
     190             : #define FAST_PATH_MASK                  ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
     191             : #define FAST_PATH_GET_BITS(proc, n) \
     192             :     (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
     193             : #define FAST_PATH_BIT_POSITION(n, l) \
     194             :     (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
     195             :      AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
     196             :      AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
     197             :      ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
     198             : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
     199             :      (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
     200             : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
     201             :      (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
     202             : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
     203             :      ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
     204             : 
     205             : /*
     206             :  * The fast-path lock mechanism is concerned only with relation locks on
     207             :  * unshared relations by backends bound to a database.  The fast-path
     208             :  * mechanism exists mostly to accelerate acquisition and release of locks
     209             :  * that rarely conflict.  Because ShareUpdateExclusiveLock is
     210             :  * self-conflicting, it can't use the fast-path mechanism; but it also does
     211             :  * not conflict with any of the locks that do, so we can ignore it completely.
     212             :  */
     213             : #define EligibleForRelationFastPath(locktag, mode) \
     214             :     ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
     215             :     (locktag)->locktag_type == LOCKTAG_RELATION && \
     216             :     (locktag)->locktag_field1 == MyDatabaseId && \
     217             :     MyDatabaseId != InvalidOid && \
     218             :     (mode) < ShareUpdateExclusiveLock)
     219             : #define ConflictsWithRelationFastPath(locktag, mode) \
     220             :     ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
     221             :     (locktag)->locktag_type == LOCKTAG_RELATION && \
     222             :     (locktag)->locktag_field1 != InvalidOid && \
     223             :     (mode) > ShareUpdateExclusiveLock)
     224             : 
     225             : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
     226             : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
     227             : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
     228             :                                           const LOCKTAG *locktag, uint32 hashcode);
     229             : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
     230             : 
     231             : /*
     232             :  * To make the fast-path lock mechanism work, we must have some way of
     233             :  * preventing the use of the fast-path when a conflicting lock might be present.
     234             :  * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
     235             :  * and maintain an integer count of the number of "strong" lockers
     236             :  * in each partition.  When any "strong" lockers are present (which is
     237             :  * hopefully not very often), the fast-path mechanism can't be used, and we
     238             :  * must fall back to the slower method of pushing matching locks directly
     239             :  * into the main lock tables.
     240             :  *
     241             :  * The deadlock detector does not know anything about the fast path mechanism,
     242             :  * so any locks that might be involved in a deadlock must be transferred from
     243             :  * the fast-path queues to the main lock table.
     244             :  */
     245             : 
     246             : #define FAST_PATH_STRONG_LOCK_HASH_BITS         10
     247             : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
     248             :     (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
     249             : #define FastPathStrongLockHashPartition(hashcode) \
     250             :     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
     251             : 
     252             : typedef struct
     253             : {
     254             :     slock_t     mutex;
     255             :     uint32      count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
     256             : } FastPathStrongRelationLockData;
     257             : 
     258             : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
     259             : 
     260             : 
     261             : /*
     262             :  * Pointers to hash tables containing lock state
     263             :  *
     264             :  * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
     265             :  * shared memory; LockMethodLocalHash is local to each backend.
     266             :  */
     267             : static HTAB *LockMethodLockHash;
     268             : static HTAB *LockMethodProcLockHash;
     269             : static HTAB *LockMethodLocalHash;
     270             : 
     271             : 
     272             : /* private state for error cleanup */
     273             : static LOCALLOCK *StrongLockInProgress;
     274             : static LOCALLOCK *awaitedLock;
     275             : static ResourceOwner awaitedOwner;
     276             : 
     277             : 
     278             : #ifdef LOCK_DEBUG
     279             : 
     280             : /*------
     281             :  * The following configuration options are available for lock debugging:
     282             :  *
     283             :  *     TRACE_LOCKS      -- give a bunch of output what's going on in this file
     284             :  *     TRACE_USERLOCKS  -- same but for user locks
     285             :  *     TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
     286             :  *                         (use to avoid output on system tables)
     287             :  *     TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
     288             :  *     DEBUG_DEADLOCKS  -- currently dumps locks at untimely occasions ;)
     289             :  *
     290             :  * Furthermore, but in storage/lmgr/lwlock.c:
     291             :  *     TRACE_LWLOCKS    -- trace lightweight locks (pretty useless)
     292             :  *
     293             :  * Define LOCK_DEBUG at compile time to get all these enabled.
     294             :  * --------
     295             :  */
     296             : 
     297             : int         Trace_lock_oidmin = FirstNormalObjectId;
     298             : bool        Trace_locks = false;
     299             : bool        Trace_userlocks = false;
     300             : int         Trace_lock_table = 0;
     301             : bool        Debug_deadlocks = false;
     302             : 
     303             : 
     304             : inline static bool
     305             : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
     306             : {
     307             :     return
     308             :         (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
     309             :          ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
     310             :         || (Trace_lock_table &&
     311             :             (tag->locktag_field2 == Trace_lock_table));
     312             : }
     313             : 
     314             : 
     315             : inline static void
     316             : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
     317             : {
     318             :     if (LOCK_DEBUG_ENABLED(&lock->tag))
     319             :         elog(LOG,
     320             :              "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
     321             :              "req(%d,%d,%d,%d,%d,%d,%d)=%d "
     322             :              "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
     323             :              where, lock,
     324             :              lock->tag.locktag_field1, lock->tag.locktag_field2,
     325             :              lock->tag.locktag_field3, lock->tag.locktag_field4,
     326             :              lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
     327             :              lock->grantMask,
     328             :              lock->requested[1], lock->requested[2], lock->requested[3],
     329             :              lock->requested[4], lock->requested[5], lock->requested[6],
     330             :              lock->requested[7], lock->nRequested,
     331             :              lock->granted[1], lock->granted[2], lock->granted[3],
     332             :              lock->granted[4], lock->granted[5], lock->granted[6],
     333             :              lock->granted[7], lock->nGranted,
     334             :              dclist_count(&lock->waitProcs),
     335             :              LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
     336             : }
     337             : 
     338             : 
     339             : inline static void
     340             : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
     341             : {
     342             :     if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
     343             :         elog(LOG,
     344             :              "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
     345             :              where, proclockP, proclockP->tag.myLock,
     346             :              PROCLOCK_LOCKMETHOD(*(proclockP)),
     347             :              proclockP->tag.myProc, (int) proclockP->holdMask);
     348             : }
     349             : #else                           /* not LOCK_DEBUG */
     350             : 
     351             : #define LOCK_PRINT(where, lock, type)  ((void) 0)
     352             : #define PROCLOCK_PRINT(where, proclockP)  ((void) 0)
     353             : #endif                          /* not LOCK_DEBUG */
     354             : 
     355             : 
     356             : static uint32 proclock_hash(const void *key, Size keysize);
     357             : static void RemoveLocalLock(LOCALLOCK *locallock);
     358             : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
     359             :                                   const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
     360             : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
     361             : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
     362             : static void FinishStrongLockAcquire(void);
     363             : static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner,
     364             :                        bool dontWait);
     365             : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
     366             : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
     367             : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
     368             :                         PROCLOCK *proclock, LockMethod lockMethodTable);
     369             : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
     370             :                         LockMethod lockMethodTable, uint32 hashcode,
     371             :                         bool wakeupNeeded);
     372             : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
     373             :                                  LOCKTAG *locktag, LOCKMODE lockmode,
     374             :                                  bool decrement_strong_lock_count);
     375             : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
     376             :                                            BlockedProcsData *data);
     377             : 
     378             : 
     379             : /*
     380             :  * InitLocks -- Initialize the lock manager's data structures.
     381             :  *
     382             :  * This is called from CreateSharedMemoryAndSemaphores(), which see for
     383             :  * more comments.  In the normal postmaster case, the shared hash tables
     384             :  * are created here, as well as a locallock hash table that will remain
     385             :  * unused and empty in the postmaster itself.  Backends inherit the pointers
     386             :  * to the shared tables via fork(), and also inherit an image of the locallock
     387             :  * hash table, which they proceed to use.  In the EXEC_BACKEND case, each
     388             :  * backend re-executes this code to obtain pointers to the already existing
     389             :  * shared hash tables and to create its locallock hash table.
     390             :  */
     391             : void
     392        1782 : InitLocks(void)
     393             : {
     394             :     HASHCTL     info;
     395             :     long        init_table_size,
     396             :                 max_table_size;
     397             :     bool        found;
     398             : 
     399             :     /*
     400             :      * Compute init/max size to request for lock hashtables.  Note these
     401             :      * calculations must agree with LockShmemSize!
     402             :      */
     403        1782 :     max_table_size = NLOCKENTS();
     404        1782 :     init_table_size = max_table_size / 2;
     405             : 
     406             :     /*
     407             :      * Allocate hash table for LOCK structs.  This stores per-locked-object
     408             :      * information.
     409             :      */
     410        1782 :     info.keysize = sizeof(LOCKTAG);
     411        1782 :     info.entrysize = sizeof(LOCK);
     412        1782 :     info.num_partitions = NUM_LOCK_PARTITIONS;
     413             : 
     414        1782 :     LockMethodLockHash = ShmemInitHash("LOCK hash",
     415             :                                        init_table_size,
     416             :                                        max_table_size,
     417             :                                        &info,
     418             :                                        HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
     419             : 
     420             :     /* Assume an average of 2 holders per lock */
     421        1782 :     max_table_size *= 2;
     422        1782 :     init_table_size *= 2;
     423             : 
     424             :     /*
     425             :      * Allocate hash table for PROCLOCK structs.  This stores
     426             :      * per-lock-per-holder information.
     427             :      */
     428        1782 :     info.keysize = sizeof(PROCLOCKTAG);
     429        1782 :     info.entrysize = sizeof(PROCLOCK);
     430        1782 :     info.hash = proclock_hash;
     431        1782 :     info.num_partitions = NUM_LOCK_PARTITIONS;
     432             : 
     433        1782 :     LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
     434             :                                            init_table_size,
     435             :                                            max_table_size,
     436             :                                            &info,
     437             :                                            HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
     438             : 
     439             :     /*
     440             :      * Allocate fast-path structures.
     441             :      */
     442        1782 :     FastPathStrongRelationLocks =
     443        1782 :         ShmemInitStruct("Fast Path Strong Relation Lock Data",
     444             :                         sizeof(FastPathStrongRelationLockData), &found);
     445        1782 :     if (!found)
     446        1782 :         SpinLockInit(&FastPathStrongRelationLocks->mutex);
     447             : 
     448             :     /*
     449             :      * Allocate non-shared hash table for LOCALLOCK structs.  This stores lock
     450             :      * counts and resource owner information.
     451             :      *
     452             :      * The non-shared table could already exist in this process (this occurs
     453             :      * when the postmaster is recreating shared memory after a backend crash).
     454             :      * If so, delete and recreate it.  (We could simply leave it, since it
     455             :      * ought to be empty in the postmaster, but for safety let's zap it.)
     456             :      */
     457        1782 :     if (LockMethodLocalHash)
     458           8 :         hash_destroy(LockMethodLocalHash);
     459             : 
     460        1782 :     info.keysize = sizeof(LOCALLOCKTAG);
     461        1782 :     info.entrysize = sizeof(LOCALLOCK);
     462             : 
     463        1782 :     LockMethodLocalHash = hash_create("LOCALLOCK hash",
     464             :                                       16,
     465             :                                       &info,
     466             :                                       HASH_ELEM | HASH_BLOBS);
     467        1782 : }
     468             : 
     469             : 
     470             : /*
     471             :  * Fetch the lock method table associated with a given lock
     472             :  */
     473             : LockMethod
     474         184 : GetLocksMethodTable(const LOCK *lock)
     475             : {
     476         184 :     LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
     477             : 
     478             :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
     479         184 :     return LockMethods[lockmethodid];
     480             : }
     481             : 
     482             : /*
     483             :  * Fetch the lock method table associated with a given locktag
     484             :  */
     485             : LockMethod
     486        2268 : GetLockTagsMethodTable(const LOCKTAG *locktag)
     487             : {
     488        2268 :     LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
     489             : 
     490             :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
     491        2268 :     return LockMethods[lockmethodid];
     492             : }
     493             : 
     494             : 
     495             : /*
     496             :  * Compute the hash code associated with a LOCKTAG.
     497             :  *
     498             :  * To avoid unnecessary recomputations of the hash code, we try to do this
     499             :  * just once per function, and then pass it around as needed.  Aside from
     500             :  * passing the hashcode to hash_search_with_hash_value(), we can extract
     501             :  * the lock partition number from the hashcode.
     502             :  */
     503             : uint32
     504    27396964 : LockTagHashCode(const LOCKTAG *locktag)
     505             : {
     506    27396964 :     return get_hash_value(LockMethodLockHash, (const void *) locktag);
     507             : }
     508             : 
     509             : /*
     510             :  * Compute the hash code associated with a PROCLOCKTAG.
     511             :  *
     512             :  * Because we want to use just one set of partition locks for both the
     513             :  * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
     514             :  * fall into the same partition number as their associated LOCKs.
     515             :  * dynahash.c expects the partition number to be the low-order bits of
     516             :  * the hash code, and therefore a PROCLOCKTAG's hash code must have the
     517             :  * same low-order bits as the associated LOCKTAG's hash code.  We achieve
     518             :  * this with this specialized hash function.
     519             :  */
     520             : static uint32
     521        1632 : proclock_hash(const void *key, Size keysize)
     522             : {
     523        1632 :     const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
     524             :     uint32      lockhash;
     525             :     Datum       procptr;
     526             : 
     527             :     Assert(keysize == sizeof(PROCLOCKTAG));
     528             : 
     529             :     /* Look into the associated LOCK object, and compute its hash code */
     530        1632 :     lockhash = LockTagHashCode(&proclocktag->myLock->tag);
     531             : 
     532             :     /*
     533             :      * To make the hash code also depend on the PGPROC, we xor the proc
     534             :      * struct's address into the hash code, left-shifted so that the
     535             :      * partition-number bits don't change.  Since this is only a hash, we
     536             :      * don't care if we lose high-order bits of the address; use an
     537             :      * intermediate variable to suppress cast-pointer-to-int warnings.
     538             :      */
     539        1632 :     procptr = PointerGetDatum(proclocktag->myProc);
     540        1632 :     lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
     541             : 
     542        1632 :     return lockhash;
     543             : }
     544             : 
     545             : /*
     546             :  * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
     547             :  * for its underlying LOCK.
     548             :  *
     549             :  * We use this just to avoid redundant calls of LockTagHashCode().
     550             :  */
     551             : static inline uint32
     552     6776560 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
     553             : {
     554     6776560 :     uint32      lockhash = hashcode;
     555             :     Datum       procptr;
     556             : 
     557             :     /*
     558             :      * This must match proclock_hash()!
     559             :      */
     560     6776560 :     procptr = PointerGetDatum(proclocktag->myProc);
     561     6776560 :     lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
     562             : 
     563     6776560 :     return lockhash;
     564             : }
     565             : 
     566             : /*
     567             :  * Given two lock modes, return whether they would conflict.
     568             :  */
     569             : bool
     570         464 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
     571             : {
     572         464 :     LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
     573             : 
     574         464 :     if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
     575         272 :         return true;
     576             : 
     577         192 :     return false;
     578             : }
     579             : 
     580             : /*
     581             :  * LockHeldByMe -- test whether lock 'locktag' is held by the current
     582             :  *      transaction
     583             :  *
     584             :  * Returns true if current transaction holds a lock on 'tag' of mode
     585             :  * 'lockmode'.  If 'orstronger' is true, a stronger lockmode is also OK.
     586             :  * ("Stronger" is defined as "numerically higher", which is a bit
     587             :  * semantically dubious but is OK for the purposes we use this for.)
     588             :  */
     589             : bool
     590           0 : LockHeldByMe(const LOCKTAG *locktag,
     591             :              LOCKMODE lockmode, bool orstronger)
     592             : {
     593             :     LOCALLOCKTAG localtag;
     594             :     LOCALLOCK  *locallock;
     595             : 
     596             :     /*
     597             :      * See if there is a LOCALLOCK entry for this lock and lockmode
     598             :      */
     599           0 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     600           0 :     localtag.lock = *locktag;
     601           0 :     localtag.mode = lockmode;
     602             : 
     603           0 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     604             :                                           &localtag,
     605             :                                           HASH_FIND, NULL);
     606             : 
     607           0 :     if (locallock && locallock->nLocks > 0)
     608           0 :         return true;
     609             : 
     610           0 :     if (orstronger)
     611             :     {
     612             :         LOCKMODE    slockmode;
     613             : 
     614           0 :         for (slockmode = lockmode + 1;
     615             :              slockmode <= MaxLockMode;
     616           0 :              slockmode++)
     617             :         {
     618           0 :             if (LockHeldByMe(locktag, slockmode, false))
     619           0 :                 return true;
     620             :         }
     621             :     }
     622             : 
     623           0 :     return false;
     624             : }
     625             : 
     626             : #ifdef USE_ASSERT_CHECKING
     627             : /*
     628             :  * GetLockMethodLocalHash -- return the hash of local locks, for modules that
     629             :  *      evaluate assertions based on all locks held.
     630             :  */
     631             : HTAB *
     632             : GetLockMethodLocalHash(void)
     633             : {
     634             :     return LockMethodLocalHash;
     635             : }
     636             : #endif
     637             : 
     638             : /*
     639             :  * LockHasWaiters -- look up 'locktag' and check if releasing this
     640             :  *      lock would wake up other processes waiting for it.
     641             :  */
     642             : bool
     643           0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
     644             : {
     645           0 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
     646             :     LockMethod  lockMethodTable;
     647             :     LOCALLOCKTAG localtag;
     648             :     LOCALLOCK  *locallock;
     649             :     LOCK       *lock;
     650             :     PROCLOCK   *proclock;
     651             :     LWLock     *partitionLock;
     652           0 :     bool        hasWaiters = false;
     653             : 
     654           0 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
     655           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
     656           0 :     lockMethodTable = LockMethods[lockmethodid];
     657           0 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
     658           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
     659             : 
     660             : #ifdef LOCK_DEBUG
     661             :     if (LOCK_DEBUG_ENABLED(locktag))
     662             :         elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
     663             :              locktag->locktag_field1, locktag->locktag_field2,
     664             :              lockMethodTable->lockModeNames[lockmode]);
     665             : #endif
     666             : 
     667             :     /*
     668             :      * Find the LOCALLOCK entry for this lock and lockmode
     669             :      */
     670           0 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     671           0 :     localtag.lock = *locktag;
     672           0 :     localtag.mode = lockmode;
     673             : 
     674           0 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     675             :                                           &localtag,
     676             :                                           HASH_FIND, NULL);
     677             : 
     678             :     /*
     679             :      * let the caller print its own error message, too. Do not ereport(ERROR).
     680             :      */
     681           0 :     if (!locallock || locallock->nLocks <= 0)
     682             :     {
     683           0 :         elog(WARNING, "you don't own a lock of type %s",
     684             :              lockMethodTable->lockModeNames[lockmode]);
     685           0 :         return false;
     686             :     }
     687             : 
     688             :     /*
     689             :      * Check the shared lock table.
     690             :      */
     691           0 :     partitionLock = LockHashPartitionLock(locallock->hashcode);
     692             : 
     693           0 :     LWLockAcquire(partitionLock, LW_SHARED);
     694             : 
     695             :     /*
     696             :      * We don't need to re-find the lock or proclock, since we kept their
     697             :      * addresses in the locallock table, and they couldn't have been removed
     698             :      * while we were holding a lock on them.
     699             :      */
     700           0 :     lock = locallock->lock;
     701             :     LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
     702           0 :     proclock = locallock->proclock;
     703             :     PROCLOCK_PRINT("LockHasWaiters: found", proclock);
     704             : 
     705             :     /*
     706             :      * Double-check that we are actually holding a lock of the type we want to
     707             :      * release.
     708             :      */
     709           0 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
     710             :     {
     711             :         PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
     712           0 :         LWLockRelease(partitionLock);
     713           0 :         elog(WARNING, "you don't own a lock of type %s",
     714             :              lockMethodTable->lockModeNames[lockmode]);
     715           0 :         RemoveLocalLock(locallock);
     716           0 :         return false;
     717             :     }
     718             : 
     719             :     /*
     720             :      * Do the checking.
     721             :      */
     722           0 :     if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
     723           0 :         hasWaiters = true;
     724             : 
     725           0 :     LWLockRelease(partitionLock);
     726             : 
     727           0 :     return hasWaiters;
     728             : }
     729             : 
     730             : /*
     731             :  * LockAcquire -- Check for lock conflicts, sleep if conflict found,
     732             :  *      set lock if/when no conflicts.
     733             :  *
     734             :  * Inputs:
     735             :  *  locktag: unique identifier for the lockable object
     736             :  *  lockmode: lock mode to acquire
     737             :  *  sessionLock: if true, acquire lock for session not current transaction
     738             :  *  dontWait: if true, don't wait to acquire lock
     739             :  *
     740             :  * Returns one of:
     741             :  *      LOCKACQUIRE_NOT_AVAIL       lock not available, and dontWait=true
     742             :  *      LOCKACQUIRE_OK              lock successfully acquired
     743             :  *      LOCKACQUIRE_ALREADY_HELD    incremented count for lock already held
     744             :  *      LOCKACQUIRE_ALREADY_CLEAR   incremented count for lock already clear
     745             :  *
     746             :  * In the normal case where dontWait=false and the caller doesn't need to
     747             :  * distinguish a freshly acquired lock from one already taken earlier in
     748             :  * this same transaction, there is no need to examine the return value.
     749             :  *
     750             :  * Side Effects: The lock is acquired and recorded in lock tables.
     751             :  *
     752             :  * NOTE: if we wait for the lock, there is no way to abort the wait
     753             :  * short of aborting the transaction.
     754             :  */
     755             : LockAcquireResult
     756      930884 : LockAcquire(const LOCKTAG *locktag,
     757             :             LOCKMODE lockmode,
     758             :             bool sessionLock,
     759             :             bool dontWait)
     760             : {
     761      930884 :     return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
     762             :                                true, NULL);
     763             : }
     764             : 
     765             : /*
     766             :  * LockAcquireExtended - allows us to specify additional options
     767             :  *
     768             :  * reportMemoryError specifies whether a lock request that fills the lock
     769             :  * table should generate an ERROR or not.  Passing "false" allows the caller
     770             :  * to attempt to recover from lock-table-full situations, perhaps by forcibly
     771             :  * canceling other lock holders and then retrying.  Note, however, that the
     772             :  * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
     773             :  * in combination with dontWait = true, as the cause of failure couldn't be
     774             :  * distinguished.
     775             :  *
     776             :  * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
     777             :  * table entry if a lock is successfully acquired, or NULL if not.
     778             :  */
     779             : LockAcquireResult
     780    29641950 : LockAcquireExtended(const LOCKTAG *locktag,
     781             :                     LOCKMODE lockmode,
     782             :                     bool sessionLock,
     783             :                     bool dontWait,
     784             :                     bool reportMemoryError,
     785             :                     LOCALLOCK **locallockp)
     786             : {
     787    29641950 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
     788             :     LockMethod  lockMethodTable;
     789             :     LOCALLOCKTAG localtag;
     790             :     LOCALLOCK  *locallock;
     791             :     LOCK       *lock;
     792             :     PROCLOCK   *proclock;
     793             :     bool        found;
     794             :     ResourceOwner owner;
     795             :     uint32      hashcode;
     796             :     LWLock     *partitionLock;
     797             :     bool        found_conflict;
     798    29641950 :     bool        log_lock = false;
     799             : 
     800    29641950 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
     801           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
     802    29641950 :     lockMethodTable = LockMethods[lockmethodid];
     803    29641950 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
     804           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
     805             : 
     806    29641950 :     if (RecoveryInProgress() && !InRecovery &&
     807      499540 :         (locktag->locktag_type == LOCKTAG_OBJECT ||
     808      499540 :          locktag->locktag_type == LOCKTAG_RELATION) &&
     809             :         lockmode > RowExclusiveLock)
     810           0 :         ereport(ERROR,
     811             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
     812             :                  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
     813             :                         lockMethodTable->lockModeNames[lockmode]),
     814             :                  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
     815             : 
     816             : #ifdef LOCK_DEBUG
     817             :     if (LOCK_DEBUG_ENABLED(locktag))
     818             :         elog(LOG, "LockAcquire: lock [%u,%u] %s",
     819             :              locktag->locktag_field1, locktag->locktag_field2,
     820             :              lockMethodTable->lockModeNames[lockmode]);
     821             : #endif
     822             : 
     823             :     /* Identify owner for lock */
     824    29641950 :     if (sessionLock)
     825       68064 :         owner = NULL;
     826             :     else
     827    29573886 :         owner = CurrentResourceOwner;
     828             : 
     829             :     /*
     830             :      * Find or create a LOCALLOCK entry for this lock and lockmode
     831             :      */
     832    29641950 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     833    29641950 :     localtag.lock = *locktag;
     834    29641950 :     localtag.mode = lockmode;
     835             : 
     836    29641950 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     837             :                                           &localtag,
     838             :                                           HASH_ENTER, &found);
     839             : 
     840             :     /*
     841             :      * if it's a new locallock object, initialize it
     842             :      */
     843    29641950 :     if (!found)
     844             :     {
     845    26452318 :         locallock->lock = NULL;
     846    26452318 :         locallock->proclock = NULL;
     847    26452318 :         locallock->hashcode = LockTagHashCode(&(localtag.lock));
     848    26452318 :         locallock->nLocks = 0;
     849    26452318 :         locallock->holdsStrongLockCount = false;
     850    26452318 :         locallock->lockCleared = false;
     851    26452318 :         locallock->numLockOwners = 0;
     852    26452318 :         locallock->maxLockOwners = 8;
     853    26452318 :         locallock->lockOwners = NULL;    /* in case next line fails */
     854    26452318 :         locallock->lockOwners = (LOCALLOCKOWNER *)
     855    26452318 :             MemoryContextAlloc(TopMemoryContext,
     856    26452318 :                                locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
     857             :     }
     858             :     else
     859             :     {
     860             :         /* Make sure there will be room to remember the lock */
     861     3189632 :         if (locallock->numLockOwners >= locallock->maxLockOwners)
     862             :         {
     863          38 :             int         newsize = locallock->maxLockOwners * 2;
     864             : 
     865          38 :             locallock->lockOwners = (LOCALLOCKOWNER *)
     866          38 :                 repalloc(locallock->lockOwners,
     867             :                          newsize * sizeof(LOCALLOCKOWNER));
     868          38 :             locallock->maxLockOwners = newsize;
     869             :         }
     870             :     }
     871    29641950 :     hashcode = locallock->hashcode;
     872             : 
     873    29641950 :     if (locallockp)
     874    28711066 :         *locallockp = locallock;
     875             : 
     876             :     /*
     877             :      * If we already hold the lock, we can just increase the count locally.
     878             :      *
     879             :      * If lockCleared is already set, caller need not worry about absorbing
     880             :      * sinval messages related to the lock's object.
     881             :      */
     882    29641950 :     if (locallock->nLocks > 0)
     883             :     {
     884     3189632 :         GrantLockLocal(locallock, owner);
     885     3189632 :         if (locallock->lockCleared)
     886     3055650 :             return LOCKACQUIRE_ALREADY_CLEAR;
     887             :         else
     888      133982 :             return LOCKACQUIRE_ALREADY_HELD;
     889             :     }
     890             : 
     891             :     /*
     892             :      * We don't acquire any other heavyweight lock while holding the relation
     893             :      * extension lock.  We do allow to acquire the same relation extension
     894             :      * lock more than once but that case won't reach here.
     895             :      */
     896             :     Assert(!IsRelationExtensionLockHeld);
     897             : 
     898             :     /*
     899             :      * Prepare to emit a WAL record if acquisition of this lock needs to be
     900             :      * replayed in a standby server.
     901             :      *
     902             :      * Here we prepare to log; after lock is acquired we'll issue log record.
     903             :      * This arrangement simplifies error recovery in case the preparation step
     904             :      * fails.
     905             :      *
     906             :      * Only AccessExclusiveLocks can conflict with lock types that read-only
     907             :      * transactions can acquire in a standby server. Make sure this definition
     908             :      * matches the one in GetRunningTransactionLocks().
     909             :      */
     910    26452318 :     if (lockmode >= AccessExclusiveLock &&
     911      397074 :         locktag->locktag_type == LOCKTAG_RELATION &&
     912      276342 :         !RecoveryInProgress() &&
     913      231982 :         XLogStandbyInfoActive())
     914             :     {
     915      176132 :         LogAccessExclusiveLockPrepare();
     916      176132 :         log_lock = true;
     917             :     }
     918             : 
     919             :     /*
     920             :      * Attempt to take lock via fast path, if eligible.  But if we remember
     921             :      * having filled up the fast path array, we don't attempt to make any
     922             :      * further use of it until we release some locks.  It's possible that some
     923             :      * other backend has transferred some of those locks to the shared hash
     924             :      * table, leaving space free, but it's not worth acquiring the LWLock just
     925             :      * to check.  It's also possible that we're acquiring a second or third
     926             :      * lock type on a relation we have already locked using the fast-path, but
     927             :      * for now we don't worry about that case either.
     928             :      */
     929    26452318 :     if (EligibleForRelationFastPath(locktag, lockmode) &&
     930    23890512 :         FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
     931             :     {
     932    23288218 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
     933             :         bool        acquired;
     934             : 
     935             :         /*
     936             :          * LWLockAcquire acts as a memory sequencing point, so it's safe to
     937             :          * assume that any strong locker whose increment to
     938             :          * FastPathStrongRelationLocks->counts becomes visible after we test
     939             :          * it has yet to begin to transfer fast-path locks.
     940             :          */
     941    23288218 :         LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
     942    23288218 :         if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
     943      393254 :             acquired = false;
     944             :         else
     945    22894964 :             acquired = FastPathGrantRelationLock(locktag->locktag_field2,
     946             :                                                  lockmode);
     947    23288218 :         LWLockRelease(&MyProc->fpInfoLock);
     948    23288218 :         if (acquired)
     949             :         {
     950             :             /*
     951             :              * The locallock might contain stale pointers to some old shared
     952             :              * objects; we MUST reset these to null before considering the
     953             :              * lock to be acquired via fast-path.
     954             :              */
     955    22894964 :             locallock->lock = NULL;
     956    22894964 :             locallock->proclock = NULL;
     957    22894964 :             GrantLockLocal(locallock, owner);
     958    22894964 :             return LOCKACQUIRE_OK;
     959             :         }
     960             :     }
     961             : 
     962             :     /*
     963             :      * If this lock could potentially have been taken via the fast-path by
     964             :      * some other backend, we must (temporarily) disable further use of the
     965             :      * fast-path for this lock tag, and migrate any locks already taken via
     966             :      * this method to the main lock table.
     967             :      */
     968     3557354 :     if (ConflictsWithRelationFastPath(locktag, lockmode))
     969             :     {
     970      327748 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
     971             : 
     972      327748 :         BeginStrongLockAcquire(locallock, fasthashcode);
     973      327748 :         if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
     974             :                                            hashcode))
     975             :         {
     976           0 :             AbortStrongLockAcquire();
     977           0 :             if (locallock->nLocks == 0)
     978           0 :                 RemoveLocalLock(locallock);
     979           0 :             if (locallockp)
     980           0 :                 *locallockp = NULL;
     981           0 :             if (reportMemoryError)
     982           0 :                 ereport(ERROR,
     983             :                         (errcode(ERRCODE_OUT_OF_MEMORY),
     984             :                          errmsg("out of shared memory"),
     985             :                          errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
     986             :             else
     987           0 :                 return LOCKACQUIRE_NOT_AVAIL;
     988             :         }
     989             :     }
     990             : 
     991             :     /*
     992             :      * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
     993             :      * take it via the fast-path, either, so we've got to mess with the shared
     994             :      * lock table.
     995             :      */
     996     3557354 :     partitionLock = LockHashPartitionLock(hashcode);
     997             : 
     998     3557354 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
     999             : 
    1000             :     /*
    1001             :      * Find or create lock and proclock entries with this tag
    1002             :      *
    1003             :      * Note: if the locallock object already existed, it might have a pointer
    1004             :      * to the lock already ... but we should not assume that that pointer is
    1005             :      * valid, since a lock object with zero hold and request counts can go
    1006             :      * away anytime.  So we have to use SetupLockInTable() to recompute the
    1007             :      * lock and proclock pointers, even if they're already set.
    1008             :      */
    1009     3557354 :     proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
    1010             :                                 hashcode, lockmode);
    1011     3557354 :     if (!proclock)
    1012             :     {
    1013           0 :         AbortStrongLockAcquire();
    1014           0 :         LWLockRelease(partitionLock);
    1015           0 :         if (locallock->nLocks == 0)
    1016           0 :             RemoveLocalLock(locallock);
    1017           0 :         if (locallockp)
    1018           0 :             *locallockp = NULL;
    1019           0 :         if (reportMemoryError)
    1020           0 :             ereport(ERROR,
    1021             :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    1022             :                      errmsg("out of shared memory"),
    1023             :                      errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    1024             :         else
    1025           0 :             return LOCKACQUIRE_NOT_AVAIL;
    1026             :     }
    1027     3557354 :     locallock->proclock = proclock;
    1028     3557354 :     lock = proclock->tag.myLock;
    1029     3557354 :     locallock->lock = lock;
    1030             : 
    1031             :     /*
    1032             :      * If lock requested conflicts with locks requested by waiters, must join
    1033             :      * wait queue.  Otherwise, check for conflict with already-held locks.
    1034             :      * (That's last because most complex check.)
    1035             :      */
    1036     3557354 :     if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
    1037         352 :         found_conflict = true;
    1038             :     else
    1039     3557002 :         found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
    1040             :                                             lock, proclock);
    1041             : 
    1042     3557354 :     if (!found_conflict)
    1043             :     {
    1044             :         /* No conflict with held or previously requested locks */
    1045     3553636 :         GrantLock(lock, proclock, lockmode);
    1046     3553636 :         GrantLockLocal(locallock, owner);
    1047             :     }
    1048             :     else
    1049             :     {
    1050             :         /*
    1051             :          * Set bitmask of locks this process already holds on this object.
    1052             :          */
    1053        3718 :         MyProc->heldLocks = proclock->holdMask;
    1054             : 
    1055             :         /*
    1056             :          * Sleep till someone wakes me up. We do this even in the dontWait
    1057             :          * case, because while trying to go to sleep, we may discover that we
    1058             :          * can acquire the lock immediately after all.
    1059             :          */
    1060             : 
    1061             :         TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
    1062             :                                          locktag->locktag_field2,
    1063             :                                          locktag->locktag_field3,
    1064             :                                          locktag->locktag_field4,
    1065             :                                          locktag->locktag_type,
    1066             :                                          lockmode);
    1067             : 
    1068        3718 :         WaitOnLock(locallock, owner, dontWait);
    1069             : 
    1070             :         TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
    1071             :                                         locktag->locktag_field2,
    1072             :                                         locktag->locktag_field3,
    1073             :                                         locktag->locktag_field4,
    1074             :                                         locktag->locktag_type,
    1075             :                                         lockmode);
    1076             : 
    1077             :         /*
    1078             :          * NOTE: do not do any material change of state between here and
    1079             :          * return.  All required changes in locktable state must have been
    1080             :          * done when the lock was granted to us --- see notes in WaitOnLock.
    1081             :          */
    1082             : 
    1083             :         /*
    1084             :          * Check the proclock entry status. If dontWait = true, this is an
    1085             :          * expected case; otherwise, it will only happen if something in the
    1086             :          * ipc communication doesn't work correctly.
    1087             :          */
    1088        3626 :         if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    1089             :         {
    1090        1300 :             AbortStrongLockAcquire();
    1091             : 
    1092        1300 :             if (dontWait)
    1093             :             {
    1094             :                 /*
    1095             :                  * We can't acquire the lock immediately.  If caller specified
    1096             :                  * no blocking, remove useless table entries and return
    1097             :                  * LOCKACQUIRE_NOT_AVAIL without waiting.
    1098             :                  */
    1099        1300 :                 if (proclock->holdMask == 0)
    1100             :                 {
    1101             :                     uint32      proclock_hashcode;
    1102             : 
    1103         896 :                     proclock_hashcode = ProcLockHashCode(&proclock->tag,
    1104             :                                                          hashcode);
    1105         896 :                     dlist_delete(&proclock->lockLink);
    1106         896 :                     dlist_delete(&proclock->procLink);
    1107         896 :                     if (!hash_search_with_hash_value(LockMethodProcLockHash,
    1108         896 :                                                      &(proclock->tag),
    1109             :                                                      proclock_hashcode,
    1110             :                                                      HASH_REMOVE,
    1111             :                                                      NULL))
    1112           0 :                         elog(PANIC, "proclock table corrupted");
    1113             :                 }
    1114             :                 else
    1115             :                     PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
    1116        1300 :                 lock->nRequested--;
    1117        1300 :                 lock->requested[lockmode]--;
    1118             :                 LOCK_PRINT("LockAcquire: conditional lock failed",
    1119             :                            lock, lockmode);
    1120             :                 Assert((lock->nRequested > 0) &&
    1121             :                        (lock->requested[lockmode] >= 0));
    1122             :                 Assert(lock->nGranted <= lock->nRequested);
    1123        1300 :                 LWLockRelease(partitionLock);
    1124        1300 :                 if (locallock->nLocks == 0)
    1125        1300 :                     RemoveLocalLock(locallock);
    1126        1300 :                 if (locallockp)
    1127         440 :                     *locallockp = NULL;
    1128        1300 :                 return LOCKACQUIRE_NOT_AVAIL;
    1129             :             }
    1130             :             else
    1131             :             {
    1132             :                 /*
    1133             :                  * We should have gotten the lock, but somehow that didn't
    1134             :                  * happen. If we get here, it's a bug.
    1135             :                  */
    1136             :                 PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
    1137             :                 LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
    1138           0 :                 LWLockRelease(partitionLock);
    1139           0 :                 elog(ERROR, "LockAcquire failed");
    1140             :             }
    1141             :         }
    1142             :         PROCLOCK_PRINT("LockAcquire: granted", proclock);
    1143             :         LOCK_PRINT("LockAcquire: granted", lock, lockmode);
    1144             :     }
    1145             : 
    1146             :     /*
    1147             :      * Lock state is fully up-to-date now; if we error out after this, no
    1148             :      * special error cleanup is required.
    1149             :      */
    1150     3555962 :     FinishStrongLockAcquire();
    1151             : 
    1152     3555962 :     LWLockRelease(partitionLock);
    1153             : 
    1154             :     /*
    1155             :      * Emit a WAL record if acquisition of this lock needs to be replayed in a
    1156             :      * standby server.
    1157             :      */
    1158     3555962 :     if (log_lock)
    1159             :     {
    1160             :         /*
    1161             :          * Decode the locktag back to the original values, to avoid sending
    1162             :          * lots of empty bytes with every message.  See lock.h to check how a
    1163             :          * locktag is defined for LOCKTAG_RELATION
    1164             :          */
    1165      175708 :         LogAccessExclusiveLock(locktag->locktag_field1,
    1166             :                                locktag->locktag_field2);
    1167             :     }
    1168             : 
    1169     3555962 :     return LOCKACQUIRE_OK;
    1170             : }
    1171             : 
    1172             : /*
    1173             :  * Find or create LOCK and PROCLOCK objects as needed for a new lock
    1174             :  * request.
    1175             :  *
    1176             :  * Returns the PROCLOCK object, or NULL if we failed to create the objects
    1177             :  * for lack of shared memory.
    1178             :  *
    1179             :  * The appropriate partition lock must be held at entry, and will be
    1180             :  * held at exit.
    1181             :  */
    1182             : static PROCLOCK *
    1183     3560612 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
    1184             :                  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
    1185             : {
    1186             :     LOCK       *lock;
    1187             :     PROCLOCK   *proclock;
    1188             :     PROCLOCKTAG proclocktag;
    1189             :     uint32      proclock_hashcode;
    1190             :     bool        found;
    1191             : 
    1192             :     /*
    1193             :      * Find or create a lock with this tag.
    1194             :      */
    1195     3560612 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    1196             :                                                 locktag,
    1197             :                                                 hashcode,
    1198             :                                                 HASH_ENTER_NULL,
    1199             :                                                 &found);
    1200     3560612 :     if (!lock)
    1201           0 :         return NULL;
    1202             : 
    1203             :     /*
    1204             :      * if it's a new lock object, initialize it
    1205             :      */
    1206     3560612 :     if (!found)
    1207             :     {
    1208     3185470 :         lock->grantMask = 0;
    1209     3185470 :         lock->waitMask = 0;
    1210     3185470 :         dlist_init(&lock->procLocks);
    1211     3185470 :         dclist_init(&lock->waitProcs);
    1212     3185470 :         lock->nRequested = 0;
    1213     3185470 :         lock->nGranted = 0;
    1214    19112820 :         MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
    1215     3185470 :         MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
    1216             :         LOCK_PRINT("LockAcquire: new", lock, lockmode);
    1217             :     }
    1218             :     else
    1219             :     {
    1220             :         LOCK_PRINT("LockAcquire: found", lock, lockmode);
    1221             :         Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
    1222             :         Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
    1223             :         Assert(lock->nGranted <= lock->nRequested);
    1224             :     }
    1225             : 
    1226             :     /*
    1227             :      * Create the hash key for the proclock table.
    1228             :      */
    1229     3560612 :     proclocktag.myLock = lock;
    1230     3560612 :     proclocktag.myProc = proc;
    1231             : 
    1232     3560612 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    1233             : 
    1234             :     /*
    1235             :      * Find or create a proclock entry with this tag
    1236             :      */
    1237     3560612 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    1238             :                                                         &proclocktag,
    1239             :                                                         proclock_hashcode,
    1240             :                                                         HASH_ENTER_NULL,
    1241             :                                                         &found);
    1242     3560612 :     if (!proclock)
    1243             :     {
    1244             :         /* Oops, not enough shmem for the proclock */
    1245           0 :         if (lock->nRequested == 0)
    1246             :         {
    1247             :             /*
    1248             :              * There are no other requestors of this lock, so garbage-collect
    1249             :              * the lock object.  We *must* do this to avoid a permanent leak
    1250             :              * of shared memory, because there won't be anything to cause
    1251             :              * anyone to release the lock object later.
    1252             :              */
    1253             :             Assert(dlist_is_empty(&(lock->procLocks)));
    1254           0 :             if (!hash_search_with_hash_value(LockMethodLockHash,
    1255           0 :                                              &(lock->tag),
    1256             :                                              hashcode,
    1257             :                                              HASH_REMOVE,
    1258             :                                              NULL))
    1259           0 :                 elog(PANIC, "lock table corrupted");
    1260             :         }
    1261           0 :         return NULL;
    1262             :     }
    1263             : 
    1264             :     /*
    1265             :      * If new, initialize the new entry
    1266             :      */
    1267     3560612 :     if (!found)
    1268             :     {
    1269     3211492 :         uint32      partition = LockHashPartition(hashcode);
    1270             : 
    1271             :         /*
    1272             :          * It might seem unsafe to access proclock->groupLeader without a
    1273             :          * lock, but it's not really.  Either we are initializing a proclock
    1274             :          * on our own behalf, in which case our group leader isn't changing
    1275             :          * because the group leader for a process can only ever be changed by
    1276             :          * the process itself; or else we are transferring a fast-path lock to
    1277             :          * the main lock table, in which case that process can't change it's
    1278             :          * lock group leader without first releasing all of its locks (and in
    1279             :          * particular the one we are currently transferring).
    1280             :          */
    1281     6422984 :         proclock->groupLeader = proc->lockGroupLeader != NULL ?
    1282     3211492 :             proc->lockGroupLeader : proc;
    1283     3211492 :         proclock->holdMask = 0;
    1284     3211492 :         proclock->releaseMask = 0;
    1285             :         /* Add proclock to appropriate lists */
    1286     3211492 :         dlist_push_tail(&lock->procLocks, &proclock->lockLink);
    1287     3211492 :         dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
    1288             :         PROCLOCK_PRINT("LockAcquire: new", proclock);
    1289             :     }
    1290             :     else
    1291             :     {
    1292             :         PROCLOCK_PRINT("LockAcquire: found", proclock);
    1293             :         Assert((proclock->holdMask & ~lock->grantMask) == 0);
    1294             : 
    1295             : #ifdef CHECK_DEADLOCK_RISK
    1296             : 
    1297             :         /*
    1298             :          * Issue warning if we already hold a lower-level lock on this object
    1299             :          * and do not hold a lock of the requested level or higher. This
    1300             :          * indicates a deadlock-prone coding practice (eg, we'd have a
    1301             :          * deadlock if another backend were following the same code path at
    1302             :          * about the same time).
    1303             :          *
    1304             :          * This is not enabled by default, because it may generate log entries
    1305             :          * about user-level coding practices that are in fact safe in context.
    1306             :          * It can be enabled to help find system-level problems.
    1307             :          *
    1308             :          * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
    1309             :          * better to use a table.  For now, though, this works.
    1310             :          */
    1311             :         {
    1312             :             int         i;
    1313             : 
    1314             :             for (i = lockMethodTable->numLockModes; i > 0; i--)
    1315             :             {
    1316             :                 if (proclock->holdMask & LOCKBIT_ON(i))
    1317             :                 {
    1318             :                     if (i >= (int) lockmode)
    1319             :                         break;  /* safe: we have a lock >= req level */
    1320             :                     elog(LOG, "deadlock risk: raising lock level"
    1321             :                          " from %s to %s on object %u/%u/%u",
    1322             :                          lockMethodTable->lockModeNames[i],
    1323             :                          lockMethodTable->lockModeNames[lockmode],
    1324             :                          lock->tag.locktag_field1, lock->tag.locktag_field2,
    1325             :                          lock->tag.locktag_field3);
    1326             :                     break;
    1327             :                 }
    1328             :             }
    1329             :         }
    1330             : #endif                          /* CHECK_DEADLOCK_RISK */
    1331             :     }
    1332             : 
    1333             :     /*
    1334             :      * lock->nRequested and lock->requested[] count the total number of
    1335             :      * requests, whether granted or waiting, so increment those immediately.
    1336             :      * The other counts don't increment till we get the lock.
    1337             :      */
    1338     3560612 :     lock->nRequested++;
    1339     3560612 :     lock->requested[lockmode]++;
    1340             :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    1341             : 
    1342             :     /*
    1343             :      * We shouldn't already hold the desired lock; else locallock table is
    1344             :      * broken.
    1345             :      */
    1346     3560612 :     if (proclock->holdMask & LOCKBIT_ON(lockmode))
    1347           0 :         elog(ERROR, "lock %s on object %u/%u/%u is already held",
    1348             :              lockMethodTable->lockModeNames[lockmode],
    1349             :              lock->tag.locktag_field1, lock->tag.locktag_field2,
    1350             :              lock->tag.locktag_field3);
    1351             : 
    1352     3560612 :     return proclock;
    1353             : }
    1354             : 
    1355             : /*
    1356             :  * Check and set/reset the flag that we hold the relation extension lock.
    1357             :  *
    1358             :  * It is callers responsibility that this function is called after
    1359             :  * acquiring/releasing the relation extension lock.
    1360             :  *
    1361             :  * Pass acquired as true if lock is acquired, false otherwise.
    1362             :  */
    1363             : static inline void
    1364    53720118 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
    1365             : {
    1366             : #ifdef USE_ASSERT_CHECKING
    1367             :     if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
    1368             :         IsRelationExtensionLockHeld = acquired;
    1369             : #endif
    1370    53720118 : }
    1371             : 
    1372             : /*
    1373             :  * Subroutine to free a locallock entry
    1374             :  */
    1375             : static void
    1376    26452318 : RemoveLocalLock(LOCALLOCK *locallock)
    1377             : {
    1378             :     int         i;
    1379             : 
    1380    26586638 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    1381             :     {
    1382      134320 :         if (locallock->lockOwners[i].owner != NULL)
    1383      134246 :             ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
    1384             :     }
    1385    26452318 :     locallock->numLockOwners = 0;
    1386    26452318 :     if (locallock->lockOwners != NULL)
    1387    26452318 :         pfree(locallock->lockOwners);
    1388    26452318 :     locallock->lockOwners = NULL;
    1389             : 
    1390    26452318 :     if (locallock->holdsStrongLockCount)
    1391             :     {
    1392             :         uint32      fasthashcode;
    1393             : 
    1394      327178 :         fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    1395             : 
    1396      327178 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1397             :         Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    1398      327178 :         FastPathStrongRelationLocks->count[fasthashcode]--;
    1399      327178 :         locallock->holdsStrongLockCount = false;
    1400      327178 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1401             :     }
    1402             : 
    1403    26452318 :     if (!hash_search(LockMethodLocalHash,
    1404    26452318 :                      &(locallock->tag),
    1405             :                      HASH_REMOVE, NULL))
    1406           0 :         elog(WARNING, "locallock table corrupted");
    1407             : 
    1408             :     /*
    1409             :      * Indicate that the lock is released for certain types of locks
    1410             :      */
    1411    26452318 :     CheckAndSetLockHeld(locallock, false);
    1412    26452318 : }
    1413             : 
    1414             : /*
    1415             :  * LockCheckConflicts -- test whether requested lock conflicts
    1416             :  *      with those already granted
    1417             :  *
    1418             :  * Returns true if conflict, false if no conflict.
    1419             :  *
    1420             :  * NOTES:
    1421             :  *      Here's what makes this complicated: one process's locks don't
    1422             :  * conflict with one another, no matter what purpose they are held for
    1423             :  * (eg, session and transaction locks do not conflict).  Nor do the locks
    1424             :  * of one process in a lock group conflict with those of another process in
    1425             :  * the same group.  So, we must subtract off these locks when determining
    1426             :  * whether the requested new lock conflicts with those already held.
    1427             :  */
    1428             : bool
    1429     3559696 : LockCheckConflicts(LockMethod lockMethodTable,
    1430             :                    LOCKMODE lockmode,
    1431             :                    LOCK *lock,
    1432             :                    PROCLOCK *proclock)
    1433             : {
    1434     3559696 :     int         numLockModes = lockMethodTable->numLockModes;
    1435             :     LOCKMASK    myLocks;
    1436     3559696 :     int         conflictMask = lockMethodTable->conflictTab[lockmode];
    1437             :     int         conflictsRemaining[MAX_LOCKMODES];
    1438     3559696 :     int         totalConflictsRemaining = 0;
    1439             :     dlist_iter  proclock_iter;
    1440             :     int         i;
    1441             : 
    1442             :     /*
    1443             :      * first check for global conflicts: If no locks conflict with my request,
    1444             :      * then I get the lock.
    1445             :      *
    1446             :      * Checking for conflict: lock->grantMask represents the types of
    1447             :      * currently held locks.  conflictTable[lockmode] has a bit set for each
    1448             :      * type of lock that conflicts with request.   Bitwise compare tells if
    1449             :      * there is a conflict.
    1450             :      */
    1451     3559696 :     if (!(conflictMask & lock->grantMask))
    1452             :     {
    1453             :         PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
    1454     3396668 :         return false;
    1455             :     }
    1456             : 
    1457             :     /*
    1458             :      * Rats.  Something conflicts.  But it could still be my own lock, or a
    1459             :      * lock held by another member of my locking group.  First, figure out how
    1460             :      * many conflicts remain after subtracting out any locks I hold myself.
    1461             :      */
    1462      163028 :     myLocks = proclock->holdMask;
    1463     1467252 :     for (i = 1; i <= numLockModes; i++)
    1464             :     {
    1465     1304224 :         if ((conflictMask & LOCKBIT_ON(i)) == 0)
    1466             :         {
    1467      694692 :             conflictsRemaining[i] = 0;
    1468      694692 :             continue;
    1469             :         }
    1470      609532 :         conflictsRemaining[i] = lock->granted[i];
    1471      609532 :         if (myLocks & LOCKBIT_ON(i))
    1472      176444 :             --conflictsRemaining[i];
    1473      609532 :         totalConflictsRemaining += conflictsRemaining[i];
    1474             :     }
    1475             : 
    1476             :     /* If no conflicts remain, we get the lock. */
    1477      163028 :     if (totalConflictsRemaining == 0)
    1478             :     {
    1479             :         PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
    1480      158360 :         return false;
    1481             :     }
    1482             : 
    1483             :     /* If no group locking, it's definitely a conflict. */
    1484        4668 :     if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
    1485             :     {
    1486             :         Assert(proclock->tag.myProc == MyProc);
    1487             :         PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
    1488             :                        proclock);
    1489        3362 :         return true;
    1490             :     }
    1491             : 
    1492             :     /*
    1493             :      * The relation extension lock conflict even between the group members.
    1494             :      */
    1495        1306 :     if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
    1496             :     {
    1497             :         PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
    1498             :                        proclock);
    1499           2 :         return true;
    1500             :     }
    1501             : 
    1502             :     /*
    1503             :      * Locks held in conflicting modes by members of our own lock group are
    1504             :      * not real conflicts; we can subtract those out and see if we still have
    1505             :      * a conflict.  This is O(N) in the number of processes holding or
    1506             :      * awaiting locks on this object.  We could improve that by making the
    1507             :      * shared memory state more complex (and larger) but it doesn't seem worth
    1508             :      * it.
    1509             :      */
    1510        2444 :     dlist_foreach(proclock_iter, &lock->procLocks)
    1511             :     {
    1512        2084 :         PROCLOCK   *otherproclock =
    1513        2084 :             dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
    1514             : 
    1515        2084 :         if (proclock != otherproclock &&
    1516        1724 :             proclock->groupLeader == otherproclock->groupLeader &&
    1517         948 :             (otherproclock->holdMask & conflictMask) != 0)
    1518             :         {
    1519         944 :             int         intersectMask = otherproclock->holdMask & conflictMask;
    1520             : 
    1521        8496 :             for (i = 1; i <= numLockModes; i++)
    1522             :             {
    1523        7552 :                 if ((intersectMask & LOCKBIT_ON(i)) != 0)
    1524             :                 {
    1525         964 :                     if (conflictsRemaining[i] <= 0)
    1526           0 :                         elog(PANIC, "proclocks held do not match lock");
    1527         964 :                     conflictsRemaining[i]--;
    1528         964 :                     totalConflictsRemaining--;
    1529             :                 }
    1530             :             }
    1531             : 
    1532         944 :             if (totalConflictsRemaining == 0)
    1533             :             {
    1534             :                 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
    1535             :                                proclock);
    1536         944 :                 return false;
    1537             :             }
    1538             :         }
    1539             :     }
    1540             : 
    1541             :     /* Nope, it's a real conflict. */
    1542             :     PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
    1543         360 :     return true;
    1544             : }
    1545             : 
    1546             : /*
    1547             :  * GrantLock -- update the lock and proclock data structures to show
    1548             :  *      the lock request has been granted.
    1549             :  *
    1550             :  * NOTE: if proc was blocked, it also needs to be removed from the wait list
    1551             :  * and have its waitLock/waitProcLock fields cleared.  That's not done here.
    1552             :  *
    1553             :  * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
    1554             :  * table entry; but since we may be awaking some other process, we can't do
    1555             :  * that here; it's done by GrantLockLocal, instead.
    1556             :  */
    1557             : void
    1558     3559406 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
    1559             : {
    1560     3559406 :     lock->nGranted++;
    1561     3559406 :     lock->granted[lockmode]++;
    1562     3559406 :     lock->grantMask |= LOCKBIT_ON(lockmode);
    1563     3559406 :     if (lock->granted[lockmode] == lock->requested[lockmode])
    1564     3558900 :         lock->waitMask &= LOCKBIT_OFF(lockmode);
    1565     3559406 :     proclock->holdMask |= LOCKBIT_ON(lockmode);
    1566             :     LOCK_PRINT("GrantLock", lock, lockmode);
    1567             :     Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    1568             :     Assert(lock->nGranted <= lock->nRequested);
    1569     3559406 : }
    1570             : 
    1571             : /*
    1572             :  * UnGrantLock -- opposite of GrantLock.
    1573             :  *
    1574             :  * Updates the lock and proclock data structures to show that the lock
    1575             :  * is no longer held nor requested by the current holder.
    1576             :  *
    1577             :  * Returns true if there were any waiters waiting on the lock that
    1578             :  * should now be woken up with ProcLockWakeup.
    1579             :  */
    1580             : static bool
    1581     3559244 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
    1582             :             PROCLOCK *proclock, LockMethod lockMethodTable)
    1583             : {
    1584     3559244 :     bool        wakeupNeeded = false;
    1585             : 
    1586             :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    1587             :     Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    1588             :     Assert(lock->nGranted <= lock->nRequested);
    1589             : 
    1590             :     /*
    1591             :      * fix the general lock stats
    1592             :      */
    1593     3559244 :     lock->nRequested--;
    1594     3559244 :     lock->requested[lockmode]--;
    1595     3559244 :     lock->nGranted--;
    1596     3559244 :     lock->granted[lockmode]--;
    1597             : 
    1598     3559244 :     if (lock->granted[lockmode] == 0)
    1599             :     {
    1600             :         /* change the conflict mask.  No more of this lock type. */
    1601     3541116 :         lock->grantMask &= LOCKBIT_OFF(lockmode);
    1602             :     }
    1603             : 
    1604             :     LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
    1605             : 
    1606             :     /*
    1607             :      * We need only run ProcLockWakeup if the released lock conflicts with at
    1608             :      * least one of the lock types requested by waiter(s).  Otherwise whatever
    1609             :      * conflict made them wait must still exist.  NOTE: before MVCC, we could
    1610             :      * skip wakeup if lock->granted[lockmode] was still positive. But that's
    1611             :      * not true anymore, because the remaining granted locks might belong to
    1612             :      * some waiter, who could now be awakened because he doesn't conflict with
    1613             :      * his own locks.
    1614             :      */
    1615     3559244 :     if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
    1616        2290 :         wakeupNeeded = true;
    1617             : 
    1618             :     /*
    1619             :      * Now fix the per-proclock state.
    1620             :      */
    1621     3559244 :     proclock->holdMask &= LOCKBIT_OFF(lockmode);
    1622             :     PROCLOCK_PRINT("UnGrantLock: updated", proclock);
    1623             : 
    1624     3559244 :     return wakeupNeeded;
    1625             : }
    1626             : 
    1627             : /*
    1628             :  * CleanUpLock -- clean up after releasing a lock.  We garbage-collect the
    1629             :  * proclock and lock objects if possible, and call ProcLockWakeup if there
    1630             :  * are remaining requests and the caller says it's OK.  (Normally, this
    1631             :  * should be called after UnGrantLock, and wakeupNeeded is the result from
    1632             :  * UnGrantLock.)
    1633             :  *
    1634             :  * The appropriate partition lock must be held at entry, and will be
    1635             :  * held at exit.
    1636             :  */
    1637             : static void
    1638     3496988 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
    1639             :             LockMethod lockMethodTable, uint32 hashcode,
    1640             :             bool wakeupNeeded)
    1641             : {
    1642             :     /*
    1643             :      * If this was my last hold on this lock, delete my entry in the proclock
    1644             :      * table.
    1645             :      */
    1646     3496988 :     if (proclock->holdMask == 0)
    1647             :     {
    1648             :         uint32      proclock_hashcode;
    1649             : 
    1650             :         PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
    1651     3210618 :         dlist_delete(&proclock->lockLink);
    1652     3210618 :         dlist_delete(&proclock->procLink);
    1653     3210618 :         proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
    1654     3210618 :         if (!hash_search_with_hash_value(LockMethodProcLockHash,
    1655     3210618 :                                          &(proclock->tag),
    1656             :                                          proclock_hashcode,
    1657             :                                          HASH_REMOVE,
    1658             :                                          NULL))
    1659           0 :             elog(PANIC, "proclock table corrupted");
    1660             :     }
    1661             : 
    1662     3496988 :     if (lock->nRequested == 0)
    1663             :     {
    1664             :         /*
    1665             :          * The caller just released the last lock, so garbage-collect the lock
    1666             :          * object.
    1667             :          */
    1668             :         LOCK_PRINT("CleanUpLock: deleting", lock, 0);
    1669             :         Assert(dlist_is_empty(&lock->procLocks));
    1670     3185484 :         if (!hash_search_with_hash_value(LockMethodLockHash,
    1671     3185484 :                                          &(lock->tag),
    1672             :                                          hashcode,
    1673             :                                          HASH_REMOVE,
    1674             :                                          NULL))
    1675           0 :             elog(PANIC, "lock table corrupted");
    1676             :     }
    1677      311504 :     else if (wakeupNeeded)
    1678             :     {
    1679             :         /* There are waiters on this lock, so wake them up. */
    1680        2376 :         ProcLockWakeup(lockMethodTable, lock);
    1681             :     }
    1682     3496988 : }
    1683             : 
    1684             : /*
    1685             :  * GrantLockLocal -- update the locallock data structures to show
    1686             :  *      the lock request has been granted.
    1687             :  *
    1688             :  * We expect that LockAcquire made sure there is room to add a new
    1689             :  * ResourceOwner entry.
    1690             :  */
    1691             : static void
    1692    29640560 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
    1693             : {
    1694    29640560 :     LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    1695             :     int         i;
    1696             : 
    1697             :     Assert(locallock->numLockOwners < locallock->maxLockOwners);
    1698             :     /* Count the total */
    1699    29640560 :     locallock->nLocks++;
    1700             :     /* Count the per-owner lock */
    1701    30786762 :     for (i = 0; i < locallock->numLockOwners; i++)
    1702             :     {
    1703     3518962 :         if (lockOwners[i].owner == owner)
    1704             :         {
    1705     2372760 :             lockOwners[i].nLocks++;
    1706     2372760 :             return;
    1707             :         }
    1708             :     }
    1709    27267800 :     lockOwners[i].owner = owner;
    1710    27267800 :     lockOwners[i].nLocks = 1;
    1711    27267800 :     locallock->numLockOwners++;
    1712    27267800 :     if (owner != NULL)
    1713    27200608 :         ResourceOwnerRememberLock(owner, locallock);
    1714             : 
    1715             :     /* Indicate that the lock is acquired for certain types of locks. */
    1716    27267800 :     CheckAndSetLockHeld(locallock, true);
    1717             : }
    1718             : 
    1719             : /*
    1720             :  * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
    1721             :  * and arrange for error cleanup if it fails
    1722             :  */
    1723             : static void
    1724      327748 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
    1725             : {
    1726             :     Assert(StrongLockInProgress == NULL);
    1727             :     Assert(locallock->holdsStrongLockCount == false);
    1728             : 
    1729             :     /*
    1730             :      * Adding to a memory location is not atomic, so we take a spinlock to
    1731             :      * ensure we don't collide with someone else trying to bump the count at
    1732             :      * the same time.
    1733             :      *
    1734             :      * XXX: It might be worth considering using an atomic fetch-and-add
    1735             :      * instruction here, on architectures where that is supported.
    1736             :      */
    1737             : 
    1738      327748 :     SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1739      327748 :     FastPathStrongRelationLocks->count[fasthashcode]++;
    1740      327748 :     locallock->holdsStrongLockCount = true;
    1741      327748 :     StrongLockInProgress = locallock;
    1742      327748 :     SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1743      327748 : }
    1744             : 
    1745             : /*
    1746             :  * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
    1747             :  * acquisition once it's no longer needed
    1748             :  */
    1749             : static void
    1750     3555962 : FinishStrongLockAcquire(void)
    1751             : {
    1752     3555962 :     StrongLockInProgress = NULL;
    1753     3555962 : }
    1754             : 
    1755             : /*
    1756             :  * AbortStrongLockAcquire - undo strong lock state changes performed by
    1757             :  * BeginStrongLockAcquire.
    1758             :  */
    1759             : void
    1760      617540 : AbortStrongLockAcquire(void)
    1761             : {
    1762             :     uint32      fasthashcode;
    1763      617540 :     LOCALLOCK  *locallock = StrongLockInProgress;
    1764             : 
    1765      617540 :     if (locallock == NULL)
    1766      617116 :         return;
    1767             : 
    1768         424 :     fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    1769             :     Assert(locallock->holdsStrongLockCount == true);
    1770         424 :     SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1771             :     Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    1772         424 :     FastPathStrongRelationLocks->count[fasthashcode]--;
    1773         424 :     locallock->holdsStrongLockCount = false;
    1774         424 :     StrongLockInProgress = NULL;
    1775         424 :     SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1776             : }
    1777             : 
    1778             : /*
    1779             :  * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
    1780             :  *      WaitOnLock on.
    1781             :  *
    1782             :  * proc.c needs this for the case where we are booted off the lock by
    1783             :  * timeout, but discover that someone granted us the lock anyway.
    1784             :  *
    1785             :  * We could just export GrantLockLocal, but that would require including
    1786             :  * resowner.h in lock.h, which creates circularity.
    1787             :  */
    1788             : void
    1789        2328 : GrantAwaitedLock(void)
    1790             : {
    1791        2328 :     GrantLockLocal(awaitedLock, awaitedOwner);
    1792        2328 : }
    1793             : 
    1794             : /*
    1795             :  * MarkLockClear -- mark an acquired lock as "clear"
    1796             :  *
    1797             :  * This means that we know we have absorbed all sinval messages that other
    1798             :  * sessions generated before we acquired this lock, and so we can confidently
    1799             :  * assume we know about any catalog changes protected by this lock.
    1800             :  */
    1801             : void
    1802    25676512 : MarkLockClear(LOCALLOCK *locallock)
    1803             : {
    1804             :     Assert(locallock->nLocks > 0);
    1805    25676512 :     locallock->lockCleared = true;
    1806    25676512 : }
    1807             : 
    1808             : /*
    1809             :  * WaitOnLock -- wait to acquire a lock
    1810             :  *
    1811             :  * Caller must have set MyProc->heldLocks to reflect locks already held
    1812             :  * on the lockable object by this process.
    1813             :  *
    1814             :  * The appropriate partition lock must be held at entry, and will still be
    1815             :  * held at exit.
    1816             :  */
    1817             : static void
    1818        3718 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool dontWait)
    1819             : {
    1820        3718 :     LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
    1821        3718 :     LockMethod  lockMethodTable = LockMethods[lockmethodid];
    1822             : 
    1823             :     LOCK_PRINT("WaitOnLock: sleeping on lock",
    1824             :                locallock->lock, locallock->tag.mode);
    1825             : 
    1826             :     /* adjust the process title to indicate that it's waiting */
    1827        3718 :     set_ps_display_suffix("waiting");
    1828             : 
    1829        3718 :     awaitedLock = locallock;
    1830        3718 :     awaitedOwner = owner;
    1831             : 
    1832             :     /*
    1833             :      * NOTE: Think not to put any shared-state cleanup after the call to
    1834             :      * ProcSleep, in either the normal or failure path.  The lock state must
    1835             :      * be fully set by the lock grantor, or by CheckDeadLock if we give up
    1836             :      * waiting for the lock.  This is necessary because of the possibility
    1837             :      * that a cancel/die interrupt will interrupt ProcSleep after someone else
    1838             :      * grants us the lock, but before we've noticed it. Hence, after granting,
    1839             :      * the locktable state must fully reflect the fact that we own the lock;
    1840             :      * we can't do additional work on return.
    1841             :      *
    1842             :      * We can and do use a PG_TRY block to try to clean up after failure, but
    1843             :      * this still has a major limitation: elog(FATAL) can occur while waiting
    1844             :      * (eg, a "die" interrupt), and then control won't come back here. So all
    1845             :      * cleanup of essential state should happen in LockErrorCleanup, not here.
    1846             :      * We can use PG_TRY to clear the "waiting" status flags, since doing that
    1847             :      * is unimportant if the process exits.
    1848             :      */
    1849        3718 :     PG_TRY();
    1850             :     {
    1851             :         /*
    1852             :          * If dontWait = true, we handle success and failure in the same way
    1853             :          * here. The caller will be able to sort out what has happened.
    1854             :          */
    1855        3718 :         if (ProcSleep(locallock, lockMethodTable, dontWait) != PROC_WAIT_STATUS_OK
    1856        1310 :             && !dontWait)
    1857             :         {
    1858             : 
    1859             :             /*
    1860             :              * We failed as a result of a deadlock, see CheckDeadLock(). Quit
    1861             :              * now.
    1862             :              */
    1863          10 :             awaitedLock = NULL;
    1864             :             LOCK_PRINT("WaitOnLock: aborting on lock",
    1865             :                        locallock->lock, locallock->tag.mode);
    1866          10 :             LWLockRelease(LockHashPartitionLock(locallock->hashcode));
    1867             : 
    1868             :             /*
    1869             :              * Now that we aren't holding the partition lock, we can give an
    1870             :              * error report including details about the detected deadlock.
    1871             :              */
    1872          10 :             DeadLockReport();
    1873             :             /* not reached */
    1874             :         }
    1875             :     }
    1876          84 :     PG_CATCH();
    1877             :     {
    1878             :         /* In this path, awaitedLock remains set until LockErrorCleanup */
    1879             : 
    1880             :         /* reset ps display to remove the suffix */
    1881          84 :         set_ps_display_remove_suffix();
    1882             : 
    1883             :         /* and propagate the error */
    1884          84 :         PG_RE_THROW();
    1885             :     }
    1886        3626 :     PG_END_TRY();
    1887             : 
    1888        3626 :     awaitedLock = NULL;
    1889             : 
    1890             :     /* reset ps display to remove the suffix */
    1891        3626 :     set_ps_display_remove_suffix();
    1892             : 
    1893             :     LOCK_PRINT("WaitOnLock: wakeup on lock",
    1894             :                locallock->lock, locallock->tag.mode);
    1895        3626 : }
    1896             : 
    1897             : /*
    1898             :  * Remove a proc from the wait-queue it is on (caller must know it is on one).
    1899             :  * This is only used when the proc has failed to get the lock, so we set its
    1900             :  * waitStatus to PROC_WAIT_STATUS_ERROR.
    1901             :  *
    1902             :  * Appropriate partition lock must be held by caller.  Also, caller is
    1903             :  * responsible for signaling the proc if needed.
    1904             :  *
    1905             :  * NB: this does not clean up any locallock object that may exist for the lock.
    1906             :  */
    1907             : void
    1908          90 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
    1909             : {
    1910          90 :     LOCK       *waitLock = proc->waitLock;
    1911          90 :     PROCLOCK   *proclock = proc->waitProcLock;
    1912          90 :     LOCKMODE    lockmode = proc->waitLockMode;
    1913          90 :     LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
    1914             : 
    1915             :     /* Make sure proc is waiting */
    1916             :     Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
    1917             :     Assert(proc->links.next != NULL);
    1918             :     Assert(waitLock);
    1919             :     Assert(!dclist_is_empty(&waitLock->waitProcs));
    1920             :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
    1921             : 
    1922             :     /* Remove proc from lock's wait queue */
    1923          90 :     dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
    1924             : 
    1925             :     /* Undo increments of request counts by waiting process */
    1926             :     Assert(waitLock->nRequested > 0);
    1927             :     Assert(waitLock->nRequested > proc->waitLock->nGranted);
    1928          90 :     waitLock->nRequested--;
    1929             :     Assert(waitLock->requested[lockmode] > 0);
    1930          90 :     waitLock->requested[lockmode]--;
    1931             :     /* don't forget to clear waitMask bit if appropriate */
    1932          90 :     if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
    1933          88 :         waitLock->waitMask &= LOCKBIT_OFF(lockmode);
    1934             : 
    1935             :     /* Clean up the proc's own state, and pass it the ok/fail signal */
    1936          90 :     proc->waitLock = NULL;
    1937          90 :     proc->waitProcLock = NULL;
    1938          90 :     proc->waitStatus = PROC_WAIT_STATUS_ERROR;
    1939             : 
    1940             :     /*
    1941             :      * Delete the proclock immediately if it represents no already-held locks.
    1942             :      * (This must happen now because if the owner of the lock decides to
    1943             :      * release it, and the requested/granted counts then go to zero,
    1944             :      * LockRelease expects there to be no remaining proclocks.) Then see if
    1945             :      * any other waiters for the lock can be woken up now.
    1946             :      */
    1947          90 :     CleanUpLock(waitLock, proclock,
    1948             :                 LockMethods[lockmethodid], hashcode,
    1949             :                 true);
    1950          90 : }
    1951             : 
    1952             : /*
    1953             :  * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
    1954             :  *      Release a session lock if 'sessionLock' is true, else release a
    1955             :  *      regular transaction lock.
    1956             :  *
    1957             :  * Side Effects: find any waiting processes that are now wakable,
    1958             :  *      grant them their requested locks and awaken them.
    1959             :  *      (We have to grant the lock here to avoid a race between
    1960             :  *      the waking process and any new process to
    1961             :  *      come along and request the lock.)
    1962             :  */
    1963             : bool
    1964    26181356 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
    1965             : {
    1966    26181356 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    1967             :     LockMethod  lockMethodTable;
    1968             :     LOCALLOCKTAG localtag;
    1969             :     LOCALLOCK  *locallock;
    1970             :     LOCK       *lock;
    1971             :     PROCLOCK   *proclock;
    1972             :     LWLock     *partitionLock;
    1973             :     bool        wakeupNeeded;
    1974             : 
    1975    26181356 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    1976           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    1977    26181356 :     lockMethodTable = LockMethods[lockmethodid];
    1978    26181356 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
    1979           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
    1980             : 
    1981             : #ifdef LOCK_DEBUG
    1982             :     if (LOCK_DEBUG_ENABLED(locktag))
    1983             :         elog(LOG, "LockRelease: lock [%u,%u] %s",
    1984             :              locktag->locktag_field1, locktag->locktag_field2,
    1985             :              lockMethodTable->lockModeNames[lockmode]);
    1986             : #endif
    1987             : 
    1988             :     /*
    1989             :      * Find the LOCALLOCK entry for this lock and lockmode
    1990             :      */
    1991    26181356 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
    1992    26181356 :     localtag.lock = *locktag;
    1993    26181356 :     localtag.mode = lockmode;
    1994             : 
    1995    26181356 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
    1996             :                                           &localtag,
    1997             :                                           HASH_FIND, NULL);
    1998             : 
    1999             :     /*
    2000             :      * let the caller print its own error message, too. Do not ereport(ERROR).
    2001             :      */
    2002    26181356 :     if (!locallock || locallock->nLocks <= 0)
    2003             :     {
    2004          26 :         elog(WARNING, "you don't own a lock of type %s",
    2005             :              lockMethodTable->lockModeNames[lockmode]);
    2006          26 :         return false;
    2007             :     }
    2008             : 
    2009             :     /*
    2010             :      * Decrease the count for the resource owner.
    2011             :      */
    2012             :     {
    2013    26181330 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    2014             :         ResourceOwner owner;
    2015             :         int         i;
    2016             : 
    2017             :         /* Identify owner for lock */
    2018    26181330 :         if (sessionLock)
    2019       67162 :             owner = NULL;
    2020             :         else
    2021    26114168 :             owner = CurrentResourceOwner;
    2022             : 
    2023    26183094 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2024             :         {
    2025    26183070 :             if (lockOwners[i].owner == owner)
    2026             :             {
    2027             :                 Assert(lockOwners[i].nLocks > 0);
    2028    26181306 :                 if (--lockOwners[i].nLocks == 0)
    2029             :                 {
    2030    25356940 :                     if (owner != NULL)
    2031    25289822 :                         ResourceOwnerForgetLock(owner, locallock);
    2032             :                     /* compact out unused slot */
    2033    25356940 :                     locallock->numLockOwners--;
    2034    25356940 :                     if (i < locallock->numLockOwners)
    2035          96 :                         lockOwners[i] = lockOwners[locallock->numLockOwners];
    2036             :                 }
    2037    26181306 :                 break;
    2038             :             }
    2039             :         }
    2040    26181330 :         if (i < 0)
    2041             :         {
    2042             :             /* don't release a lock belonging to another owner */
    2043          24 :             elog(WARNING, "you don't own a lock of type %s",
    2044             :                  lockMethodTable->lockModeNames[lockmode]);
    2045          24 :             return false;
    2046             :         }
    2047             :     }
    2048             : 
    2049             :     /*
    2050             :      * Decrease the total local count.  If we're still holding the lock, we're
    2051             :      * done.
    2052             :      */
    2053    26181306 :     locallock->nLocks--;
    2054             : 
    2055    26181306 :     if (locallock->nLocks > 0)
    2056     1397116 :         return true;
    2057             : 
    2058             :     /*
    2059             :      * At this point we can no longer suppose we are clear of invalidation
    2060             :      * messages related to this lock.  Although we'll delete the LOCALLOCK
    2061             :      * object before any intentional return from this routine, it seems worth
    2062             :      * the trouble to explicitly reset lockCleared right now, just in case
    2063             :      * some error prevents us from deleting the LOCALLOCK.
    2064             :      */
    2065    24784190 :     locallock->lockCleared = false;
    2066             : 
    2067             :     /* Attempt fast release of any lock eligible for the fast path. */
    2068    24784190 :     if (EligibleForRelationFastPath(locktag, lockmode) &&
    2069    23033824 :         FastPathLocalUseCount > 0)
    2070             :     {
    2071             :         bool        released;
    2072             : 
    2073             :         /*
    2074             :          * We might not find the lock here, even if we originally entered it
    2075             :          * here.  Another backend may have moved it to the main table.
    2076             :          */
    2077    22886866 :         LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2078    22886866 :         released = FastPathUnGrantRelationLock(locktag->locktag_field2,
    2079             :                                                lockmode);
    2080    22886866 :         LWLockRelease(&MyProc->fpInfoLock);
    2081    22886866 :         if (released)
    2082             :         {
    2083    22114814 :             RemoveLocalLock(locallock);
    2084    22114814 :             return true;
    2085             :         }
    2086             :     }
    2087             : 
    2088             :     /*
    2089             :      * Otherwise we've got to mess with the shared lock table.
    2090             :      */
    2091     2669376 :     partitionLock = LockHashPartitionLock(locallock->hashcode);
    2092             : 
    2093     2669376 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2094             : 
    2095             :     /*
    2096             :      * Normally, we don't need to re-find the lock or proclock, since we kept
    2097             :      * their addresses in the locallock table, and they couldn't have been
    2098             :      * removed while we were holding a lock on them.  But it's possible that
    2099             :      * the lock was taken fast-path and has since been moved to the main hash
    2100             :      * table by another backend, in which case we will need to look up the
    2101             :      * objects here.  We assume the lock field is NULL if so.
    2102             :      */
    2103     2669376 :     lock = locallock->lock;
    2104     2669376 :     if (!lock)
    2105             :     {
    2106             :         PROCLOCKTAG proclocktag;
    2107             : 
    2108             :         Assert(EligibleForRelationFastPath(locktag, lockmode));
    2109           6 :         lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    2110             :                                                     locktag,
    2111             :                                                     locallock->hashcode,
    2112             :                                                     HASH_FIND,
    2113             :                                                     NULL);
    2114           6 :         if (!lock)
    2115           0 :             elog(ERROR, "failed to re-find shared lock object");
    2116           6 :         locallock->lock = lock;
    2117             : 
    2118           6 :         proclocktag.myLock = lock;
    2119           6 :         proclocktag.myProc = MyProc;
    2120           6 :         locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
    2121             :                                                        &proclocktag,
    2122             :                                                        HASH_FIND,
    2123             :                                                        NULL);
    2124           6 :         if (!locallock->proclock)
    2125           0 :             elog(ERROR, "failed to re-find shared proclock object");
    2126             :     }
    2127             :     LOCK_PRINT("LockRelease: found", lock, lockmode);
    2128     2669376 :     proclock = locallock->proclock;
    2129             :     PROCLOCK_PRINT("LockRelease: found", proclock);
    2130             : 
    2131             :     /*
    2132             :      * Double-check that we are actually holding a lock of the type we want to
    2133             :      * release.
    2134             :      */
    2135     2669376 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    2136             :     {
    2137             :         PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
    2138           0 :         LWLockRelease(partitionLock);
    2139           0 :         elog(WARNING, "you don't own a lock of type %s",
    2140             :              lockMethodTable->lockModeNames[lockmode]);
    2141           0 :         RemoveLocalLock(locallock);
    2142           0 :         return false;
    2143             :     }
    2144             : 
    2145             :     /*
    2146             :      * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
    2147             :      */
    2148     2669376 :     wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
    2149             : 
    2150     2669376 :     CleanUpLock(lock, proclock,
    2151             :                 lockMethodTable, locallock->hashcode,
    2152             :                 wakeupNeeded);
    2153             : 
    2154     2669376 :     LWLockRelease(partitionLock);
    2155             : 
    2156     2669376 :     RemoveLocalLock(locallock);
    2157     2669376 :     return true;
    2158             : }
    2159             : 
    2160             : /*
    2161             :  * LockReleaseAll -- Release all locks of the specified lock method that
    2162             :  *      are held by the current process.
    2163             :  *
    2164             :  * Well, not necessarily *all* locks.  The available behaviors are:
    2165             :  *      allLocks == true: release all locks including session locks.
    2166             :  *      allLocks == false: release all non-session locks.
    2167             :  */
    2168             : void
    2169     1148796 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
    2170             : {
    2171             :     HASH_SEQ_STATUS status;
    2172             :     LockMethod  lockMethodTable;
    2173             :     int         i,
    2174             :                 numLockModes;
    2175             :     LOCALLOCK  *locallock;
    2176             :     LOCK       *lock;
    2177             :     int         partition;
    2178     1148796 :     bool        have_fast_path_lwlock = false;
    2179             : 
    2180     1148796 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2181           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2182     1148796 :     lockMethodTable = LockMethods[lockmethodid];
    2183             : 
    2184             : #ifdef LOCK_DEBUG
    2185             :     if (*(lockMethodTable->trace_flag))
    2186             :         elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
    2187             : #endif
    2188             : 
    2189             :     /*
    2190             :      * Get rid of our fast-path VXID lock, if appropriate.  Note that this is
    2191             :      * the only way that the lock we hold on our own VXID can ever get
    2192             :      * released: it is always and only released when a toplevel transaction
    2193             :      * ends.
    2194             :      */
    2195     1148796 :     if (lockmethodid == DEFAULT_LOCKMETHOD)
    2196      561844 :         VirtualXactLockTableCleanup();
    2197             : 
    2198     1148796 :     numLockModes = lockMethodTable->numLockModes;
    2199             : 
    2200             :     /*
    2201             :      * First we run through the locallock table and get rid of unwanted
    2202             :      * entries, then we scan the process's proclocks and get rid of those. We
    2203             :      * do this separately because we may have multiple locallock entries
    2204             :      * pointing to the same proclock, and we daren't end up with any dangling
    2205             :      * pointers.  Fast-path locks are cleaned up during the locallock table
    2206             :      * scan, though.
    2207             :      */
    2208     1148796 :     hash_seq_init(&status, LockMethodLocalHash);
    2209             : 
    2210     2903462 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2211             :     {
    2212             :         /*
    2213             :          * If the LOCALLOCK entry is unused, we must've run out of shared
    2214             :          * memory while trying to set up this lock.  Just forget the local
    2215             :          * entry.
    2216             :          */
    2217     1754666 :         if (locallock->nLocks == 0)
    2218             :         {
    2219          90 :             RemoveLocalLock(locallock);
    2220          90 :             continue;
    2221             :         }
    2222             : 
    2223             :         /* Ignore items that are not of the lockmethod to be removed */
    2224     1754576 :         if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
    2225       45090 :             continue;
    2226             : 
    2227             :         /*
    2228             :          * If we are asked to release all locks, we can just zap the entry.
    2229             :          * Otherwise, must scan to see if there are session locks. We assume
    2230             :          * there is at most one lockOwners entry for session locks.
    2231             :          */
    2232     1709486 :         if (!allLocks)
    2233             :         {
    2234     1578114 :             LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    2235             : 
    2236             :             /* If session lock is above array position 0, move it down to 0 */
    2237     3183544 :             for (i = 0; i < locallock->numLockOwners; i++)
    2238             :             {
    2239     1605430 :                 if (lockOwners[i].owner == NULL)
    2240       44476 :                     lockOwners[0] = lockOwners[i];
    2241             :                 else
    2242     1560954 :                     ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
    2243             :             }
    2244             : 
    2245     1578114 :             if (locallock->numLockOwners > 0 &&
    2246     1578114 :                 lockOwners[0].owner == NULL &&
    2247       44476 :                 lockOwners[0].nLocks > 0)
    2248             :             {
    2249             :                 /* Fix the locallock to show just the session locks */
    2250       44476 :                 locallock->nLocks = lockOwners[0].nLocks;
    2251       44476 :                 locallock->numLockOwners = 1;
    2252             :                 /* We aren't deleting this locallock, so done */
    2253       44476 :                 continue;
    2254             :             }
    2255             :             else
    2256     1533638 :                 locallock->numLockOwners = 0;
    2257             :         }
    2258             : 
    2259             :         /*
    2260             :          * If the lock or proclock pointers are NULL, this lock was taken via
    2261             :          * the relation fast-path (and is not known to have been transferred).
    2262             :          */
    2263     1665010 :         if (locallock->proclock == NULL || locallock->lock == NULL)
    2264             :         {
    2265      779384 :             LOCKMODE    lockmode = locallock->tag.mode;
    2266             :             Oid         relid;
    2267             : 
    2268             :             /* Verify that a fast-path lock is what we've got. */
    2269      779384 :             if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
    2270           0 :                 elog(PANIC, "locallock table corrupted");
    2271             : 
    2272             :             /*
    2273             :              * If we don't currently hold the LWLock that protects our
    2274             :              * fast-path data structures, we must acquire it before attempting
    2275             :              * to release the lock via the fast-path.  We will continue to
    2276             :              * hold the LWLock until we're done scanning the locallock table,
    2277             :              * unless we hit a transferred fast-path lock.  (XXX is this
    2278             :              * really such a good idea?  There could be a lot of entries ...)
    2279             :              */
    2280      779384 :             if (!have_fast_path_lwlock)
    2281             :             {
    2282      245578 :                 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2283      245578 :                 have_fast_path_lwlock = true;
    2284             :             }
    2285             : 
    2286             :             /* Attempt fast-path release. */
    2287      779384 :             relid = locallock->tag.lock.locktag_field2;
    2288      779384 :             if (FastPathUnGrantRelationLock(relid, lockmode))
    2289             :             {
    2290      777416 :                 RemoveLocalLock(locallock);
    2291      777416 :                 continue;
    2292             :             }
    2293             : 
    2294             :             /*
    2295             :              * Our lock, originally taken via the fast path, has been
    2296             :              * transferred to the main lock table.  That's going to require
    2297             :              * some extra work, so release our fast-path lock before starting.
    2298             :              */
    2299        1968 :             LWLockRelease(&MyProc->fpInfoLock);
    2300        1968 :             have_fast_path_lwlock = false;
    2301             : 
    2302             :             /*
    2303             :              * Now dump the lock.  We haven't got a pointer to the LOCK or
    2304             :              * PROCLOCK in this case, so we have to handle this a bit
    2305             :              * differently than a normal lock release.  Unfortunately, this
    2306             :              * requires an extra LWLock acquire-and-release cycle on the
    2307             :              * partitionLock, but hopefully it shouldn't happen often.
    2308             :              */
    2309        1968 :             LockRefindAndRelease(lockMethodTable, MyProc,
    2310             :                                  &locallock->tag.lock, lockmode, false);
    2311        1968 :             RemoveLocalLock(locallock);
    2312        1968 :             continue;
    2313             :         }
    2314             : 
    2315             :         /* Mark the proclock to show we need to release this lockmode */
    2316      885626 :         if (locallock->nLocks > 0)
    2317      885626 :             locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
    2318             : 
    2319             :         /* And remove the locallock hashtable entry */
    2320      885626 :         RemoveLocalLock(locallock);
    2321             :     }
    2322             : 
    2323             :     /* Done with the fast-path data structures */
    2324     1148796 :     if (have_fast_path_lwlock)
    2325      243610 :         LWLockRelease(&MyProc->fpInfoLock);
    2326             : 
    2327             :     /*
    2328             :      * Now, scan each lock partition separately.
    2329             :      */
    2330    19529532 :     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
    2331             :     {
    2332             :         LWLock     *partitionLock;
    2333    18380736 :         dlist_head *procLocks = &MyProc->myProcLocks[partition];
    2334             :         dlist_mutable_iter proclock_iter;
    2335             : 
    2336    18380736 :         partitionLock = LockHashPartitionLockByIndex(partition);
    2337             : 
    2338             :         /*
    2339             :          * If the proclock list for this partition is empty, we can skip
    2340             :          * acquiring the partition lock.  This optimization is trickier than
    2341             :          * it looks, because another backend could be in process of adding
    2342             :          * something to our proclock list due to promoting one of our
    2343             :          * fast-path locks.  However, any such lock must be one that we
    2344             :          * decided not to delete above, so it's okay to skip it again now;
    2345             :          * we'd just decide not to delete it again.  We must, however, be
    2346             :          * careful to re-fetch the list header once we've acquired the
    2347             :          * partition lock, to be sure we have a valid, up-to-date pointer.
    2348             :          * (There is probably no significant risk if pointer fetch/store is
    2349             :          * atomic, but we don't wish to assume that.)
    2350             :          *
    2351             :          * XXX This argument assumes that the locallock table correctly
    2352             :          * represents all of our fast-path locks.  While allLocks mode
    2353             :          * guarantees to clean up all of our normal locks regardless of the
    2354             :          * locallock situation, we lose that guarantee for fast-path locks.
    2355             :          * This is not ideal.
    2356             :          */
    2357    18380736 :         if (dlist_is_empty(procLocks))
    2358    17661360 :             continue;           /* needn't examine this partition */
    2359             : 
    2360      719376 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2361             : 
    2362     1630912 :         dlist_foreach_modify(proclock_iter, procLocks)
    2363             :         {
    2364      911536 :             PROCLOCK   *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
    2365      911536 :             bool        wakeupNeeded = false;
    2366             : 
    2367             :             Assert(proclock->tag.myProc == MyProc);
    2368             : 
    2369      911536 :             lock = proclock->tag.myLock;
    2370             : 
    2371             :             /* Ignore items that are not of the lockmethod to be removed */
    2372      911536 :             if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
    2373       45084 :                 continue;
    2374             : 
    2375             :             /*
    2376             :              * In allLocks mode, force release of all locks even if locallock
    2377             :              * table had problems
    2378             :              */
    2379      866452 :             if (allLocks)
    2380       85648 :                 proclock->releaseMask = proclock->holdMask;
    2381             :             else
    2382             :                 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
    2383             : 
    2384             :             /*
    2385             :              * Ignore items that have nothing to be released, unless they have
    2386             :              * holdMask == 0 and are therefore recyclable
    2387             :              */
    2388      866452 :             if (proclock->releaseMask == 0 && proclock->holdMask != 0)
    2389       43172 :                 continue;
    2390             : 
    2391             :             PROCLOCK_PRINT("LockReleaseAll", proclock);
    2392             :             LOCK_PRINT("LockReleaseAll", lock, 0);
    2393             :             Assert(lock->nRequested >= 0);
    2394             :             Assert(lock->nGranted >= 0);
    2395             :             Assert(lock->nGranted <= lock->nRequested);
    2396             :             Assert((proclock->holdMask & ~lock->grantMask) == 0);
    2397             : 
    2398             :             /*
    2399             :              * Release the previously-marked lock modes
    2400             :              */
    2401     7409520 :             for (i = 1; i <= numLockModes; i++)
    2402             :             {
    2403     6586240 :                 if (proclock->releaseMask & LOCKBIT_ON(i))
    2404      885626 :                     wakeupNeeded |= UnGrantLock(lock, i, proclock,
    2405             :                                                 lockMethodTable);
    2406             :             }
    2407             :             Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
    2408             :             Assert(lock->nGranted <= lock->nRequested);
    2409             :             LOCK_PRINT("LockReleaseAll: updated", lock, 0);
    2410             : 
    2411      823280 :             proclock->releaseMask = 0;
    2412             : 
    2413             :             /* CleanUpLock will wake up waiters if needed. */
    2414      823280 :             CleanUpLock(lock, proclock,
    2415             :                         lockMethodTable,
    2416      823280 :                         LockTagHashCode(&lock->tag),
    2417             :                         wakeupNeeded);
    2418             :         }                       /* loop over PROCLOCKs within this partition */
    2419             : 
    2420      719376 :         LWLockRelease(partitionLock);
    2421             :     }                           /* loop over partitions */
    2422             : 
    2423             : #ifdef LOCK_DEBUG
    2424             :     if (*(lockMethodTable->trace_flag))
    2425             :         elog(LOG, "LockReleaseAll done");
    2426             : #endif
    2427     1148796 : }
    2428             : 
    2429             : /*
    2430             :  * LockReleaseSession -- Release all session locks of the specified lock method
    2431             :  *      that are held by the current process.
    2432             :  */
    2433             : void
    2434         238 : LockReleaseSession(LOCKMETHODID lockmethodid)
    2435             : {
    2436             :     HASH_SEQ_STATUS status;
    2437             :     LOCALLOCK  *locallock;
    2438             : 
    2439         238 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2440           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2441             : 
    2442         238 :     hash_seq_init(&status, LockMethodLocalHash);
    2443             : 
    2444         452 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2445             :     {
    2446             :         /* Ignore items that are not of the specified lock method */
    2447         214 :         if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
    2448          20 :             continue;
    2449             : 
    2450         194 :         ReleaseLockIfHeld(locallock, true);
    2451             :     }
    2452         238 : }
    2453             : 
    2454             : /*
    2455             :  * LockReleaseCurrentOwner
    2456             :  *      Release all locks belonging to CurrentResourceOwner
    2457             :  *
    2458             :  * If the caller knows what those locks are, it can pass them as an array.
    2459             :  * That speeds up the call significantly, when a lot of locks are held.
    2460             :  * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
    2461             :  * table to find them.
    2462             :  */
    2463             : void
    2464        9458 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
    2465             : {
    2466        9458 :     if (locallocks == NULL)
    2467             :     {
    2468             :         HASH_SEQ_STATUS status;
    2469             :         LOCALLOCK  *locallock;
    2470             : 
    2471           8 :         hash_seq_init(&status, LockMethodLocalHash);
    2472             : 
    2473         544 :         while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2474         536 :             ReleaseLockIfHeld(locallock, false);
    2475             :     }
    2476             :     else
    2477             :     {
    2478             :         int         i;
    2479             : 
    2480       14484 :         for (i = nlocks - 1; i >= 0; i--)
    2481        5034 :             ReleaseLockIfHeld(locallocks[i], false);
    2482             :     }
    2483        9458 : }
    2484             : 
    2485             : /*
    2486             :  * ReleaseLockIfHeld
    2487             :  *      Release any session-level locks on this lockable object if sessionLock
    2488             :  *      is true; else, release any locks held by CurrentResourceOwner.
    2489             :  *
    2490             :  * It is tempting to pass this a ResourceOwner pointer (or NULL for session
    2491             :  * locks), but without refactoring LockRelease() we cannot support releasing
    2492             :  * locks belonging to resource owners other than CurrentResourceOwner.
    2493             :  * If we were to refactor, it'd be a good idea to fix it so we don't have to
    2494             :  * do a hashtable lookup of the locallock, too.  However, currently this
    2495             :  * function isn't used heavily enough to justify refactoring for its
    2496             :  * convenience.
    2497             :  */
    2498             : static void
    2499        5764 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
    2500             : {
    2501             :     ResourceOwner owner;
    2502             :     LOCALLOCKOWNER *lockOwners;
    2503             :     int         i;
    2504             : 
    2505             :     /* Identify owner for lock (must match LockRelease!) */
    2506        5764 :     if (sessionLock)
    2507         194 :         owner = NULL;
    2508             :     else
    2509        5570 :         owner = CurrentResourceOwner;
    2510             : 
    2511             :     /* Scan to see if there are any locks belonging to the target owner */
    2512        5764 :     lockOwners = locallock->lockOwners;
    2513        6150 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2514             :     {
    2515        5764 :         if (lockOwners[i].owner == owner)
    2516             :         {
    2517             :             Assert(lockOwners[i].nLocks > 0);
    2518        5378 :             if (lockOwners[i].nLocks < locallock->nLocks)
    2519             :             {
    2520             :                 /*
    2521             :                  * We will still hold this lock after forgetting this
    2522             :                  * ResourceOwner.
    2523             :                  */
    2524        1344 :                 locallock->nLocks -= lockOwners[i].nLocks;
    2525             :                 /* compact out unused slot */
    2526        1344 :                 locallock->numLockOwners--;
    2527        1344 :                 if (owner != NULL)
    2528        1344 :                     ResourceOwnerForgetLock(owner, locallock);
    2529        1344 :                 if (i < locallock->numLockOwners)
    2530           0 :                     lockOwners[i] = lockOwners[locallock->numLockOwners];
    2531             :             }
    2532             :             else
    2533             :             {
    2534             :                 Assert(lockOwners[i].nLocks == locallock->nLocks);
    2535             :                 /* We want to call LockRelease just once */
    2536        4034 :                 lockOwners[i].nLocks = 1;
    2537        4034 :                 locallock->nLocks = 1;
    2538        4034 :                 if (!LockRelease(&locallock->tag.lock,
    2539             :                                  locallock->tag.mode,
    2540             :                                  sessionLock))
    2541           0 :                     elog(WARNING, "ReleaseLockIfHeld: failed??");
    2542             :             }
    2543        5378 :             break;
    2544             :         }
    2545             :     }
    2546        5764 : }
    2547             : 
    2548             : /*
    2549             :  * LockReassignCurrentOwner
    2550             :  *      Reassign all locks belonging to CurrentResourceOwner to belong
    2551             :  *      to its parent resource owner.
    2552             :  *
    2553             :  * If the caller knows what those locks are, it can pass them as an array.
    2554             :  * That speeds up the call significantly, when a lot of locks are held
    2555             :  * (e.g pg_dump with a large schema).  Otherwise, pass NULL for locallocks,
    2556             :  * and we'll traverse through our hash table to find them.
    2557             :  */
    2558             : void
    2559      623950 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
    2560             : {
    2561      623950 :     ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
    2562             : 
    2563             :     Assert(parent != NULL);
    2564             : 
    2565      623950 :     if (locallocks == NULL)
    2566             :     {
    2567             :         HASH_SEQ_STATUS status;
    2568             :         LOCALLOCK  *locallock;
    2569             : 
    2570        6290 :         hash_seq_init(&status, LockMethodLocalHash);
    2571             : 
    2572      174060 :         while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2573      167770 :             LockReassignOwner(locallock, parent);
    2574             :     }
    2575             :     else
    2576             :     {
    2577             :         int         i;
    2578             : 
    2579     1297020 :         for (i = nlocks - 1; i >= 0; i--)
    2580      679360 :             LockReassignOwner(locallocks[i], parent);
    2581             :     }
    2582      623950 : }
    2583             : 
    2584             : /*
    2585             :  * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
    2586             :  * CurrentResourceOwner to its parent.
    2587             :  */
    2588             : static void
    2589      847130 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
    2590             : {
    2591             :     LOCALLOCKOWNER *lockOwners;
    2592             :     int         i;
    2593      847130 :     int         ic = -1;
    2594      847130 :     int         ip = -1;
    2595             : 
    2596             :     /*
    2597             :      * Scan to see if there are any locks belonging to current owner or its
    2598             :      * parent
    2599             :      */
    2600      847130 :     lockOwners = locallock->lockOwners;
    2601     1992122 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2602             :     {
    2603     1144992 :         if (lockOwners[i].owner == CurrentResourceOwner)
    2604      813866 :             ic = i;
    2605      331126 :         else if (lockOwners[i].owner == parent)
    2606      247448 :             ip = i;
    2607             :     }
    2608             : 
    2609      847130 :     if (ic < 0)
    2610       33264 :         return;                 /* no current locks */
    2611             : 
    2612      813866 :     if (ip < 0)
    2613             :     {
    2614             :         /* Parent has no slot, so just give it the child's slot */
    2615      599624 :         lockOwners[ic].owner = parent;
    2616      599624 :         ResourceOwnerRememberLock(parent, locallock);
    2617             :     }
    2618             :     else
    2619             :     {
    2620             :         /* Merge child's count with parent's */
    2621      214242 :         lockOwners[ip].nLocks += lockOwners[ic].nLocks;
    2622             :         /* compact out unused slot */
    2623      214242 :         locallock->numLockOwners--;
    2624      214242 :         if (ic < locallock->numLockOwners)
    2625        1342 :             lockOwners[ic] = lockOwners[locallock->numLockOwners];
    2626             :     }
    2627      813866 :     ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
    2628             : }
    2629             : 
    2630             : /*
    2631             :  * FastPathGrantRelationLock
    2632             :  *      Grant lock using per-backend fast-path array, if there is space.
    2633             :  */
    2634             : static bool
    2635    22894964 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
    2636             : {
    2637             :     uint32      f;
    2638    22894964 :     uint32      unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
    2639             : 
    2640             :     /* Scan for existing entry for this relid, remembering empty slot. */
    2641   387721484 :     for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2642             :     {
    2643   365439926 :         if (FAST_PATH_GET_BITS(MyProc, f) == 0)
    2644   317967300 :             unused_slot = f;
    2645    47472626 :         else if (MyProc->fpRelId[f] == relid)
    2646             :         {
    2647             :             Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
    2648      613406 :             FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
    2649      613406 :             return true;
    2650             :         }
    2651             :     }
    2652             : 
    2653             :     /* If no existing entry, use any empty slot. */
    2654    22281558 :     if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
    2655             :     {
    2656    22281558 :         MyProc->fpRelId[unused_slot] = relid;
    2657    22281558 :         FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
    2658    22281558 :         ++FastPathLocalUseCount;
    2659    22281558 :         return true;
    2660             :     }
    2661             : 
    2662             :     /* No existing entry, and no empty slot. */
    2663           0 :     return false;
    2664             : }
    2665             : 
    2666             : /*
    2667             :  * FastPathUnGrantRelationLock
    2668             :  *      Release fast-path lock, if present.  Update backend-private local
    2669             :  *      use count, while we're at it.
    2670             :  */
    2671             : static bool
    2672    23666250 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
    2673             : {
    2674             :     uint32      f;
    2675    23666250 :     bool        result = false;
    2676             : 
    2677    23666250 :     FastPathLocalUseCount = 0;
    2678   402326250 :     for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2679             :     {
    2680   378660000 :         if (MyProc->fpRelId[f] == relid
    2681    27859336 :             && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
    2682             :         {
    2683             :             Assert(!result);
    2684    22892230 :             FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
    2685    22892230 :             result = true;
    2686             :             /* we continue iterating so as to update FastPathLocalUseCount */
    2687             :         }
    2688   378660000 :         if (FAST_PATH_GET_BITS(MyProc, f) != 0)
    2689    57595238 :             ++FastPathLocalUseCount;
    2690             :     }
    2691    23666250 :     return result;
    2692             : }
    2693             : 
    2694             : /*
    2695             :  * FastPathTransferRelationLocks
    2696             :  *      Transfer locks matching the given lock tag from per-backend fast-path
    2697             :  *      arrays to the shared hash table.
    2698             :  *
    2699             :  * Returns true if successful, false if ran out of shared memory.
    2700             :  */
    2701             : static bool
    2702      327748 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
    2703             :                               uint32 hashcode)
    2704             : {
    2705      327748 :     LWLock     *partitionLock = LockHashPartitionLock(hashcode);
    2706      327748 :     Oid         relid = locktag->locktag_field2;
    2707             :     uint32      i;
    2708             : 
    2709             :     /*
    2710             :      * Every PGPROC that can potentially hold a fast-path lock is present in
    2711             :      * ProcGlobal->allProcs.  Prepared transactions are not, but any
    2712             :      * outstanding fast-path locks held by prepared transactions are
    2713             :      * transferred to the main lock table.
    2714             :      */
    2715    33813042 :     for (i = 0; i < ProcGlobal->allProcCount; i++)
    2716             :     {
    2717    33485294 :         PGPROC     *proc = &ProcGlobal->allProcs[i];
    2718             :         uint32      f;
    2719             : 
    2720    33485294 :         LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
    2721             : 
    2722             :         /*
    2723             :          * If the target backend isn't referencing the same database as the
    2724             :          * lock, then we needn't examine the individual relation IDs at all;
    2725             :          * none of them can be relevant.
    2726             :          *
    2727             :          * proc->databaseId is set at backend startup time and never changes
    2728             :          * thereafter, so it might be safe to perform this test before
    2729             :          * acquiring &proc->fpInfoLock.  In particular, it's certainly safe to
    2730             :          * assume that if the target backend holds any fast-path locks, it
    2731             :          * must have performed a memory-fencing operation (in particular, an
    2732             :          * LWLock acquisition) since setting proc->databaseId.  However, it's
    2733             :          * less clear that our backend is certain to have performed a memory
    2734             :          * fencing operation since the other backend set proc->databaseId.  So
    2735             :          * for now, we test it after acquiring the LWLock just to be safe.
    2736             :          */
    2737    33485294 :         if (proc->databaseId != locktag->locktag_field1)
    2738             :         {
    2739    16306408 :             LWLockRelease(&proc->fpInfoLock);
    2740    16306408 :             continue;
    2741             :         }
    2742             : 
    2743   292038654 :         for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2744             :         {
    2745             :             uint32      lockmode;
    2746             : 
    2747             :             /* Look for an allocated slot matching the given relid. */
    2748   274861646 :             if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
    2749   274859768 :                 continue;
    2750             : 
    2751             :             /* Find or create lock object. */
    2752        1878 :             LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2753        7512 :             for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
    2754             :                  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
    2755        5634 :                  ++lockmode)
    2756             :             {
    2757             :                 PROCLOCK   *proclock;
    2758             : 
    2759        5634 :                 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
    2760        3644 :                     continue;
    2761        1990 :                 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
    2762             :                                             hashcode, lockmode);
    2763        1990 :                 if (!proclock)
    2764             :                 {
    2765           0 :                     LWLockRelease(partitionLock);
    2766           0 :                     LWLockRelease(&proc->fpInfoLock);
    2767           0 :                     return false;
    2768             :                 }
    2769        1990 :                 GrantLock(proclock->tag.myLock, proclock, lockmode);
    2770        1990 :                 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
    2771             :             }
    2772        1878 :             LWLockRelease(partitionLock);
    2773             : 
    2774             :             /* No need to examine remaining slots. */
    2775        1878 :             break;
    2776             :         }
    2777    17178886 :         LWLockRelease(&proc->fpInfoLock);
    2778             :     }
    2779      327748 :     return true;
    2780             : }
    2781             : 
    2782             : /*
    2783             :  * FastPathGetRelationLockEntry
    2784             :  *      Return the PROCLOCK for a lock originally taken via the fast-path,
    2785             :  *      transferring it to the primary lock table if necessary.
    2786             :  *
    2787             :  * Note: caller takes care of updating the locallock object.
    2788             :  */
    2789             : static PROCLOCK *
    2790         760 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
    2791             : {
    2792         760 :     LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
    2793         760 :     LOCKTAG    *locktag = &locallock->tag.lock;
    2794         760 :     PROCLOCK   *proclock = NULL;
    2795         760 :     LWLock     *partitionLock = LockHashPartitionLock(locallock->hashcode);
    2796         760 :     Oid         relid = locktag->locktag_field2;
    2797             :     uint32      f;
    2798             : 
    2799         760 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2800             : 
    2801       12136 :     for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2802             :     {
    2803             :         uint32      lockmode;
    2804             : 
    2805             :         /* Look for an allocated slot matching the given relid. */
    2806       12120 :         if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
    2807       11376 :             continue;
    2808             : 
    2809             :         /* If we don't have a lock of the given mode, forget it! */
    2810         744 :         lockmode = locallock->tag.mode;
    2811         744 :         if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
    2812           0 :             break;
    2813             : 
    2814             :         /* Find or create lock object. */
    2815         744 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2816             : 
    2817         744 :         proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
    2818             :                                     locallock->hashcode, lockmode);
    2819         744 :         if (!proclock)
    2820             :         {
    2821           0 :             LWLockRelease(partitionLock);
    2822           0 :             LWLockRelease(&MyProc->fpInfoLock);
    2823           0 :             ereport(ERROR,
    2824             :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    2825             :                      errmsg("out of shared memory"),
    2826             :                      errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    2827             :         }
    2828         744 :         GrantLock(proclock->tag.myLock, proclock, lockmode);
    2829         744 :         FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
    2830             : 
    2831         744 :         LWLockRelease(partitionLock);
    2832             : 
    2833             :         /* No need to examine remaining slots. */
    2834         744 :         break;
    2835             :     }
    2836             : 
    2837         760 :     LWLockRelease(&MyProc->fpInfoLock);
    2838             : 
    2839             :     /* Lock may have already been transferred by some other backend. */
    2840         760 :     if (proclock == NULL)
    2841             :     {
    2842             :         LOCK       *lock;
    2843             :         PROCLOCKTAG proclocktag;
    2844             :         uint32      proclock_hashcode;
    2845             : 
    2846          16 :         LWLockAcquire(partitionLock, LW_SHARED);
    2847             : 
    2848          16 :         lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    2849             :                                                     locktag,
    2850             :                                                     locallock->hashcode,
    2851             :                                                     HASH_FIND,
    2852             :                                                     NULL);
    2853          16 :         if (!lock)
    2854           0 :             elog(ERROR, "failed to re-find shared lock object");
    2855             : 
    2856          16 :         proclocktag.myLock = lock;
    2857          16 :         proclocktag.myProc = MyProc;
    2858             : 
    2859          16 :         proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
    2860             :         proclock = (PROCLOCK *)
    2861          16 :             hash_search_with_hash_value(LockMethodProcLockHash,
    2862             :                                         &proclocktag,
    2863             :                                         proclock_hashcode,
    2864             :                                         HASH_FIND,
    2865             :                                         NULL);
    2866          16 :         if (!proclock)
    2867           0 :             elog(ERROR, "failed to re-find shared proclock object");
    2868          16 :         LWLockRelease(partitionLock);
    2869             :     }
    2870             : 
    2871         760 :     return proclock;
    2872             : }
    2873             : 
    2874             : /*
    2875             :  * GetLockConflicts
    2876             :  *      Get an array of VirtualTransactionIds of xacts currently holding locks
    2877             :  *      that would conflict with the specified lock/lockmode.
    2878             :  *      xacts merely awaiting such a lock are NOT reported.
    2879             :  *
    2880             :  * The result array is palloc'd and is terminated with an invalid VXID.
    2881             :  * *countp, if not null, is updated to the number of items set.
    2882             :  *
    2883             :  * Of course, the result could be out of date by the time it's returned, so
    2884             :  * use of this function has to be thought about carefully.  Similarly, a
    2885             :  * PGPROC with no "lxid" will be considered non-conflicting regardless of any
    2886             :  * lock it holds.  Existing callers don't care about a locker after that
    2887             :  * locker's pg_xact updates complete.  CommitTransaction() clears "lxid" after
    2888             :  * pg_xact updates and before releasing locks.
    2889             :  *
    2890             :  * Note we never include the current xact's vxid in the result array,
    2891             :  * since an xact never blocks itself.
    2892             :  */
    2893             : VirtualTransactionId *
    2894        2480 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
    2895             : {
    2896             :     static VirtualTransactionId *vxids;
    2897        2480 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    2898             :     LockMethod  lockMethodTable;
    2899             :     LOCK       *lock;
    2900             :     LOCKMASK    conflictMask;
    2901             :     dlist_iter  proclock_iter;
    2902             :     PROCLOCK   *proclock;
    2903             :     uint32      hashcode;
    2904             :     LWLock     *partitionLock;
    2905        2480 :     int         count = 0;
    2906        2480 :     int         fast_count = 0;
    2907             : 
    2908        2480 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2909           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2910        2480 :     lockMethodTable = LockMethods[lockmethodid];
    2911        2480 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
    2912           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
    2913             : 
    2914             :     /*
    2915             :      * Allocate memory to store results, and fill with InvalidVXID.  We only
    2916             :      * need enough space for MaxBackends + max_prepared_xacts + a terminator.
    2917             :      * InHotStandby allocate once in TopMemoryContext.
    2918             :      */
    2919        2480 :     if (InHotStandby)
    2920             :     {
    2921           8 :         if (vxids == NULL)
    2922           2 :             vxids = (VirtualTransactionId *)
    2923           2 :                 MemoryContextAlloc(TopMemoryContext,
    2924             :                                    sizeof(VirtualTransactionId) *
    2925           2 :                                    (MaxBackends + max_prepared_xacts + 1));
    2926             :     }
    2927             :     else
    2928        2472 :         vxids = (VirtualTransactionId *)
    2929        2472 :             palloc0(sizeof(VirtualTransactionId) *
    2930        2472 :                     (MaxBackends + max_prepared_xacts + 1));
    2931             : 
    2932             :     /* Compute hash code and partition lock, and look up conflicting modes. */
    2933        2480 :     hashcode = LockTagHashCode(locktag);
    2934        2480 :     partitionLock = LockHashPartitionLock(hashcode);
    2935        2480 :     conflictMask = lockMethodTable->conflictTab[lockmode];
    2936             : 
    2937             :     /*
    2938             :      * Fast path locks might not have been entered in the primary lock table.
    2939             :      * If the lock we're dealing with could conflict with such a lock, we must
    2940             :      * examine each backend's fast-path array for conflicts.
    2941             :      */
    2942        2480 :     if (ConflictsWithRelationFastPath(locktag, lockmode))
    2943             :     {
    2944             :         int         i;
    2945        2480 :         Oid         relid = locktag->locktag_field2;
    2946             :         VirtualTransactionId vxid;
    2947             : 
    2948             :         /*
    2949             :          * Iterate over relevant PGPROCs.  Anything held by a prepared
    2950             :          * transaction will have been transferred to the primary lock table,
    2951             :          * so we need not worry about those.  This is all a bit fuzzy, because
    2952             :          * new locks could be taken after we've visited a particular
    2953             :          * partition, but the callers had better be prepared to deal with that
    2954             :          * anyway, since the locks could equally well be taken between the
    2955             :          * time we return the value and the time the caller does something
    2956             :          * with it.
    2957             :          */
    2958      279500 :         for (i = 0; i < ProcGlobal->allProcCount; i++)
    2959             :         {
    2960      277020 :             PGPROC     *proc = &ProcGlobal->allProcs[i];
    2961             :             uint32      f;
    2962             : 
    2963             :             /* A backend never blocks itself */
    2964      277020 :             if (proc == MyProc)
    2965        2480 :                 continue;
    2966             : 
    2967      274540 :             LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
    2968             : 
    2969             :             /*
    2970             :              * If the target backend isn't referencing the same database as
    2971             :              * the lock, then we needn't examine the individual relation IDs
    2972             :              * at all; none of them can be relevant.
    2973             :              *
    2974             :              * See FastPathTransferRelationLocks() for discussion of why we do
    2975             :              * this test after acquiring the lock.
    2976             :              */
    2977      274540 :             if (proc->databaseId != locktag->locktag_field1)
    2978             :             {
    2979      113342 :                 LWLockRelease(&proc->fpInfoLock);
    2980      113342 :                 continue;
    2981             :             }
    2982             : 
    2983     2739900 :             for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2984             :             {
    2985             :                 uint32      lockmask;
    2986             : 
    2987             :                 /* Look for an allocated slot matching the given relid. */
    2988     2579072 :                 if (relid != proc->fpRelId[f])
    2989     2577092 :                     continue;
    2990        1980 :                 lockmask = FAST_PATH_GET_BITS(proc, f);
    2991        1980 :                 if (!lockmask)
    2992        1610 :                     continue;
    2993         370 :                 lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
    2994             : 
    2995             :                 /*
    2996             :                  * There can only be one entry per relation, so if we found it
    2997             :                  * and it doesn't conflict, we can skip the rest of the slots.
    2998             :                  */
    2999         370 :                 if ((lockmask & conflictMask) == 0)
    3000          10 :                     break;
    3001             : 
    3002             :                 /* Conflict! */
    3003         360 :                 GET_VXID_FROM_PGPROC(vxid, *proc);
    3004             : 
    3005         360 :                 if (VirtualTransactionIdIsValid(vxid))
    3006         360 :                     vxids[count++] = vxid;
    3007             :                 /* else, xact already committed or aborted */
    3008             : 
    3009             :                 /* No need to examine remaining slots. */
    3010         360 :                 break;
    3011             :             }
    3012             : 
    3013      161198 :             LWLockRelease(&proc->fpInfoLock);
    3014             :         }
    3015             :     }
    3016             : 
    3017             :     /* Remember how many fast-path conflicts we found. */
    3018        2480 :     fast_count = count;
    3019             : 
    3020             :     /*
    3021             :      * Look up the lock object matching the tag.
    3022             :      */
    3023        2480 :     LWLockAcquire(partitionLock, LW_SHARED);
    3024             : 
    3025        2480 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    3026             :                                                 locktag,
    3027             :                                                 hashcode,
    3028             :                                                 HASH_FIND,
    3029             :                                                 NULL);
    3030        2480 :     if (!lock)
    3031             :     {
    3032             :         /*
    3033             :          * If the lock object doesn't exist, there is nothing holding a lock
    3034             :          * on this lockable object.
    3035             :          */
    3036         140 :         LWLockRelease(partitionLock);
    3037         140 :         vxids[count].procNumber = INVALID_PROC_NUMBER;
    3038         140 :         vxids[count].localTransactionId = InvalidLocalTransactionId;
    3039         140 :         if (countp)
    3040           0 :             *countp = count;
    3041         140 :         return vxids;
    3042             :     }
    3043             : 
    3044             :     /*
    3045             :      * Examine each existing holder (or awaiter) of the lock.
    3046             :      */
    3047        4706 :     dlist_foreach(proclock_iter, &lock->procLocks)
    3048             :     {
    3049        2366 :         proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
    3050             : 
    3051        2366 :         if (conflictMask & proclock->holdMask)
    3052             :         {
    3053        2358 :             PGPROC     *proc = proclock->tag.myProc;
    3054             : 
    3055             :             /* A backend never blocks itself */
    3056        2358 :             if (proc != MyProc)
    3057             :             {
    3058             :                 VirtualTransactionId vxid;
    3059             : 
    3060          26 :                 GET_VXID_FROM_PGPROC(vxid, *proc);
    3061             : 
    3062          26 :                 if (VirtualTransactionIdIsValid(vxid))
    3063             :                 {
    3064             :                     int         i;
    3065             : 
    3066             :                     /* Avoid duplicate entries. */
    3067          36 :                     for (i = 0; i < fast_count; ++i)
    3068          12 :                         if (VirtualTransactionIdEquals(vxids[i], vxid))
    3069           2 :                             break;
    3070          26 :                     if (i >= fast_count)
    3071          24 :                         vxids[count++] = vxid;
    3072             :                 }
    3073             :                 /* else, xact already committed or aborted */
    3074             :             }
    3075             :         }
    3076             :     }
    3077             : 
    3078        2340 :     LWLockRelease(partitionLock);
    3079             : 
    3080        2340 :     if (count > MaxBackends + max_prepared_xacts)    /* should never happen */
    3081           0 :         elog(PANIC, "too many conflicting locks found");
    3082             : 
    3083        2340 :     vxids[count].procNumber = INVALID_PROC_NUMBER;
    3084        2340 :     vxids[count].localTransactionId = InvalidLocalTransactionId;
    3085        2340 :     if (countp)
    3086        2334 :         *countp = count;
    3087        2340 :     return vxids;
    3088             : }
    3089             : 
    3090             : /*
    3091             :  * Find a lock in the shared lock table and release it.  It is the caller's
    3092             :  * responsibility to verify that this is a sane thing to do.  (For example, it
    3093             :  * would be bad to release a lock here if there might still be a LOCALLOCK
    3094             :  * object with pointers to it.)
    3095             :  *
    3096             :  * We currently use this in two situations: first, to release locks held by
    3097             :  * prepared transactions on commit (see lock_twophase_postcommit); and second,
    3098             :  * to release locks taken via the fast-path, transferred to the main hash
    3099             :  * table, and then released (see LockReleaseAll).
    3100             :  */
    3101             : static void
    3102        4242 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
    3103             :                      LOCKTAG *locktag, LOCKMODE lockmode,
    3104             :                      bool decrement_strong_lock_count)
    3105             : {
    3106             :     LOCK       *lock;
    3107             :     PROCLOCK   *proclock;
    3108             :     PROCLOCKTAG proclocktag;
    3109             :     uint32      hashcode;
    3110             :     uint32      proclock_hashcode;
    3111             :     LWLock     *partitionLock;
    3112             :     bool        wakeupNeeded;
    3113             : 
    3114        4242 :     hashcode = LockTagHashCode(locktag);
    3115        4242 :     partitionLock = LockHashPartitionLock(hashcode);
    3116             : 
    3117        4242 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    3118             : 
    3119             :     /*
    3120             :      * Re-find the lock object (it had better be there).
    3121             :      */
    3122        4242 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    3123             :                                                 locktag,
    3124             :                                                 hashcode,
    3125             :                                                 HASH_FIND,
    3126             :                                                 NULL);
    3127        4242 :     if (!lock)
    3128           0 :         elog(PANIC, "failed to re-find shared lock object");
    3129             : 
    3130             :     /*
    3131             :      * Re-find the proclock object (ditto).
    3132             :      */
    3133        4242 :     proclocktag.myLock = lock;
    3134        4242 :     proclocktag.myProc = proc;
    3135             : 
    3136        4242 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    3137             : 
    3138        4242 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    3139             :                                                         &proclocktag,
    3140             :                                                         proclock_hashcode,
    3141             :                                                         HASH_FIND,
    3142             :                                                         NULL);
    3143        4242 :     if (!proclock)
    3144           0 :         elog(PANIC, "failed to re-find shared proclock object");
    3145             : 
    3146             :     /*
    3147             :      * Double-check that we are actually holding a lock of the type we want to
    3148             :      * release.
    3149             :      */
    3150        4242 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    3151             :     {
    3152             :         PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
    3153           0 :         LWLockRelease(partitionLock);
    3154           0 :         elog(WARNING, "you don't own a lock of type %s",
    3155             :              lockMethodTable->lockModeNames[lockmode]);
    3156           0 :         return;
    3157             :     }
    3158             : 
    3159             :     /*
    3160             :      * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
    3161             :      */
    3162        4242 :     wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
    3163             : 
    3164        4242 :     CleanUpLock(lock, proclock,
    3165             :                 lockMethodTable, hashcode,
    3166             :                 wakeupNeeded);
    3167             : 
    3168        4242 :     LWLockRelease(partitionLock);
    3169             : 
    3170             :     /*
    3171             :      * Decrement strong lock count.  This logic is needed only for 2PC.
    3172             :      */
    3173        4242 :     if (decrement_strong_lock_count
    3174        1750 :         && ConflictsWithRelationFastPath(locktag, lockmode))
    3175             :     {
    3176         142 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
    3177             : 
    3178         142 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    3179             :         Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    3180         142 :         FastPathStrongRelationLocks->count[fasthashcode]--;
    3181         142 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    3182             :     }
    3183             : }
    3184             : 
    3185             : /*
    3186             :  * CheckForSessionAndXactLocks
    3187             :  *      Check to see if transaction holds both session-level and xact-level
    3188             :  *      locks on the same object; if so, throw an error.
    3189             :  *
    3190             :  * If we have both session- and transaction-level locks on the same object,
    3191             :  * PREPARE TRANSACTION must fail.  This should never happen with regular
    3192             :  * locks, since we only take those at session level in some special operations
    3193             :  * like VACUUM.  It's possible to hit this with advisory locks, though.
    3194             :  *
    3195             :  * It would be nice if we could keep the session hold and give away the
    3196             :  * transactional hold to the prepared xact.  However, that would require two
    3197             :  * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
    3198             :  * available when it comes time for PostPrepare_Locks to do the deed.
    3199             :  * So for now, we error out while we can still do so safely.
    3200             :  *
    3201             :  * Since the LOCALLOCK table stores a separate entry for each lockmode,
    3202             :  * we can't implement this check by examining LOCALLOCK entries in isolation.
    3203             :  * We must build a transient hashtable that is indexed by locktag only.
    3204             :  */
    3205             : static void
    3206         746 : CheckForSessionAndXactLocks(void)
    3207             : {
    3208             :     typedef struct
    3209             :     {
    3210             :         LOCKTAG     lock;       /* identifies the lockable object */
    3211             :         bool        sessLock;   /* is any lockmode held at session level? */
    3212             :         bool        xactLock;   /* is any lockmode held at xact level? */
    3213             :     } PerLockTagEntry;
    3214             : 
    3215             :     HASHCTL     hash_ctl;
    3216             :     HTAB       *lockhtab;
    3217             :     HASH_SEQ_STATUS status;
    3218             :     LOCALLOCK  *locallock;
    3219             : 
    3220             :     /* Create a local hash table keyed by LOCKTAG only */
    3221         746 :     hash_ctl.keysize = sizeof(LOCKTAG);
    3222         746 :     hash_ctl.entrysize = sizeof(PerLockTagEntry);
    3223         746 :     hash_ctl.hcxt = CurrentMemoryContext;
    3224             : 
    3225         746 :     lockhtab = hash_create("CheckForSessionAndXactLocks table",
    3226             :                            256, /* arbitrary initial size */
    3227             :                            &hash_ctl,
    3228             :                            HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
    3229             : 
    3230             :     /* Scan local lock table to find entries for each LOCKTAG */
    3231         746 :     hash_seq_init(&status, LockMethodLocalHash);
    3232             : 
    3233        2494 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3234             :     {
    3235        1752 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3236             :         PerLockTagEntry *hentry;
    3237             :         bool        found;
    3238             :         int         i;
    3239             : 
    3240             :         /*
    3241             :          * Ignore VXID locks.  We don't want those to be held by prepared
    3242             :          * transactions, since they aren't meaningful after a restart.
    3243             :          */
    3244        1752 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3245           0 :             continue;
    3246             : 
    3247             :         /* Ignore it if we don't actually hold the lock */
    3248        1752 :         if (locallock->nLocks <= 0)
    3249           0 :             continue;
    3250             : 
    3251             :         /* Otherwise, find or make an entry in lockhtab */
    3252        1752 :         hentry = (PerLockTagEntry *) hash_search(lockhtab,
    3253        1752 :                                                  &locallock->tag.lock,
    3254             :                                                  HASH_ENTER, &found);
    3255        1752 :         if (!found)             /* initialize, if newly created */
    3256        1646 :             hentry->sessLock = hentry->xactLock = false;
    3257             : 
    3258             :         /* Scan to see if we hold lock at session or xact level or both */
    3259        3504 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3260             :         {
    3261        1752 :             if (lockOwners[i].owner == NULL)
    3262          18 :                 hentry->sessLock = true;
    3263             :             else
    3264        1734 :                 hentry->xactLock = true;
    3265             :         }
    3266             : 
    3267             :         /*
    3268             :          * We can throw error immediately when we see both types of locks; no
    3269             :          * need to wait around to see if there are more violations.
    3270             :          */
    3271        1752 :         if (hentry->sessLock && hentry->xactLock)
    3272           4 :             ereport(ERROR,
    3273             :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3274             :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3275             :     }
    3276             : 
    3277             :     /* Success, so clean up */
    3278         742 :     hash_destroy(lockhtab);
    3279         742 : }
    3280             : 
    3281             : /*
    3282             :  * AtPrepare_Locks
    3283             :  *      Do the preparatory work for a PREPARE: make 2PC state file records
    3284             :  *      for all locks currently held.
    3285             :  *
    3286             :  * Session-level locks are ignored, as are VXID locks.
    3287             :  *
    3288             :  * For the most part, we don't need to touch shared memory for this ---
    3289             :  * all the necessary state information is in the locallock table.
    3290             :  * Fast-path locks are an exception, however: we move any such locks to
    3291             :  * the main table before allowing PREPARE TRANSACTION to succeed.
    3292             :  */
    3293             : void
    3294         746 : AtPrepare_Locks(void)
    3295             : {
    3296             :     HASH_SEQ_STATUS status;
    3297             :     LOCALLOCK  *locallock;
    3298             : 
    3299             :     /* First, verify there aren't locks of both xact and session level */
    3300         746 :     CheckForSessionAndXactLocks();
    3301             : 
    3302             :     /* Now do the per-locallock cleanup work */
    3303         742 :     hash_seq_init(&status, LockMethodLocalHash);
    3304             : 
    3305        2484 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3306             :     {
    3307             :         TwoPhaseLockRecord record;
    3308        1742 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3309             :         bool        haveSessionLock;
    3310             :         bool        haveXactLock;
    3311             :         int         i;
    3312             : 
    3313             :         /*
    3314             :          * Ignore VXID locks.  We don't want those to be held by prepared
    3315             :          * transactions, since they aren't meaningful after a restart.
    3316             :          */
    3317        1742 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3318          14 :             continue;
    3319             : 
    3320             :         /* Ignore it if we don't actually hold the lock */
    3321        1742 :         if (locallock->nLocks <= 0)
    3322           0 :             continue;
    3323             : 
    3324             :         /* Scan to see whether we hold it at session or transaction level */
    3325        1742 :         haveSessionLock = haveXactLock = false;
    3326        3484 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3327             :         {
    3328        1742 :             if (lockOwners[i].owner == NULL)
    3329          14 :                 haveSessionLock = true;
    3330             :             else
    3331        1728 :                 haveXactLock = true;
    3332             :         }
    3333             : 
    3334             :         /* Ignore it if we have only session lock */
    3335        1742 :         if (!haveXactLock)
    3336          14 :             continue;
    3337             : 
    3338             :         /* This can't happen, because we already checked it */
    3339        1728 :         if (haveSessionLock)
    3340           0 :             ereport(ERROR,
    3341             :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3342             :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3343             : 
    3344             :         /*
    3345             :          * If the local lock was taken via the fast-path, we need to move it
    3346             :          * to the primary lock table, or just get a pointer to the existing
    3347             :          * primary lock table entry if by chance it's already been
    3348             :          * transferred.
    3349             :          */
    3350        1728 :         if (locallock->proclock == NULL)
    3351             :         {
    3352         760 :             locallock->proclock = FastPathGetRelationLockEntry(locallock);
    3353         760 :             locallock->lock = locallock->proclock->tag.myLock;
    3354             :         }
    3355             : 
    3356             :         /*
    3357             :          * Arrange to not release any strong lock count held by this lock
    3358             :          * entry.  We must retain the count until the prepared transaction is
    3359             :          * committed or rolled back.
    3360             :          */
    3361        1728 :         locallock->holdsStrongLockCount = false;
    3362             : 
    3363             :         /*
    3364             :          * Create a 2PC record.
    3365             :          */
    3366        1728 :         memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
    3367        1728 :         record.lockmode = locallock->tag.mode;
    3368             : 
    3369        1728 :         RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
    3370             :                                &record, sizeof(TwoPhaseLockRecord));
    3371             :     }
    3372         742 : }
    3373             : 
    3374             : /*
    3375             :  * PostPrepare_Locks
    3376             :  *      Clean up after successful PREPARE
    3377             :  *
    3378             :  * Here, we want to transfer ownership of our locks to a dummy PGPROC
    3379             :  * that's now associated with the prepared transaction, and we want to
    3380             :  * clean out the corresponding entries in the LOCALLOCK table.
    3381             :  *
    3382             :  * Note: by removing the LOCALLOCK entries, we are leaving dangling
    3383             :  * pointers in the transaction's resource owner.  This is OK at the
    3384             :  * moment since resowner.c doesn't try to free locks retail at a toplevel
    3385             :  * transaction commit or abort.  We could alternatively zero out nLocks
    3386             :  * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
    3387             :  * but that probably costs more cycles.
    3388             :  */
    3389             : void
    3390         742 : PostPrepare_Locks(TransactionId xid)
    3391             : {
    3392         742 :     PGPROC     *newproc = TwoPhaseGetDummyProc(xid, false);
    3393             :     HASH_SEQ_STATUS status;
    3394             :     LOCALLOCK  *locallock;
    3395             :     LOCK       *lock;
    3396             :     PROCLOCK   *proclock;
    3397             :     PROCLOCKTAG proclocktag;
    3398             :     int         partition;
    3399             : 
    3400             :     /* Can't prepare a lock group follower. */
    3401             :     Assert(MyProc->lockGroupLeader == NULL ||
    3402             :            MyProc->lockGroupLeader == MyProc);
    3403             : 
    3404             :     /* This is a critical section: any error means big trouble */
    3405         742 :     START_CRIT_SECTION();
    3406             : 
    3407             :     /*
    3408             :      * First we run through the locallock table and get rid of unwanted
    3409             :      * entries, then we scan the process's proclocks and transfer them to the
    3410             :      * target proc.
    3411             :      *
    3412             :      * We do this separately because we may have multiple locallock entries
    3413             :      * pointing to the same proclock, and we daren't end up with any dangling
    3414             :      * pointers.
    3415             :      */
    3416         742 :     hash_seq_init(&status, LockMethodLocalHash);
    3417             : 
    3418        2484 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3419             :     {
    3420        1742 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3421             :         bool        haveSessionLock;
    3422             :         bool        haveXactLock;
    3423             :         int         i;
    3424             : 
    3425        1742 :         if (locallock->proclock == NULL || locallock->lock == NULL)
    3426             :         {
    3427             :             /*
    3428             :              * We must've run out of shared memory while trying to set up this
    3429             :              * lock.  Just forget the local entry.
    3430             :              */
    3431             :             Assert(locallock->nLocks == 0);
    3432           0 :             RemoveLocalLock(locallock);
    3433           0 :             continue;
    3434             :         }
    3435             : 
    3436             :         /* Ignore VXID locks */
    3437        1742 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3438           0 :             continue;
    3439             : 
    3440             :         /* Scan to see whether we hold it at session or transaction level */
    3441        1742 :         haveSessionLock = haveXactLock = false;
    3442        3484 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3443             :         {
    3444        1742 :             if (lockOwners[i].owner == NULL)
    3445          14 :                 haveSessionLock = true;
    3446             :             else
    3447        1728 :                 haveXactLock = true;
    3448             :         }
    3449             : 
    3450             :         /* Ignore it if we have only session lock */
    3451        1742 :         if (!haveXactLock)
    3452          14 :             continue;
    3453             : 
    3454             :         /* This can't happen, because we already checked it */
    3455        1728 :         if (haveSessionLock)
    3456           0 :             ereport(PANIC,
    3457             :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3458             :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3459             : 
    3460             :         /* Mark the proclock to show we need to release this lockmode */
    3461        1728 :         if (locallock->nLocks > 0)
    3462        1728 :             locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
    3463             : 
    3464             :         /* And remove the locallock hashtable entry */
    3465        1728 :         RemoveLocalLock(locallock);
    3466             :     }
    3467             : 
    3468             :     /*
    3469             :      * Now, scan each lock partition separately.
    3470             :      */
    3471       12614 :     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
    3472             :     {
    3473             :         LWLock     *partitionLock;
    3474       11872 :         dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
    3475             :         dlist_mutable_iter proclock_iter;
    3476             : 
    3477       11872 :         partitionLock = LockHashPartitionLockByIndex(partition);
    3478             : 
    3479             :         /*
    3480             :          * If the proclock list for this partition is empty, we can skip
    3481             :          * acquiring the partition lock.  This optimization is safer than the
    3482             :          * situation in LockReleaseAll, because we got rid of any fast-path
    3483             :          * locks during AtPrepare_Locks, so there cannot be any case where
    3484             :          * another backend is adding something to our lists now.  For safety,
    3485             :          * though, we code this the same way as in LockReleaseAll.
    3486             :          */
    3487       11872 :         if (dlist_is_empty(procLocks))
    3488       10250 :             continue;           /* needn't examine this partition */
    3489             : 
    3490        1622 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    3491             : 
    3492        3322 :         dlist_foreach_modify(proclock_iter, procLocks)
    3493             :         {
    3494        1700 :             proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
    3495             : 
    3496             :             Assert(proclock->tag.myProc == MyProc);
    3497             : 
    3498        1700 :             lock = proclock->tag.myLock;
    3499             : 
    3500             :             /* Ignore VXID locks */
    3501        1700 :             if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3502          60 :                 continue;
    3503             : 
    3504             :             PROCLOCK_PRINT("PostPrepare_Locks", proclock);
    3505             :             LOCK_PRINT("PostPrepare_Locks", lock, 0);
    3506             :             Assert(lock->nRequested >= 0);
    3507             :             Assert(lock->nGranted >= 0);
    3508             :             Assert(lock->nGranted <= lock->nRequested);
    3509             :             Assert((proclock->holdMask & ~lock->grantMask) == 0);
    3510             : 
    3511             :             /* Ignore it if nothing to release (must be a session lock) */
    3512        1640 :             if (proclock->releaseMask == 0)
    3513          14 :                 continue;
    3514             : 
    3515             :             /* Else we should be releasing all locks */
    3516        1626 :             if (proclock->releaseMask != proclock->holdMask)
    3517           0 :                 elog(PANIC, "we seem to have dropped a bit somewhere");
    3518             : 
    3519             :             /*
    3520             :              * We cannot simply modify proclock->tag.myProc to reassign
    3521             :              * ownership of the lock, because that's part of the hash key and
    3522             :              * the proclock would then be in the wrong hash chain.  Instead
    3523             :              * use hash_update_hash_key.  (We used to create a new hash entry,
    3524             :              * but that risks out-of-memory failure if other processes are
    3525             :              * busy making proclocks too.)  We must unlink the proclock from
    3526             :              * our procLink chain and put it into the new proc's chain, too.
    3527             :              *
    3528             :              * Note: the updated proclock hash key will still belong to the
    3529             :              * same hash partition, cf proclock_hash().  So the partition lock
    3530             :              * we already hold is sufficient for this.
    3531             :              */
    3532        1626 :             dlist_delete(&proclock->procLink);
    3533             : 
    3534             :             /*
    3535             :              * Create the new hash key for the proclock.
    3536             :              */
    3537        1626 :             proclocktag.myLock = lock;
    3538        1626 :             proclocktag.myProc = newproc;
    3539             : 
    3540             :             /*
    3541             :              * Update groupLeader pointer to point to the new proc.  (We'd
    3542             :              * better not be a member of somebody else's lock group!)
    3543             :              */
    3544             :             Assert(proclock->groupLeader == proclock->tag.myProc);
    3545        1626 :             proclock->groupLeader = newproc;
    3546             : 
    3547             :             /*
    3548             :              * Update the proclock.  We should not find any existing entry for
    3549             :              * the same hash key, since there can be only one entry for any
    3550             :              * given lock with my own proc.
    3551             :              */
    3552        1626 :             if (!hash_update_hash_key(LockMethodProcLockHash,
    3553             :                                       proclock,
    3554             :                                       &proclocktag))
    3555           0 :                 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
    3556             : 
    3557             :             /* Re-link into the new proc's proclock list */
    3558        1626 :             dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
    3559             : 
    3560             :             PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
    3561             :         }                       /* loop over PROCLOCKs within this partition */
    3562             : 
    3563        1622 :         LWLockRelease(partitionLock);
    3564             :     }                           /* loop over partitions */
    3565             : 
    3566         742 :     END_CRIT_SECTION();
    3567         742 : }
    3568             : 
    3569             : 
    3570             : /*
    3571             :  * Estimate shared-memory space used for lock tables
    3572             :  */
    3573             : Size
    3574        3326 : LockShmemSize(void)
    3575             : {
    3576        3326 :     Size        size = 0;
    3577             :     long        max_table_size;
    3578             : 
    3579             :     /* lock hash table */
    3580        3326 :     max_table_size = NLOCKENTS();
    3581        3326 :     size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
    3582             : 
    3583             :     /* proclock hash table */
    3584        3326 :     max_table_size *= 2;
    3585        3326 :     size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
    3586             : 
    3587             :     /*
    3588             :      * Since NLOCKENTS is only an estimate, add 10% safety margin.
    3589             :      */
    3590        3326 :     size = add_size(size, size / 10);
    3591             : 
    3592        3326 :     return size;
    3593             : }
    3594             : 
    3595             : /*
    3596             :  * GetLockStatusData - Return a summary of the lock manager's internal
    3597             :  * status, for use in a user-level reporting function.
    3598             :  *
    3599             :  * The return data consists of an array of LockInstanceData objects,
    3600             :  * which are a lightly abstracted version of the PROCLOCK data structures,
    3601             :  * i.e. there is one entry for each unique lock and interested PGPROC.
    3602             :  * It is the caller's responsibility to match up related items (such as
    3603             :  * references to the same lockable object or PGPROC) if wanted.
    3604             :  *
    3605             :  * The design goal is to hold the LWLocks for as short a time as possible;
    3606             :  * thus, this function simply makes a copy of the necessary data and releases
    3607             :  * the locks, allowing the caller to contemplate and format the data for as
    3608             :  * long as it pleases.
    3609             :  */
    3610             : LockData *
    3611         668 : GetLockStatusData(void)
    3612             : {
    3613             :     LockData   *data;
    3614             :     PROCLOCK   *proclock;
    3615             :     HASH_SEQ_STATUS seqstat;
    3616             :     int         els;
    3617             :     int         el;
    3618             :     int         i;
    3619             : 
    3620         668 :     data = (LockData *) palloc(sizeof(LockData));
    3621             : 
    3622             :     /* Guess how much space we'll need. */
    3623         668 :     els = MaxBackends;
    3624         668 :     el = 0;
    3625         668 :     data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
    3626             : 
    3627             :     /*
    3628             :      * First, we iterate through the per-backend fast-path arrays, locking
    3629             :      * them one at a time.  This might produce an inconsistent picture of the
    3630             :      * system state, but taking all of those LWLocks at the same time seems
    3631             :      * impractical (in particular, note MAX_SIMUL_LWLOCKS).  It shouldn't
    3632             :      * matter too much, because none of these locks can be involved in lock
    3633             :      * conflicts anyway - anything that might must be present in the main lock
    3634             :      * table.  (For the same reason, we don't sweat about making leaderPid
    3635             :      * completely valid.  We cannot safely dereference another backend's
    3636             :      * lockGroupLeader field without holding all lock partition locks, and
    3637             :      * it's not worth that.)
    3638             :      */
    3639       72492 :     for (i = 0; i < ProcGlobal->allProcCount; ++i)
    3640             :     {
    3641       71824 :         PGPROC     *proc = &ProcGlobal->allProcs[i];
    3642             :         uint32      f;
    3643             : 
    3644       71824 :         LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
    3645             : 
    3646     1221008 :         for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
    3647             :         {
    3648             :             LockInstanceData *instance;
    3649     1149184 :             uint32      lockbits = FAST_PATH_GET_BITS(proc, f);
    3650             : 
    3651             :             /* Skip unallocated slots. */
    3652     1149184 :             if (!lockbits)
    3653     1143536 :                 continue;
    3654             : 
    3655        5648 :             if (el >= els)
    3656             :             {
    3657           8 :                 els += MaxBackends;
    3658           8 :                 data->locks = (LockInstanceData *)
    3659           8 :                     repalloc(data->locks, sizeof(LockInstanceData) * els);
    3660             :             }
    3661             : 
    3662        5648 :             instance = &data->locks[el];
    3663        5648 :             SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
    3664             :                                  proc->fpRelId[f]);
    3665        5648 :             instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
    3666        5648 :             instance->waitLockMode = NoLock;
    3667        5648 :             instance->vxid.procNumber = proc->vxid.procNumber;
    3668        5648 :             instance->vxid.localTransactionId = proc->vxid.lxid;
    3669        5648 :             instance->pid = proc->pid;
    3670        5648 :             instance->leaderPid = proc->pid;
    3671        5648 :             instance->fastpath = true;
    3672             : 
    3673             :             /*
    3674             :              * Successfully taking fast path lock means there were no
    3675             :              * conflicting locks.
    3676             :              */
    3677        5648 :             instance->waitStart = 0;
    3678             : 
    3679        5648 :             el++;
    3680             :         }
    3681             : 
    3682       71824 :         if (proc->fpVXIDLock)
    3683             :         {
    3684             :             VirtualTransactionId vxid;
    3685             :             LockInstanceData *instance;
    3686             : 
    3687        1850 :             if (el >= els)
    3688             :             {
    3689           6 :                 els += MaxBackends;
    3690           6 :                 data->locks = (LockInstanceData *)
    3691           6 :                     repalloc(data->locks, sizeof(LockInstanceData) * els);
    3692             :             }
    3693             : 
    3694        1850 :             vxid.procNumber = proc->vxid.procNumber;
    3695        1850 :             vxid.localTransactionId = proc->fpLocalTransactionId;
    3696             : 
    3697        1850 :             instance = &data->locks[el];
    3698        1850 :             SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
    3699        1850 :             instance->holdMask = LOCKBIT_ON(ExclusiveLock);
    3700        1850 :             instance->waitLockMode = NoLock;
    3701        1850 :             instance->vxid.procNumber = proc->vxid.procNumber;
    3702        1850 :             instance->vxid.localTransactionId = proc->vxid.lxid;
    3703        1850 :             instance->pid = proc->pid;
    3704        1850 :             instance->leaderPid = proc->pid;
    3705        1850 :             instance->fastpath = true;
    3706        1850 :             instance->waitStart = 0;
    3707             : 
    3708        1850 :             el++;
    3709             :         }
    3710             : 
    3711       71824 :         LWLockRelease(&proc->fpInfoLock);
    3712             :     }
    3713             : 
    3714             :     /*
    3715             :      * Next, acquire lock on the entire shared lock data structure.  We do
    3716             :      * this so that, at least for locks in the primary lock table, the state
    3717             :      * will be self-consistent.
    3718             :      *
    3719             :      * Since this is a read-only operation, we take shared instead of
    3720             :      * exclusive lock.  There's not a whole lot of point to this, because all
    3721             :      * the normal operations require exclusive lock, but it doesn't hurt
    3722             :      * anything either. It will at least allow two backends to do
    3723             :      * GetLockStatusData in parallel.
    3724             :      *
    3725             :      * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
    3726             :      */
    3727       11356 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3728       10688 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3729             : 
    3730             :     /* Now we can safely count the number of proclocks */
    3731         668 :     data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
    3732         668 :     if (data->nelements > els)
    3733             :     {
    3734          14 :         els = data->nelements;
    3735          14 :         data->locks = (LockInstanceData *)
    3736          14 :             repalloc(data->locks, sizeof(LockInstanceData) * els);
    3737             :     }
    3738             : 
    3739             :     /* Now scan the tables to copy the data */
    3740         668 :     hash_seq_init(&seqstat, LockMethodProcLockHash);
    3741             : 
    3742        4756 :     while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
    3743             :     {
    3744        4088 :         PGPROC     *proc = proclock->tag.myProc;
    3745        4088 :         LOCK       *lock = proclock->tag.myLock;
    3746        4088 :         LockInstanceData *instance = &data->locks[el];
    3747             : 
    3748        4088 :         memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
    3749        4088 :         instance->holdMask = proclock->holdMask;
    3750        4088 :         if (proc->waitLock == proclock->tag.myLock)
    3751          18 :             instance->waitLockMode = proc->waitLockMode;
    3752             :         else
    3753        4070 :             instance->waitLockMode = NoLock;
    3754        4088 :         instance->vxid.procNumber = proc->vxid.procNumber;
    3755        4088 :         instance->vxid.localTransactionId = proc->vxid.lxid;
    3756        4088 :         instance->pid = proc->pid;
    3757        4088 :         instance->leaderPid = proclock->groupLeader->pid;
    3758        4088 :         instance->fastpath = false;
    3759        4088 :         instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
    3760             : 
    3761        4088 :         el++;
    3762             :     }
    3763             : 
    3764             :     /*
    3765             :      * And release locks.  We do this in reverse order for two reasons: (1)
    3766             :      * Anyone else who needs more than one of the locks will be trying to lock
    3767             :      * them in increasing order; we don't want to release the other process
    3768             :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    3769             :      * behavior inside LWLockRelease.
    3770             :      */
    3771       11356 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    3772       10688 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    3773             : 
    3774             :     Assert(el == data->nelements);
    3775             : 
    3776         668 :     return data;
    3777             : }
    3778             : 
    3779             : /*
    3780             :  * GetBlockerStatusData - Return a summary of the lock manager's state
    3781             :  * concerning locks that are blocking the specified PID or any member of
    3782             :  * the PID's lock group, for use in a user-level reporting function.
    3783             :  *
    3784             :  * For each PID within the lock group that is awaiting some heavyweight lock,
    3785             :  * the return data includes an array of LockInstanceData objects, which are
    3786             :  * the same data structure used by GetLockStatusData; but unlike that function,
    3787             :  * this one reports only the PROCLOCKs associated with the lock that that PID
    3788             :  * is blocked on.  (Hence, all the locktags should be the same for any one
    3789             :  * blocked PID.)  In addition, we return an array of the PIDs of those backends
    3790             :  * that are ahead of the blocked PID in the lock's wait queue.  These can be
    3791             :  * compared with the PIDs in the LockInstanceData objects to determine which
    3792             :  * waiters are ahead of or behind the blocked PID in the queue.
    3793             :  *
    3794             :  * If blocked_pid isn't a valid backend PID or nothing in its lock group is
    3795             :  * waiting on any heavyweight lock, return empty arrays.
    3796             :  *
    3797             :  * The design goal is to hold the LWLocks for as short a time as possible;
    3798             :  * thus, this function simply makes a copy of the necessary data and releases
    3799             :  * the locks, allowing the caller to contemplate and format the data for as
    3800             :  * long as it pleases.
    3801             :  */
    3802             : BlockedProcsData *
    3803        3342 : GetBlockerStatusData(int blocked_pid)
    3804             : {
    3805             :     BlockedProcsData *data;
    3806             :     PGPROC     *proc;
    3807             :     int         i;
    3808             : 
    3809        3342 :     data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
    3810             : 
    3811             :     /*
    3812             :      * Guess how much space we'll need, and preallocate.  Most of the time
    3813             :      * this will avoid needing to do repalloc while holding the LWLocks.  (We
    3814             :      * assume, but check with an Assert, that MaxBackends is enough entries
    3815             :      * for the procs[] array; the other two could need enlargement, though.)
    3816             :      */
    3817        3342 :     data->nprocs = data->nlocks = data->npids = 0;
    3818        3342 :     data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
    3819        3342 :     data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
    3820        3342 :     data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
    3821        3342 :     data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
    3822             : 
    3823             :     /*
    3824             :      * In order to search the ProcArray for blocked_pid and assume that that
    3825             :      * entry won't immediately disappear under us, we must hold ProcArrayLock.
    3826             :      * In addition, to examine the lock grouping fields of any other backend,
    3827             :      * we must hold all the hash partition locks.  (Only one of those locks is
    3828             :      * actually relevant for any one lock group, but we can't know which one
    3829             :      * ahead of time.)  It's fairly annoying to hold all those locks
    3830             :      * throughout this, but it's no worse than GetLockStatusData(), and it
    3831             :      * does have the advantage that we're guaranteed to return a
    3832             :      * self-consistent instantaneous state.
    3833             :      */
    3834        3342 :     LWLockAcquire(ProcArrayLock, LW_SHARED);
    3835             : 
    3836        3342 :     proc = BackendPidGetProcWithLock(blocked_pid);
    3837             : 
    3838             :     /* Nothing to do if it's gone */
    3839        3342 :     if (proc != NULL)
    3840             :     {
    3841             :         /*
    3842             :          * Acquire lock on the entire shared lock data structure.  See notes
    3843             :          * in GetLockStatusData().
    3844             :          */
    3845       56814 :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3846       53472 :             LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3847             : 
    3848        3342 :         if (proc->lockGroupLeader == NULL)
    3849             :         {
    3850             :             /* Easy case, proc is not a lock group member */
    3851        2876 :             GetSingleProcBlockerStatusData(proc, data);
    3852             :         }
    3853             :         else
    3854             :         {
    3855             :             /* Examine all procs in proc's lock group */
    3856             :             dlist_iter  iter;
    3857             : 
    3858        1028 :             dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
    3859             :             {
    3860             :                 PGPROC     *memberProc;
    3861             : 
    3862         562 :                 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
    3863         562 :                 GetSingleProcBlockerStatusData(memberProc, data);
    3864             :             }
    3865             :         }
    3866             : 
    3867             :         /*
    3868             :          * And release locks.  See notes in GetLockStatusData().
    3869             :          */
    3870       56814 :         for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    3871       53472 :             LWLockRelease(LockHashPartitionLockByIndex(i));
    3872             : 
    3873             :         Assert(data->nprocs <= data->maxprocs);
    3874             :     }
    3875             : 
    3876        3342 :     LWLockRelease(ProcArrayLock);
    3877             : 
    3878        3342 :     return data;
    3879             : }
    3880             : 
    3881             : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
    3882             : static void
    3883        3438 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
    3884             : {
    3885        3438 :     LOCK       *theLock = blocked_proc->waitLock;
    3886             :     BlockedProcData *bproc;
    3887             :     dlist_iter  proclock_iter;
    3888             :     dlist_iter  proc_iter;
    3889             :     dclist_head *waitQueue;
    3890             :     int         queue_size;
    3891             : 
    3892             :     /* Nothing to do if this proc is not blocked */
    3893        3438 :     if (theLock == NULL)
    3894        1170 :         return;
    3895             : 
    3896             :     /* Set up a procs[] element */
    3897        2268 :     bproc = &data->procs[data->nprocs++];
    3898        2268 :     bproc->pid = blocked_proc->pid;
    3899        2268 :     bproc->first_lock = data->nlocks;
    3900        2268 :     bproc->first_waiter = data->npids;
    3901             : 
    3902             :     /*
    3903             :      * We may ignore the proc's fast-path arrays, since nothing in those could
    3904             :      * be related to a contended lock.
    3905             :      */
    3906             : 
    3907             :     /* Collect all PROCLOCKs associated with theLock */
    3908        6892 :     dlist_foreach(proclock_iter, &theLock->procLocks)
    3909             :     {
    3910        4624 :         PROCLOCK   *proclock =
    3911        4624 :             dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
    3912        4624 :         PGPROC     *proc = proclock->tag.myProc;
    3913        4624 :         LOCK       *lock = proclock->tag.myLock;
    3914             :         LockInstanceData *instance;
    3915             : 
    3916        4624 :         if (data->nlocks >= data->maxlocks)
    3917             :         {
    3918           0 :             data->maxlocks += MaxBackends;
    3919           0 :             data->locks = (LockInstanceData *)
    3920           0 :                 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
    3921             :         }
    3922             : 
    3923        4624 :         instance = &data->locks[data->nlocks];
    3924        4624 :         memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
    3925        4624 :         instance->holdMask = proclock->holdMask;
    3926        4624 :         if (proc->waitLock == lock)
    3927        2340 :             instance->waitLockMode = proc->waitLockMode;
    3928             :         else
    3929        2284 :             instance->waitLockMode = NoLock;
    3930        4624 :         instance->vxid.procNumber = proc->vxid.procNumber;
    3931        4624 :         instance->vxid.localTransactionId = proc->vxid.lxid;
    3932        4624 :         instance->pid = proc->pid;
    3933        4624 :         instance->leaderPid = proclock->groupLeader->pid;
    3934        4624 :         instance->fastpath = false;
    3935        4624 :         data->nlocks++;
    3936             :     }
    3937             : 
    3938             :     /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
    3939        2268 :     waitQueue = &(theLock->waitProcs);
    3940        2268 :     queue_size = dclist_count(waitQueue);
    3941             : 
    3942        2268 :     if (queue_size > data->maxpids - data->npids)
    3943             :     {
    3944           0 :         data->maxpids = Max(data->maxpids + MaxBackends,
    3945             :                             data->npids + queue_size);
    3946           0 :         data->waiter_pids = (int *) repalloc(data->waiter_pids,
    3947           0 :                                              sizeof(int) * data->maxpids);
    3948             :     }
    3949             : 
    3950             :     /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
    3951        2304 :     dclist_foreach(proc_iter, waitQueue)
    3952             :     {
    3953        2304 :         PGPROC     *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
    3954             : 
    3955        2304 :         if (queued_proc == blocked_proc)
    3956        2268 :             break;
    3957          36 :         data->waiter_pids[data->npids++] = queued_proc->pid;
    3958          36 :         queued_proc = (PGPROC *) queued_proc->links.next;
    3959             :     }
    3960             : 
    3961        2268 :     bproc->num_locks = data->nlocks - bproc->first_lock;
    3962        2268 :     bproc->num_waiters = data->npids - bproc->first_waiter;
    3963             : }
    3964             : 
    3965             : /*
    3966             :  * Returns a list of currently held AccessExclusiveLocks, for use by
    3967             :  * LogStandbySnapshot().  The result is a palloc'd array,
    3968             :  * with the number of elements returned into *nlocks.
    3969             :  *
    3970             :  * XXX This currently takes a lock on all partitions of the lock table,
    3971             :  * but it's possible to do better.  By reference counting locks and storing
    3972             :  * the value in the ProcArray entry for each backend we could tell if any
    3973             :  * locks need recording without having to acquire the partition locks and
    3974             :  * scan the lock table.  Whether that's worth the additional overhead
    3975             :  * is pretty dubious though.
    3976             :  */
    3977             : xl_standby_lock *
    3978        1508 : GetRunningTransactionLocks(int *nlocks)
    3979             : {
    3980             :     xl_standby_lock *accessExclusiveLocks;
    3981             :     PROCLOCK   *proclock;
    3982             :     HASH_SEQ_STATUS seqstat;
    3983             :     int         i;
    3984             :     int         index;
    3985             :     int         els;
    3986             : 
    3987             :     /*
    3988             :      * Acquire lock on the entire shared lock data structure.
    3989             :      *
    3990             :      * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
    3991             :      */
    3992       25636 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3993       24128 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3994             : 
    3995             :     /* Now we can safely count the number of proclocks */
    3996        1508 :     els = hash_get_num_entries(LockMethodProcLockHash);
    3997             : 
    3998             :     /*
    3999             :      * Allocating enough space for all locks in the lock table is overkill,
    4000             :      * but it's more convenient and faster than having to enlarge the array.
    4001             :      */
    4002        1508 :     accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
    4003             : 
    4004             :     /* Now scan the tables to copy the data */
    4005        1508 :     hash_seq_init(&seqstat, LockMethodProcLockHash);
    4006             : 
    4007             :     /*
    4008             :      * If lock is a currently granted AccessExclusiveLock then it will have
    4009             :      * just one proclock holder, so locks are never accessed twice in this
    4010             :      * particular case. Don't copy this code for use elsewhere because in the
    4011             :      * general case this will give you duplicate locks when looking at
    4012             :      * non-exclusive lock types.
    4013             :      */
    4014        1508 :     index = 0;
    4015        3870 :     while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
    4016             :     {
    4017             :         /* make sure this definition matches the one used in LockAcquire */
    4018        2362 :         if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
    4019         918 :             proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
    4020             :         {
    4021         502 :             PGPROC     *proc = proclock->tag.myProc;
    4022         502 :             LOCK       *lock = proclock->tag.myLock;
    4023         502 :             TransactionId xid = proc->xid;
    4024             : 
    4025             :             /*
    4026             :              * Don't record locks for transactions if we know they have
    4027             :              * already issued their WAL record for commit but not yet released
    4028             :              * lock. It is still possible that we see locks held by already
    4029             :              * complete transactions, if they haven't yet zeroed their xids.
    4030             :              */
    4031         502 :             if (!TransactionIdIsValid(xid))
    4032           4 :                 continue;
    4033             : 
    4034         498 :             accessExclusiveLocks[index].xid = xid;
    4035         498 :             accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
    4036         498 :             accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
    4037             : 
    4038         498 :             index++;
    4039             :         }
    4040             :     }
    4041             : 
    4042             :     Assert(index <= els);
    4043             : 
    4044             :     /*
    4045             :      * And release locks.  We do this in reverse order for two reasons: (1)
    4046             :      * Anyone else who needs more than one of the locks will be trying to lock
    4047             :      * them in increasing order; we don't want to release the other process
    4048             :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    4049             :      * behavior inside LWLockRelease.
    4050             :      */
    4051       25636 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    4052       24128 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    4053             : 
    4054        1508 :     *nlocks = index;
    4055        1508 :     return accessExclusiveLocks;
    4056             : }
    4057             : 
    4058             : /* Provide the textual name of any lock mode */
    4059             : const char *
    4060       12056 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
    4061             : {
    4062             :     Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
    4063             :     Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
    4064       12056 :     return LockMethods[lockmethodid]->lockModeNames[mode];
    4065             : }
    4066             : 
    4067             : #ifdef LOCK_DEBUG
    4068             : /*
    4069             :  * Dump all locks in the given proc's myProcLocks lists.
    4070             :  *
    4071             :  * Caller is responsible for having acquired appropriate LWLocks.
    4072             :  */
    4073             : void
    4074             : DumpLocks(PGPROC *proc)
    4075             : {
    4076             :     int         i;
    4077             : 
    4078             :     if (proc == NULL)
    4079             :         return;
    4080             : 
    4081             :     if (proc->waitLock)
    4082             :         LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
    4083             : 
    4084             :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    4085             :     {
    4086             :         dlist_head *procLocks = &proc->myProcLocks[i];
    4087             :         dlist_iter  iter;
    4088             : 
    4089             :         dlist_foreach(iter, procLocks)
    4090             :         {
    4091             :             PROCLOCK   *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
    4092             :             LOCK       *lock = proclock->tag.myLock;
    4093             : 
    4094             :             Assert(proclock->tag.myProc == proc);
    4095             :             PROCLOCK_PRINT("DumpLocks", proclock);
    4096             :             LOCK_PRINT("DumpLocks", lock, 0);
    4097             :         }
    4098             :     }
    4099             : }
    4100             : 
    4101             : /*
    4102             :  * Dump all lmgr locks.
    4103             :  *
    4104             :  * Caller is responsible for having acquired appropriate LWLocks.
    4105             :  */
    4106             : void
    4107             : DumpAllLocks(void)
    4108             : {
    4109             :     PGPROC     *proc;
    4110             :     PROCLOCK   *proclock;
    4111             :     LOCK       *lock;
    4112             :     HASH_SEQ_STATUS status;
    4113             : 
    4114             :     proc = MyProc;
    4115             : 
    4116             :     if (proc && proc->waitLock)
    4117             :         LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
    4118             : 
    4119             :     hash_seq_init(&status, LockMethodProcLockHash);
    4120             : 
    4121             :     while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
    4122             :     {
    4123             :         PROCLOCK_PRINT("DumpAllLocks", proclock);
    4124             : 
    4125             :         lock = proclock->tag.myLock;
    4126             :         if (lock)
    4127             :             LOCK_PRINT("DumpAllLocks", lock, 0);
    4128             :         else
    4129             :             elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
    4130             :     }
    4131             : }
    4132             : #endif                          /* LOCK_DEBUG */
    4133             : 
    4134             : /*
    4135             :  * LOCK 2PC resource manager's routines
    4136             :  */
    4137             : 
    4138             : /*
    4139             :  * Re-acquire a lock belonging to a transaction that was prepared.
    4140             :  *
    4141             :  * Because this function is run at db startup, re-acquiring the locks should
    4142             :  * never conflict with running transactions because there are none.  We
    4143             :  * assume that the lock state represented by the stored 2PC files is legal.
    4144             :  *
    4145             :  * When switching from Hot Standby mode to normal operation, the locks will
    4146             :  * be already held by the startup process. The locks are acquired for the new
    4147             :  * procs without checking for conflicts, so we don't get a conflict between the
    4148             :  * startup process and the dummy procs, even though we will momentarily have
    4149             :  * a situation where two procs are holding the same AccessExclusiveLock,
    4150             :  * which isn't normally possible because the conflict. If we're in standby
    4151             :  * mode, but a recovery snapshot hasn't been established yet, it's possible
    4152             :  * that some but not all of the locks are already held by the startup process.
    4153             :  *
    4154             :  * This approach is simple, but also a bit dangerous, because if there isn't
    4155             :  * enough shared memory to acquire the locks, an error will be thrown, which
    4156             :  * is promoted to FATAL and recovery will abort, bringing down postmaster.
    4157             :  * A safer approach would be to transfer the locks like we do in
    4158             :  * AtPrepare_Locks, but then again, in hot standby mode it's possible for
    4159             :  * read-only backends to use up all the shared lock memory anyway, so that
    4160             :  * replaying the WAL record that needs to acquire a lock will throw an error
    4161             :  * and PANIC anyway.
    4162             :  */
    4163             : void
    4164         176 : lock_twophase_recover(TransactionId xid, uint16 info,
    4165             :                       void *recdata, uint32 len)
    4166             : {
    4167         176 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4168         176 :     PGPROC     *proc = TwoPhaseGetDummyProc(xid, false);
    4169             :     LOCKTAG    *locktag;
    4170             :     LOCKMODE    lockmode;
    4171             :     LOCKMETHODID lockmethodid;
    4172             :     LOCK       *lock;
    4173             :     PROCLOCK   *proclock;
    4174             :     PROCLOCKTAG proclocktag;
    4175             :     bool        found;
    4176             :     uint32      hashcode;
    4177             :     uint32      proclock_hashcode;
    4178             :     int         partition;
    4179             :     LWLock     *partitionLock;
    4180             :     LockMethod  lockMethodTable;
    4181             : 
    4182             :     Assert(len == sizeof(TwoPhaseLockRecord));
    4183         176 :     locktag = &rec->locktag;
    4184         176 :     lockmode = rec->lockmode;
    4185         176 :     lockmethodid = locktag->locktag_lockmethodid;
    4186             : 
    4187         176 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4188           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4189         176 :     lockMethodTable = LockMethods[lockmethodid];
    4190             : 
    4191         176 :     hashcode = LockTagHashCode(locktag);
    4192         176 :     partition = LockHashPartition(hashcode);
    4193         176 :     partitionLock = LockHashPartitionLock(hashcode);
    4194             : 
    4195         176 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4196             : 
    4197             :     /*
    4198             :      * Find or create a lock with this tag.
    4199             :      */
    4200         176 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    4201             :                                                 locktag,
    4202             :                                                 hashcode,
    4203             :                                                 HASH_ENTER_NULL,
    4204             :                                                 &found);
    4205         176 :     if (!lock)
    4206             :     {
    4207           0 :         LWLockRelease(partitionLock);
    4208           0 :         ereport(ERROR,
    4209             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
    4210             :                  errmsg("out of shared memory"),
    4211             :                  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    4212             :     }
    4213             : 
    4214             :     /*
    4215             :      * if it's a new lock object, initialize it
    4216             :      */
    4217         176 :     if (!found)
    4218             :     {
    4219         152 :         lock->grantMask = 0;
    4220         152 :         lock->waitMask = 0;
    4221         152 :         dlist_init(&lock->procLocks);
    4222         152 :         dclist_init(&lock->waitProcs);
    4223         152 :         lock->nRequested = 0;
    4224         152 :         lock->nGranted = 0;
    4225         912 :         MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
    4226         152 :         MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
    4227             :         LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
    4228             :     }
    4229             :     else
    4230             :     {
    4231             :         LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
    4232             :         Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
    4233             :         Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
    4234             :         Assert(lock->nGranted <= lock->nRequested);
    4235             :     }
    4236             : 
    4237             :     /*
    4238             :      * Create the hash key for the proclock table.
    4239             :      */
    4240         176 :     proclocktag.myLock = lock;
    4241         176 :     proclocktag.myProc = proc;
    4242             : 
    4243         176 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    4244             : 
    4245             :     /*
    4246             :      * Find or create a proclock entry with this tag
    4247             :      */
    4248         176 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    4249             :                                                         &proclocktag,
    4250             :                                                         proclock_hashcode,
    4251             :                                                         HASH_ENTER_NULL,
    4252             :                                                         &found);
    4253         176 :     if (!proclock)
    4254             :     {
    4255             :         /* Oops, not enough shmem for the proclock */
    4256           0 :         if (lock->nRequested == 0)
    4257             :         {
    4258             :             /*
    4259             :              * There are no other requestors of this lock, so garbage-collect
    4260             :              * the lock object.  We *must* do this to avoid a permanent leak
    4261             :              * of shared memory, because there won't be anything to cause
    4262             :              * anyone to release the lock object later.
    4263             :              */
    4264             :             Assert(dlist_is_empty(&lock->procLocks));
    4265           0 :             if (!hash_search_with_hash_value(LockMethodLockHash,
    4266           0 :                                              &(lock->tag),
    4267             :                                              hashcode,
    4268             :                                              HASH_REMOVE,
    4269             :                                              NULL))
    4270           0 :                 elog(PANIC, "lock table corrupted");
    4271             :         }
    4272           0 :         LWLockRelease(partitionLock);
    4273           0 :         ereport(ERROR,
    4274             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
    4275             :                  errmsg("out of shared memory"),
    4276             :                  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    4277             :     }
    4278             : 
    4279             :     /*
    4280             :      * If new, initialize the new entry
    4281             :      */
    4282         176 :     if (!found)
    4283             :     {
    4284             :         Assert(proc->lockGroupLeader == NULL);
    4285         160 :         proclock->groupLeader = proc;
    4286         160 :         proclock->holdMask = 0;
    4287         160 :         proclock->releaseMask = 0;
    4288             :         /* Add proclock to appropriate lists */
    4289         160 :         dlist_push_tail(&lock->procLocks, &proclock->lockLink);
    4290         160 :         dlist_push_tail(&proc->myProcLocks[partition],
    4291             :                         &proclock->procLink);
    4292             :         PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
    4293             :     }
    4294             :     else
    4295             :     {
    4296             :         PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
    4297             :         Assert((proclock->holdMask & ~lock->grantMask) == 0);
    4298             :     }
    4299             : 
    4300             :     /*
    4301             :      * lock->nRequested and lock->requested[] count the total number of
    4302             :      * requests, whether granted or waiting, so increment those immediately.
    4303             :      */
    4304         176 :     lock->nRequested++;
    4305         176 :     lock->requested[lockmode]++;
    4306             :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    4307             : 
    4308             :     /*
    4309             :      * We shouldn't already hold the desired lock.
    4310             :      */
    4311         176 :     if (proclock->holdMask & LOCKBIT_ON(lockmode))
    4312           0 :         elog(ERROR, "lock %s on object %u/%u/%u is already held",
    4313             :              lockMethodTable->lockModeNames[lockmode],
    4314             :              lock->tag.locktag_field1, lock->tag.locktag_field2,
    4315             :              lock->tag.locktag_field3);
    4316             : 
    4317             :     /*
    4318             :      * We ignore any possible conflicts and just grant ourselves the lock. Not
    4319             :      * only because we don't bother, but also to avoid deadlocks when
    4320             :      * switching from standby to normal mode. See function comment.
    4321             :      */
    4322         176 :     GrantLock(lock, proclock, lockmode);
    4323             : 
    4324             :     /*
    4325             :      * Bump strong lock count, to make sure any fast-path lock requests won't
    4326             :      * be granted without consulting the primary lock table.
    4327             :      */
    4328         176 :     if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
    4329             :     {
    4330          36 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
    4331             : 
    4332          36 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    4333          36 :         FastPathStrongRelationLocks->count[fasthashcode]++;
    4334          36 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    4335             :     }
    4336             : 
    4337         176 :     LWLockRelease(partitionLock);
    4338         176 : }
    4339             : 
    4340             : /*
    4341             :  * Re-acquire a lock belonging to a transaction that was prepared, when
    4342             :  * starting up into hot standby mode.
    4343             :  */
    4344             : void
    4345           0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
    4346             :                               void *recdata, uint32 len)
    4347             : {
    4348           0 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4349             :     LOCKTAG    *locktag;
    4350             :     LOCKMODE    lockmode;
    4351             :     LOCKMETHODID lockmethodid;
    4352             : 
    4353             :     Assert(len == sizeof(TwoPhaseLockRecord));
    4354           0 :     locktag = &rec->locktag;
    4355           0 :     lockmode = rec->lockmode;
    4356           0 :     lockmethodid = locktag->locktag_lockmethodid;
    4357             : 
    4358           0 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4359           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4360             : 
    4361           0 :     if (lockmode == AccessExclusiveLock &&
    4362           0 :         locktag->locktag_type == LOCKTAG_RELATION)
    4363             :     {
    4364           0 :         StandbyAcquireAccessExclusiveLock(xid,
    4365             :                                           locktag->locktag_field1 /* dboid */ ,
    4366             :                                           locktag->locktag_field2 /* reloid */ );
    4367             :     }
    4368           0 : }
    4369             : 
    4370             : 
    4371             : /*
    4372             :  * 2PC processing routine for COMMIT PREPARED case.
    4373             :  *
    4374             :  * Find and release the lock indicated by the 2PC record.
    4375             :  */
    4376             : void
    4377        1750 : lock_twophase_postcommit(TransactionId xid, uint16 info,
    4378             :                          void *recdata, uint32 len)
    4379             : {
    4380        1750 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4381        1750 :     PGPROC     *proc = TwoPhaseGetDummyProc(xid, true);
    4382             :     LOCKTAG    *locktag;
    4383             :     LOCKMETHODID lockmethodid;
    4384             :     LockMethod  lockMethodTable;
    4385             : 
    4386             :     Assert(len == sizeof(TwoPhaseLockRecord));
    4387        1750 :     locktag = &rec->locktag;
    4388        1750 :     lockmethodid = locktag->locktag_lockmethodid;
    4389             : 
    4390        1750 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4391           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4392        1750 :     lockMethodTable = LockMethods[lockmethodid];
    4393             : 
    4394        1750 :     LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
    4395        1750 : }
    4396             : 
    4397             : /*
    4398             :  * 2PC processing routine for ROLLBACK PREPARED case.
    4399             :  *
    4400             :  * This is actually just the same as the COMMIT case.
    4401             :  */
    4402             : void
    4403         244 : lock_twophase_postabort(TransactionId xid, uint16 info,
    4404             :                         void *recdata, uint32 len)
    4405             : {
    4406         244 :     lock_twophase_postcommit(xid, info, recdata, len);
    4407         244 : }
    4408             : 
    4409             : /*
    4410             :  *      VirtualXactLockTableInsert
    4411             :  *
    4412             :  *      Take vxid lock via the fast-path.  There can't be any pre-existing
    4413             :  *      lockers, as we haven't advertised this vxid via the ProcArray yet.
    4414             :  *
    4415             :  *      Since MyProc->fpLocalTransactionId will normally contain the same data
    4416             :  *      as MyProc->vxid.lxid, you might wonder if we really need both.  The
    4417             :  *      difference is that MyProc->vxid.lxid is set and cleared unlocked, and
    4418             :  *      examined by procarray.c, while fpLocalTransactionId is protected by
    4419             :  *      fpInfoLock and is used only by the locking subsystem.  Doing it this
    4420             :  *      way makes it easier to verify that there are no funny race conditions.
    4421             :  *
    4422             :  *      We don't bother recording this lock in the local lock table, since it's
    4423             :  *      only ever released at the end of a transaction.  Instead,
    4424             :  *      LockReleaseAll() calls VirtualXactLockTableCleanup().
    4425             :  */
    4426             : void
    4427      561304 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
    4428             : {
    4429             :     Assert(VirtualTransactionIdIsValid(vxid));
    4430             : 
    4431      561304 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    4432             : 
    4433             :     Assert(MyProc->vxid.procNumber == vxid.procNumber);
    4434             :     Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
    4435             :     Assert(MyProc->fpVXIDLock == false);
    4436             : 
    4437      561304 :     MyProc->fpVXIDLock = true;
    4438      561304 :     MyProc->fpLocalTransactionId = vxid.localTransactionId;
    4439             : 
    4440      561304 :     LWLockRelease(&MyProc->fpInfoLock);
    4441      561304 : }
    4442             : 
    4443             : /*
    4444             :  *      VirtualXactLockTableCleanup
    4445             :  *
    4446             :  *      Check whether a VXID lock has been materialized; if so, release it,
    4447             :  *      unblocking waiters.
    4448             :  */
    4449             : void
    4450      562034 : VirtualXactLockTableCleanup(void)
    4451             : {
    4452             :     bool        fastpath;
    4453             :     LocalTransactionId lxid;
    4454             : 
    4455             :     Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
    4456             : 
    4457             :     /*
    4458             :      * Clean up shared memory state.
    4459             :      */
    4460      562034 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    4461             : 
    4462      562034 :     fastpath = MyProc->fpVXIDLock;
    4463      562034 :     lxid = MyProc->fpLocalTransactionId;
    4464      562034 :     MyProc->fpVXIDLock = false;
    4465      562034 :     MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
    4466             : 
    4467      562034 :     LWLockRelease(&MyProc->fpInfoLock);
    4468             : 
    4469             :     /*
    4470             :      * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
    4471             :      * that means someone transferred the lock to the main lock table.
    4472             :      */
    4473      562034 :     if (!fastpath && LocalTransactionIdIsValid(lxid))
    4474             :     {
    4475             :         VirtualTransactionId vxid;
    4476             :         LOCKTAG     locktag;
    4477             : 
    4478         524 :         vxid.procNumber = MyProcNumber;
    4479         524 :         vxid.localTransactionId = lxid;
    4480         524 :         SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
    4481             : 
    4482         524 :         LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
    4483             :                              &locktag, ExclusiveLock, false);
    4484             :     }
    4485      562034 : }
    4486             : 
    4487             : /*
    4488             :  *      XactLockForVirtualXact
    4489             :  *
    4490             :  * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
    4491             :  * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid).  Unlike those
    4492             :  * functions, it assumes "xid" is never a subtransaction and that "xid" is
    4493             :  * prepared, committed, or aborted.
    4494             :  *
    4495             :  * If !TransactionIdIsValid(xid), this locks every prepared XID having been
    4496             :  * known as "vxid" before its PREPARE TRANSACTION.
    4497             :  */
    4498             : static bool
    4499         544 : XactLockForVirtualXact(VirtualTransactionId vxid,
    4500             :                        TransactionId xid, bool wait)
    4501             : {
    4502         544 :     bool        more = false;
    4503             : 
    4504             :     /* There is no point to wait for 2PCs if you have no 2PCs. */
    4505         544 :     if (max_prepared_xacts == 0)
    4506         176 :         return true;
    4507             : 
    4508             :     do
    4509             :     {
    4510             :         LockAcquireResult lar;
    4511             :         LOCKTAG     tag;
    4512             : 
    4513             :         /* Clear state from previous iterations. */
    4514         368 :         if (more)
    4515             :         {
    4516           0 :             xid = InvalidTransactionId;
    4517           0 :             more = false;
    4518             :         }
    4519             : 
    4520             :         /* If we have no xid, try to find one. */
    4521         368 :         if (!TransactionIdIsValid(xid))
    4522         196 :             xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
    4523         368 :         if (!TransactionIdIsValid(xid))
    4524             :         {
    4525             :             Assert(!more);
    4526         166 :             return true;
    4527             :         }
    4528             : 
    4529             :         /* Check or wait for XID completion. */
    4530         202 :         SET_LOCKTAG_TRANSACTION(tag, xid);
    4531         202 :         lar = LockAcquire(&tag, ShareLock, false, !wait);
    4532         202 :         if (lar == LOCKACQUIRE_NOT_AVAIL)
    4533           0 :             return false;
    4534         202 :         LockRelease(&tag, ShareLock, false);
    4535         202 :     } while (more);
    4536             : 
    4537         202 :     return true;
    4538             : }
    4539             : 
    4540             : /*
    4541             :  *      VirtualXactLock
    4542             :  *
    4543             :  * If wait = true, wait as long as the given VXID or any XID acquired by the
    4544             :  * same transaction is still running.  Then, return true.
    4545             :  *
    4546             :  * If wait = false, just check whether that VXID or one of those XIDs is still
    4547             :  * running, and return true or false.
    4548             :  */
    4549             : bool
    4550         624 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
    4551             : {
    4552             :     LOCKTAG     tag;
    4553             :     PGPROC     *proc;
    4554         624 :     TransactionId xid = InvalidTransactionId;
    4555             : 
    4556             :     Assert(VirtualTransactionIdIsValid(vxid));
    4557             : 
    4558         624 :     if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
    4559             :         /* no vxid lock; localTransactionId is a normal, locked XID */
    4560           2 :         return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
    4561             : 
    4562         622 :     SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
    4563             : 
    4564             :     /*
    4565             :      * If a lock table entry must be made, this is the PGPROC on whose behalf
    4566             :      * it must be done.  Note that the transaction might end or the PGPROC
    4567             :      * might be reassigned to a new backend before we get around to examining
    4568             :      * it, but it doesn't matter.  If we find upon examination that the
    4569             :      * relevant lxid is no longer running here, that's enough to prove that
    4570             :      * it's no longer running anywhere.
    4571             :      */
    4572         622 :     proc = ProcNumberGetProc(vxid.procNumber);
    4573         622 :     if (proc == NULL)
    4574           8 :         return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
    4575             : 
    4576             :     /*
    4577             :      * We must acquire this lock before checking the procNumber and lxid
    4578             :      * against the ones we're waiting for.  The target backend will only set
    4579             :      * or clear lxid while holding this lock.
    4580             :      */
    4581         614 :     LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
    4582             : 
    4583         614 :     if (proc->vxid.procNumber != vxid.procNumber
    4584         614 :         || proc->fpLocalTransactionId != vxid.localTransactionId)
    4585             :     {
    4586             :         /* VXID ended */
    4587          48 :         LWLockRelease(&proc->fpInfoLock);
    4588          48 :         return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
    4589             :     }
    4590             : 
    4591             :     /*
    4592             :      * If we aren't asked to wait, there's no need to set up a lock table
    4593             :      * entry.  The transaction is still in progress, so just return false.
    4594             :      */
    4595         566 :     if (!wait)
    4596             :     {
    4597          30 :         LWLockRelease(&proc->fpInfoLock);
    4598          30 :         return false;
    4599             :     }
    4600             : 
    4601             :     /*
    4602             :      * OK, we're going to need to sleep on the VXID.  But first, we must set
    4603             :      * up the primary lock table entry, if needed (ie, convert the proc's
    4604             :      * fast-path lock on its VXID to a regular lock).
    4605             :      */
    4606         536 :     if (proc->fpVXIDLock)
    4607             :     {
    4608             :         PROCLOCK   *proclock;
    4609             :         uint32      hashcode;
    4610             :         LWLock     *partitionLock;
    4611             : 
    4612         524 :         hashcode = LockTagHashCode(&tag);
    4613             : 
    4614         524 :         partitionLock = LockHashPartitionLock(hashcode);
    4615         524 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4616             : 
    4617         524 :         proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
    4618             :                                     &tag, hashcode, ExclusiveLock);
    4619         524 :         if (!proclock)
    4620             :         {
    4621           0 :             LWLockRelease(partitionLock);
    4622           0 :             LWLockRelease(&proc->fpInfoLock);
    4623           0 :             ereport(ERROR,
    4624             :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    4625             :                      errmsg("out of shared memory"),
    4626             :                      errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
    4627             :         }
    4628         524 :         GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
    4629             : 
    4630         524 :         LWLockRelease(partitionLock);
    4631             : 
    4632         524 :         proc->fpVXIDLock = false;
    4633             :     }
    4634             : 
    4635             :     /*
    4636             :      * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
    4637             :      * search.  The proc might have assigned this XID but not yet locked it,
    4638             :      * in which case the proc will lock this XID before releasing the VXID.
    4639             :      * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
    4640             :      * so we won't save an XID of a different VXID.  It doesn't matter whether
    4641             :      * we save this before or after setting up the primary lock table entry.
    4642             :      */
    4643         536 :     xid = proc->xid;
    4644             : 
    4645             :     /* Done with proc->fpLockBits */
    4646         536 :     LWLockRelease(&proc->fpInfoLock);
    4647             : 
    4648             :     /* Time to wait. */
    4649         536 :     (void) LockAcquire(&tag, ShareLock, false, false);
    4650             : 
    4651         486 :     LockRelease(&tag, ShareLock, false);
    4652         486 :     return XactLockForVirtualXact(vxid, xid, wait);
    4653             : }
    4654             : 
    4655             : /*
    4656             :  * LockWaiterCount
    4657             :  *
    4658             :  * Find the number of lock requester on this locktag
    4659             :  */
    4660             : int
    4661      112304 : LockWaiterCount(const LOCKTAG *locktag)
    4662             : {
    4663      112304 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    4664             :     LOCK       *lock;
    4665             :     bool        found;
    4666             :     uint32      hashcode;
    4667             :     LWLock     *partitionLock;
    4668      112304 :     int         waiters = 0;
    4669             : 
    4670      112304 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4671           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4672             : 
    4673      112304 :     hashcode = LockTagHashCode(locktag);
    4674      112304 :     partitionLock = LockHashPartitionLock(hashcode);
    4675      112304 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4676             : 
    4677      112304 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    4678             :                                                 locktag,
    4679             :                                                 hashcode,
    4680             :                                                 HASH_FIND,
    4681             :                                                 &found);
    4682      112304 :     if (found)
    4683             :     {
    4684             :         Assert(lock != NULL);
    4685          44 :         waiters = lock->nRequested;
    4686             :     }
    4687      112304 :     LWLockRelease(partitionLock);
    4688             : 
    4689      112304 :     return waiters;
    4690             : }

Generated by: LCOV version 1.14