LCOV - code coverage report
Current view: top level - src/backend/storage/lmgr - lock.c (source / functions) Hit Total Coverage
Test: PostgreSQL 13beta1 Lines: 1027 1205 85.2 %
Date: 2020-06-01 09:07:10 Functions: 52 55 94.5 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * lock.c
       4             :  *    POSTGRES primary lock mechanism
       5             :  *
       6             :  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/storage/lmgr/lock.c
      12             :  *
      13             :  * NOTES
      14             :  *    A lock table is a shared memory hash table.  When
      15             :  *    a process tries to acquire a lock of a type that conflicts
      16             :  *    with existing locks, it is put to sleep using the routines
      17             :  *    in storage/lmgr/proc.c.
      18             :  *
      19             :  *    For the most part, this code should be invoked via lmgr.c
      20             :  *    or another lock-management module, not directly.
      21             :  *
      22             :  *  Interface:
      23             :  *
      24             :  *  InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
      25             :  *  LockAcquire(), LockRelease(), LockReleaseAll(),
      26             :  *  LockCheckConflicts(), GrantLock()
      27             :  *
      28             :  *-------------------------------------------------------------------------
      29             :  */
      30             : #include "postgres.h"
      31             : 
      32             : #include <signal.h>
      33             : #include <unistd.h>
      34             : 
      35             : #include "access/transam.h"
      36             : #include "access/twophase.h"
      37             : #include "access/twophase_rmgr.h"
      38             : #include "access/xact.h"
      39             : #include "access/xlog.h"
      40             : #include "miscadmin.h"
      41             : #include "pg_trace.h"
      42             : #include "pgstat.h"
      43             : #include "storage/proc.h"
      44             : #include "storage/procarray.h"
      45             : #include "storage/sinvaladt.h"
      46             : #include "storage/spin.h"
      47             : #include "storage/standby.h"
      48             : #include "utils/memutils.h"
      49             : #include "utils/ps_status.h"
      50             : #include "utils/resowner_private.h"
      51             : 
      52             : 
      53             : /* This configuration variable is used to set the lock table size */
      54             : int         max_locks_per_xact; /* set by guc.c */
      55             : 
      56             : #define NLOCKENTS() \
      57             :     mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
      58             : 
      59             : 
      60             : /*
      61             :  * Data structures defining the semantics of the standard lock methods.
      62             :  *
      63             :  * The conflict table defines the semantics of the various lock modes.
      64             :  */
      65             : static const LOCKMASK LockConflicts[] = {
      66             :     0,
      67             : 
      68             :     /* AccessShareLock */
      69             :     LOCKBIT_ON(AccessExclusiveLock),
      70             : 
      71             :     /* RowShareLock */
      72             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      73             : 
      74             :     /* RowExclusiveLock */
      75             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      76             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      77             : 
      78             :     /* ShareUpdateExclusiveLock */
      79             :     LOCKBIT_ON(ShareUpdateExclusiveLock) |
      80             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      81             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      82             : 
      83             :     /* ShareLock */
      84             :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      85             :     LOCKBIT_ON(ShareRowExclusiveLock) |
      86             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      87             : 
      88             :     /* ShareRowExclusiveLock */
      89             :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      90             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      91             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      92             : 
      93             :     /* ExclusiveLock */
      94             :     LOCKBIT_ON(RowShareLock) |
      95             :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      96             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      97             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      98             : 
      99             :     /* AccessExclusiveLock */
     100             :     LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
     101             :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
     102             :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
     103             :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
     104             : 
     105             : };
     106             : 
     107             : /* Names of lock modes, for debug printouts */
     108             : static const char *const lock_mode_names[] =
     109             : {
     110             :     "INVALID",
     111             :     "AccessShareLock",
     112             :     "RowShareLock",
     113             :     "RowExclusiveLock",
     114             :     "ShareUpdateExclusiveLock",
     115             :     "ShareLock",
     116             :     "ShareRowExclusiveLock",
     117             :     "ExclusiveLock",
     118             :     "AccessExclusiveLock"
     119             : };
     120             : 
     121             : #ifndef LOCK_DEBUG
     122             : static bool Dummy_trace = false;
     123             : #endif
     124             : 
     125             : static const LockMethodData default_lockmethod = {
     126             :     AccessExclusiveLock,        /* highest valid lock mode number */
     127             :     LockConflicts,
     128             :     lock_mode_names,
     129             : #ifdef LOCK_DEBUG
     130             :     &Trace_locks
     131             : #else
     132             :     &Dummy_trace
     133             : #endif
     134             : };
     135             : 
     136             : static const LockMethodData user_lockmethod = {
     137             :     AccessExclusiveLock,        /* highest valid lock mode number */
     138             :     LockConflicts,
     139             :     lock_mode_names,
     140             : #ifdef LOCK_DEBUG
     141             :     &Trace_userlocks
     142             : #else
     143             :     &Dummy_trace
     144             : #endif
     145             : };
     146             : 
     147             : /*
     148             :  * map from lock method id to the lock table data structures
     149             :  */
     150             : static const LockMethod LockMethods[] = {
     151             :     NULL,
     152             :     &default_lockmethod,
     153             :     &user_lockmethod
     154             : };
     155             : 
     156             : 
     157             : /* Record that's written to 2PC state file when a lock is persisted */
     158             : typedef struct TwoPhaseLockRecord
     159             : {
     160             :     LOCKTAG     locktag;
     161             :     LOCKMODE    lockmode;
     162             : } TwoPhaseLockRecord;
     163             : 
     164             : 
     165             : /*
     166             :  * Count of the number of fast path lock slots we believe to be used.  This
     167             :  * might be higher than the real number if another backend has transferred
     168             :  * our locks to the primary lock table, but it can never be lower than the
     169             :  * real value, since only we can acquire locks on our own behalf.
     170             :  */
     171             : static int  FastPathLocalUseCount = 0;
     172             : 
     173             : /*
     174             :  * Flag to indicate if the relation extension lock is held by this backend.
     175             :  * This flag is used to ensure that while holding the relation extension lock
     176             :  * we don't try to acquire a heavyweight lock on any other object.  This
     177             :  * restriction implies that the relation extension lock won't ever participate
     178             :  * in the deadlock cycle because we can never wait for any other heavyweight
     179             :  * lock after acquiring this lock.
     180             :  *
     181             :  * Such a restriction is okay for relation extension locks as unlike other
     182             :  * heavyweight locks these are not held till the transaction end.  These are
     183             :  * taken for a short duration to extend a particular relation and then
     184             :  * released.
     185             :  */
     186             : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
     187             : 
     188             : /*
     189             :  * Flag to indicate if the page lock is held by this backend.  We don't
     190             :  * acquire any other heavyweight lock while holding the page lock except for
     191             :  * relation extension.  However, these locks are never taken in reverse order
     192             :  * which implies that page locks will also never participate in the deadlock
     193             :  * cycle.
     194             :  *
     195             :  * Similar to relation extension, page locks are also held for a short
     196             :  * duration, so imposing such a restriction won't hurt.
     197             :  */
     198             : static bool IsPageLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
     199             : 
     200             : /* Macros for manipulating proc->fpLockBits */
     201             : #define FAST_PATH_BITS_PER_SLOT         3
     202             : #define FAST_PATH_LOCKNUMBER_OFFSET     1
     203             : #define FAST_PATH_MASK                  ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
     204             : #define FAST_PATH_GET_BITS(proc, n) \
     205             :     (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
     206             : #define FAST_PATH_BIT_POSITION(n, l) \
     207             :     (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
     208             :      AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
     209             :      AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
     210             :      ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
     211             : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
     212             :      (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
     213             : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
     214             :      (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
     215             : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
     216             :      ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
     217             : 
     218             : /*
     219             :  * The fast-path lock mechanism is concerned only with relation locks on
     220             :  * unshared relations by backends bound to a database.  The fast-path
     221             :  * mechanism exists mostly to accelerate acquisition and release of locks
     222             :  * that rarely conflict.  Because ShareUpdateExclusiveLock is
     223             :  * self-conflicting, it can't use the fast-path mechanism; but it also does
     224             :  * not conflict with any of the locks that do, so we can ignore it completely.
     225             :  */
     226             : #define EligibleForRelationFastPath(locktag, mode) \
     227             :     ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
     228             :     (locktag)->locktag_type == LOCKTAG_RELATION && \
     229             :     (locktag)->locktag_field1 == MyDatabaseId && \
     230             :     MyDatabaseId != InvalidOid && \
     231             :     (mode) < ShareUpdateExclusiveLock)
     232             : #define ConflictsWithRelationFastPath(locktag, mode) \
     233             :     ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
     234             :     (locktag)->locktag_type == LOCKTAG_RELATION && \
     235             :     (locktag)->locktag_field1 != InvalidOid && \
     236             :     (mode) > ShareUpdateExclusiveLock)
     237             : 
     238             : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
     239             : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
     240             : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
     241             :                                           const LOCKTAG *locktag, uint32 hashcode);
     242             : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
     243             : 
     244             : /*
     245             :  * To make the fast-path lock mechanism work, we must have some way of
     246             :  * preventing the use of the fast-path when a conflicting lock might be present.
     247             :  * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
     248             :  * and maintain an integer count of the number of "strong" lockers
     249             :  * in each partition.  When any "strong" lockers are present (which is
     250             :  * hopefully not very often), the fast-path mechanism can't be used, and we
     251             :  * must fall back to the slower method of pushing matching locks directly
     252             :  * into the main lock tables.
     253             :  *
     254             :  * The deadlock detector does not know anything about the fast path mechanism,
     255             :  * so any locks that might be involved in a deadlock must be transferred from
     256             :  * the fast-path queues to the main lock table.
     257             :  */
     258             : 
     259             : #define FAST_PATH_STRONG_LOCK_HASH_BITS         10
     260             : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
     261             :     (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
     262             : #define FastPathStrongLockHashPartition(hashcode) \
     263             :     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
     264             : 
     265             : typedef struct
     266             : {
     267             :     slock_t     mutex;
     268             :     uint32      count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
     269             : } FastPathStrongRelationLockData;
     270             : 
     271             : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
     272             : 
     273             : 
     274             : /*
     275             :  * Pointers to hash tables containing lock state
     276             :  *
     277             :  * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
     278             :  * shared memory; LockMethodLocalHash is local to each backend.
     279             :  */
     280             : static HTAB *LockMethodLockHash;
     281             : static HTAB *LockMethodProcLockHash;
     282             : static HTAB *LockMethodLocalHash;
     283             : 
     284             : 
     285             : /* private state for error cleanup */
     286             : static LOCALLOCK *StrongLockInProgress;
     287             : static LOCALLOCK *awaitedLock;
     288             : static ResourceOwner awaitedOwner;
     289             : 
     290             : 
     291             : #ifdef LOCK_DEBUG
     292             : 
     293             : /*------
     294             :  * The following configuration options are available for lock debugging:
     295             :  *
     296             :  *     TRACE_LOCKS      -- give a bunch of output what's going on in this file
     297             :  *     TRACE_USERLOCKS  -- same but for user locks
     298             :  *     TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
     299             :  *                         (use to avoid output on system tables)
     300             :  *     TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
     301             :  *     DEBUG_DEADLOCKS  -- currently dumps locks at untimely occasions ;)
     302             :  *
     303             :  * Furthermore, but in storage/lmgr/lwlock.c:
     304             :  *     TRACE_LWLOCKS    -- trace lightweight locks (pretty useless)
     305             :  *
     306             :  * Define LOCK_DEBUG at compile time to get all these enabled.
     307             :  * --------
     308             :  */
     309             : 
     310             : int         Trace_lock_oidmin = FirstNormalObjectId;
     311             : bool        Trace_locks = false;
     312             : bool        Trace_userlocks = false;
     313             : int         Trace_lock_table = 0;
     314             : bool        Debug_deadlocks = false;
     315             : 
     316             : 
     317             : inline static bool
     318             : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
     319             : {
     320             :     return
     321             :         (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
     322             :          ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
     323             :         || (Trace_lock_table &&
     324             :             (tag->locktag_field2 == Trace_lock_table));
     325             : }
     326             : 
     327             : 
     328             : inline static void
     329             : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
     330             : {
     331             :     if (LOCK_DEBUG_ENABLED(&lock->tag))
     332             :         elog(LOG,
     333             :              "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
     334             :              "req(%d,%d,%d,%d,%d,%d,%d)=%d "
     335             :              "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
     336             :              where, lock,
     337             :              lock->tag.locktag_field1, lock->tag.locktag_field2,
     338             :              lock->tag.locktag_field3, lock->tag.locktag_field4,
     339             :              lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
     340             :              lock->grantMask,
     341             :              lock->requested[1], lock->requested[2], lock->requested[3],
     342             :              lock->requested[4], lock->requested[5], lock->requested[6],
     343             :              lock->requested[7], lock->nRequested,
     344             :              lock->granted[1], lock->granted[2], lock->granted[3],
     345             :              lock->granted[4], lock->granted[5], lock->granted[6],
     346             :              lock->granted[7], lock->nGranted,
     347             :              lock->waitProcs.size,
     348             :              LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
     349             : }
     350             : 
     351             : 
     352             : inline static void
     353             : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
     354             : {
     355             :     if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
     356             :         elog(LOG,
     357             :              "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
     358             :              where, proclockP, proclockP->tag.myLock,
     359             :              PROCLOCK_LOCKMETHOD(*(proclockP)),
     360             :              proclockP->tag.myProc, (int) proclockP->holdMask);
     361             : }
     362             : #else                           /* not LOCK_DEBUG */
     363             : 
     364             : #define LOCK_PRINT(where, lock, type)  ((void) 0)
     365             : #define PROCLOCK_PRINT(where, proclockP)  ((void) 0)
     366             : #endif                          /* not LOCK_DEBUG */
     367             : 
     368             : 
     369             : static uint32 proclock_hash(const void *key, Size keysize);
     370             : static void RemoveLocalLock(LOCALLOCK *locallock);
     371             : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
     372             :                                   const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
     373             : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
     374             : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
     375             : static void FinishStrongLockAcquire(void);
     376             : static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
     377             : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
     378             : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
     379             : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
     380             :                         PROCLOCK *proclock, LockMethod lockMethodTable);
     381             : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
     382             :                         LockMethod lockMethodTable, uint32 hashcode,
     383             :                         bool wakeupNeeded);
     384             : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
     385             :                                  LOCKTAG *locktag, LOCKMODE lockmode,
     386             :                                  bool decrement_strong_lock_count);
     387             : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
     388             :                                            BlockedProcsData *data);
     389             : 
     390             : 
     391             : /*
     392             :  * InitLocks -- Initialize the lock manager's data structures.
     393             :  *
     394             :  * This is called from CreateSharedMemoryAndSemaphores(), which see for
     395             :  * more comments.  In the normal postmaster case, the shared hash tables
     396             :  * are created here, as well as a locallock hash table that will remain
     397             :  * unused and empty in the postmaster itself.  Backends inherit the pointers
     398             :  * to the shared tables via fork(), and also inherit an image of the locallock
     399             :  * hash table, which they proceed to use.  In the EXEC_BACKEND case, each
     400             :  * backend re-executes this code to obtain pointers to the already existing
     401             :  * shared hash tables and to create its locallock hash table.
     402             :  */
     403             : void
     404        2170 : InitLocks(void)
     405             : {
     406             :     HASHCTL     info;
     407             :     long        init_table_size,
     408             :                 max_table_size;
     409             :     bool        found;
     410             : 
     411             :     /*
     412             :      * Compute init/max size to request for lock hashtables.  Note these
     413             :      * calculations must agree with LockShmemSize!
     414             :      */
     415        2170 :     max_table_size = NLOCKENTS();
     416        2170 :     init_table_size = max_table_size / 2;
     417             : 
     418             :     /*
     419             :      * Allocate hash table for LOCK structs.  This stores per-locked-object
     420             :      * information.
     421             :      */
     422       30380 :     MemSet(&info, 0, sizeof(info));
     423        2170 :     info.keysize = sizeof(LOCKTAG);
     424        2170 :     info.entrysize = sizeof(LOCK);
     425        2170 :     info.num_partitions = NUM_LOCK_PARTITIONS;
     426             : 
     427        2170 :     LockMethodLockHash = ShmemInitHash("LOCK hash",
     428             :                                        init_table_size,
     429             :                                        max_table_size,
     430             :                                        &info,
     431             :                                        HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
     432             : 
     433             :     /* Assume an average of 2 holders per lock */
     434        2170 :     max_table_size *= 2;
     435        2170 :     init_table_size *= 2;
     436             : 
     437             :     /*
     438             :      * Allocate hash table for PROCLOCK structs.  This stores
     439             :      * per-lock-per-holder information.
     440             :      */
     441        2170 :     info.keysize = sizeof(PROCLOCKTAG);
     442        2170 :     info.entrysize = sizeof(PROCLOCK);
     443        2170 :     info.hash = proclock_hash;
     444        2170 :     info.num_partitions = NUM_LOCK_PARTITIONS;
     445             : 
     446        2170 :     LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
     447             :                                            init_table_size,
     448             :                                            max_table_size,
     449             :                                            &info,
     450             :                                            HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
     451             : 
     452             :     /*
     453             :      * Allocate fast-path structures.
     454             :      */
     455        2170 :     FastPathStrongRelationLocks =
     456        2170 :         ShmemInitStruct("Fast Path Strong Relation Lock Data",
     457             :                         sizeof(FastPathStrongRelationLockData), &found);
     458        2170 :     if (!found)
     459        2170 :         SpinLockInit(&FastPathStrongRelationLocks->mutex);
     460             : 
     461             :     /*
     462             :      * Allocate non-shared hash table for LOCALLOCK structs.  This stores lock
     463             :      * counts and resource owner information.
     464             :      *
     465             :      * The non-shared table could already exist in this process (this occurs
     466             :      * when the postmaster is recreating shared memory after a backend crash).
     467             :      * If so, delete and recreate it.  (We could simply leave it, since it
     468             :      * ought to be empty in the postmaster, but for safety let's zap it.)
     469             :      */
     470        2170 :     if (LockMethodLocalHash)
     471           4 :         hash_destroy(LockMethodLocalHash);
     472             : 
     473        2170 :     info.keysize = sizeof(LOCALLOCKTAG);
     474        2170 :     info.entrysize = sizeof(LOCALLOCK);
     475             : 
     476        2170 :     LockMethodLocalHash = hash_create("LOCALLOCK hash",
     477             :                                       16,
     478             :                                       &info,
     479             :                                       HASH_ELEM | HASH_BLOBS);
     480        2170 : }
     481             : 
     482             : 
     483             : /*
     484             :  * Fetch the lock method table associated with a given lock
     485             :  */
     486             : LockMethod
     487         140 : GetLocksMethodTable(const LOCK *lock)
     488             : {
     489         140 :     LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
     490             : 
     491             :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
     492         140 :     return LockMethods[lockmethodid];
     493             : }
     494             : 
     495             : /*
     496             :  * Fetch the lock method table associated with a given locktag
     497             :  */
     498             : LockMethod
     499        1656 : GetLockTagsMethodTable(const LOCKTAG *locktag)
     500             : {
     501        1656 :     LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
     502             : 
     503             :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
     504        1656 :     return LockMethods[lockmethodid];
     505             : }
     506             : 
     507             : 
     508             : /*
     509             :  * Compute the hash code associated with a LOCKTAG.
     510             :  *
     511             :  * To avoid unnecessary recomputations of the hash code, we try to do this
     512             :  * just once per function, and then pass it around as needed.  Aside from
     513             :  * passing the hashcode to hash_search_with_hash_value(), we can extract
     514             :  * the lock partition number from the hashcode.
     515             :  */
     516             : uint32
     517    28281438 : LockTagHashCode(const LOCKTAG *locktag)
     518             : {
     519    28281438 :     return get_hash_value(LockMethodLockHash, (const void *) locktag);
     520             : }
     521             : 
     522             : /*
     523             :  * Compute the hash code associated with a PROCLOCKTAG.
     524             :  *
     525             :  * Because we want to use just one set of partition locks for both the
     526             :  * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
     527             :  * fall into the same partition number as their associated LOCKs.
     528             :  * dynahash.c expects the partition number to be the low-order bits of
     529             :  * the hash code, and therefore a PROCLOCKTAG's hash code must have the
     530             :  * same low-order bits as the associated LOCKTAG's hash code.  We achieve
     531             :  * this with this specialized hash function.
     532             :  */
     533             : static uint32
     534         162 : proclock_hash(const void *key, Size keysize)
     535             : {
     536         162 :     const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
     537             :     uint32      lockhash;
     538             :     Datum       procptr;
     539             : 
     540             :     Assert(keysize == sizeof(PROCLOCKTAG));
     541             : 
     542             :     /* Look into the associated LOCK object, and compute its hash code */
     543         162 :     lockhash = LockTagHashCode(&proclocktag->myLock->tag);
     544             : 
     545             :     /*
     546             :      * To make the hash code also depend on the PGPROC, we xor the proc
     547             :      * struct's address into the hash code, left-shifted so that the
     548             :      * partition-number bits don't change.  Since this is only a hash, we
     549             :      * don't care if we lose high-order bits of the address; use an
     550             :      * intermediate variable to suppress cast-pointer-to-int warnings.
     551             :      */
     552         162 :     procptr = PointerGetDatum(proclocktag->myProc);
     553         162 :     lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
     554             : 
     555         162 :     return lockhash;
     556             : }
     557             : 
     558             : /*
     559             :  * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
     560             :  * for its underlying LOCK.
     561             :  *
     562             :  * We use this just to avoid redundant calls of LockTagHashCode().
     563             :  */
     564             : static inline uint32
     565     5391142 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
     566             : {
     567     5391142 :     uint32      lockhash = hashcode;
     568             :     Datum       procptr;
     569             : 
     570             :     /*
     571             :      * This must match proclock_hash()!
     572             :      */
     573     5391142 :     procptr = PointerGetDatum(proclocktag->myProc);
     574     5391142 :     lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
     575             : 
     576     5391142 :     return lockhash;
     577             : }
     578             : 
     579             : /*
     580             :  * Given two lock modes, return whether they would conflict.
     581             :  */
     582             : bool
     583         456 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
     584             : {
     585         456 :     LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
     586             : 
     587         456 :     if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
     588         268 :         return true;
     589             : 
     590         188 :     return false;
     591             : }
     592             : 
     593             : /*
     594             :  * LockHeldByMe -- test whether lock 'locktag' is held with mode 'lockmode'
     595             :  *      by the current transaction
     596             :  */
     597             : bool
     598           0 : LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
     599             : {
     600             :     LOCALLOCKTAG localtag;
     601             :     LOCALLOCK  *locallock;
     602             : 
     603             :     /*
     604             :      * See if there is a LOCALLOCK entry for this lock and lockmode
     605             :      */
     606           0 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     607           0 :     localtag.lock = *locktag;
     608           0 :     localtag.mode = lockmode;
     609             : 
     610           0 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     611             :                                           (void *) &localtag,
     612             :                                           HASH_FIND, NULL);
     613             : 
     614           0 :     return (locallock && locallock->nLocks > 0);
     615             : }
     616             : 
     617             : #ifdef USE_ASSERT_CHECKING
     618             : /*
     619             :  * GetLockMethodLocalHash -- return the hash of local locks, for modules that
     620             :  *      evaluate assertions based on all locks held.
     621             :  */
     622             : HTAB *
     623             : GetLockMethodLocalHash(void)
     624             : {
     625             :     return LockMethodLocalHash;
     626             : }
     627             : #endif
     628             : 
     629             : /*
     630             :  * LockHasWaiters -- look up 'locktag' and check if releasing this
     631             :  *      lock would wake up other processes waiting for it.
     632             :  */
     633             : bool
     634           0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
     635             : {
     636           0 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
     637             :     LockMethod  lockMethodTable;
     638             :     LOCALLOCKTAG localtag;
     639             :     LOCALLOCK  *locallock;
     640             :     LOCK       *lock;
     641             :     PROCLOCK   *proclock;
     642             :     LWLock     *partitionLock;
     643           0 :     bool        hasWaiters = false;
     644             : 
     645           0 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
     646           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
     647           0 :     lockMethodTable = LockMethods[lockmethodid];
     648           0 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
     649           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
     650             : 
     651             : #ifdef LOCK_DEBUG
     652             :     if (LOCK_DEBUG_ENABLED(locktag))
     653             :         elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
     654             :              locktag->locktag_field1, locktag->locktag_field2,
     655             :              lockMethodTable->lockModeNames[lockmode]);
     656             : #endif
     657             : 
     658             :     /*
     659             :      * Find the LOCALLOCK entry for this lock and lockmode
     660             :      */
     661           0 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     662           0 :     localtag.lock = *locktag;
     663           0 :     localtag.mode = lockmode;
     664             : 
     665           0 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     666             :                                           (void *) &localtag,
     667             :                                           HASH_FIND, NULL);
     668             : 
     669             :     /*
     670             :      * let the caller print its own error message, too. Do not ereport(ERROR).
     671             :      */
     672           0 :     if (!locallock || locallock->nLocks <= 0)
     673             :     {
     674           0 :         elog(WARNING, "you don't own a lock of type %s",
     675             :              lockMethodTable->lockModeNames[lockmode]);
     676           0 :         return false;
     677             :     }
     678             : 
     679             :     /*
     680             :      * Check the shared lock table.
     681             :      */
     682           0 :     partitionLock = LockHashPartitionLock(locallock->hashcode);
     683             : 
     684           0 :     LWLockAcquire(partitionLock, LW_SHARED);
     685             : 
     686             :     /*
     687             :      * We don't need to re-find the lock or proclock, since we kept their
     688             :      * addresses in the locallock table, and they couldn't have been removed
     689             :      * while we were holding a lock on them.
     690             :      */
     691           0 :     lock = locallock->lock;
     692             :     LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
     693           0 :     proclock = locallock->proclock;
     694             :     PROCLOCK_PRINT("LockHasWaiters: found", proclock);
     695             : 
     696             :     /*
     697             :      * Double-check that we are actually holding a lock of the type we want to
     698             :      * release.
     699             :      */
     700           0 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
     701             :     {
     702             :         PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
     703           0 :         LWLockRelease(partitionLock);
     704           0 :         elog(WARNING, "you don't own a lock of type %s",
     705             :              lockMethodTable->lockModeNames[lockmode]);
     706           0 :         RemoveLocalLock(locallock);
     707           0 :         return false;
     708             :     }
     709             : 
     710             :     /*
     711             :      * Do the checking.
     712             :      */
     713           0 :     if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
     714           0 :         hasWaiters = true;
     715             : 
     716           0 :     LWLockRelease(partitionLock);
     717             : 
     718           0 :     return hasWaiters;
     719             : }
     720             : 
     721             : /*
     722             :  * LockAcquire -- Check for lock conflicts, sleep if conflict found,
     723             :  *      set lock if/when no conflicts.
     724             :  *
     725             :  * Inputs:
     726             :  *  locktag: unique identifier for the lockable object
     727             :  *  lockmode: lock mode to acquire
     728             :  *  sessionLock: if true, acquire lock for session not current transaction
     729             :  *  dontWait: if true, don't wait to acquire lock
     730             :  *
     731             :  * Returns one of:
     732             :  *      LOCKACQUIRE_NOT_AVAIL       lock not available, and dontWait=true
     733             :  *      LOCKACQUIRE_OK              lock successfully acquired
     734             :  *      LOCKACQUIRE_ALREADY_HELD    incremented count for lock already held
     735             :  *      LOCKACQUIRE_ALREADY_CLEAR   incremented count for lock already clear
     736             :  *
     737             :  * In the normal case where dontWait=false and the caller doesn't need to
     738             :  * distinguish a freshly acquired lock from one already taken earlier in
     739             :  * this same transaction, there is no need to examine the return value.
     740             :  *
     741             :  * Side Effects: The lock is acquired and recorded in lock tables.
     742             :  *
     743             :  * NOTE: if we wait for the lock, there is no way to abort the wait
     744             :  * short of aborting the transaction.
     745             :  */
     746             : LockAcquireResult
     747      959996 : LockAcquire(const LOCKTAG *locktag,
     748             :             LOCKMODE lockmode,
     749             :             bool sessionLock,
     750             :             bool dontWait)
     751             : {
     752      959996 :     return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
     753             :                                true, NULL);
     754             : }
     755             : 
     756             : /*
     757             :  * LockAcquireExtended - allows us to specify additional options
     758             :  *
     759             :  * reportMemoryError specifies whether a lock request that fills the lock
     760             :  * table should generate an ERROR or not.  Passing "false" allows the caller
     761             :  * to attempt to recover from lock-table-full situations, perhaps by forcibly
     762             :  * cancelling other lock holders and then retrying.  Note, however, that the
     763             :  * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
     764             :  * in combination with dontWait = true, as the cause of failure couldn't be
     765             :  * distinguished.
     766             :  *
     767             :  * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
     768             :  * table entry if a lock is successfully acquired, or NULL if not.
     769             :  */
     770             : LockAcquireResult
     771    29225936 : LockAcquireExtended(const LOCKTAG *locktag,
     772             :                     LOCKMODE lockmode,
     773             :                     bool sessionLock,
     774             :                     bool dontWait,
     775             :                     bool reportMemoryError,
     776             :                     LOCALLOCK **locallockp)
     777             : {
     778    29225936 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
     779             :     LockMethod  lockMethodTable;
     780             :     LOCALLOCKTAG localtag;
     781             :     LOCALLOCK  *locallock;
     782             :     LOCK       *lock;
     783             :     PROCLOCK   *proclock;
     784             :     bool        found;
     785             :     ResourceOwner owner;
     786             :     uint32      hashcode;
     787             :     LWLock     *partitionLock;
     788             :     bool        found_conflict;
     789    29225936 :     bool        log_lock = false;
     790             : 
     791    29225936 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
     792           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
     793    29225936 :     lockMethodTable = LockMethods[lockmethodid];
     794    29225936 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
     795           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
     796             : 
     797    29225936 :     if (RecoveryInProgress() && !InRecovery &&
     798      114448 :         (locktag->locktag_type == LOCKTAG_OBJECT ||
     799      114448 :          locktag->locktag_type == LOCKTAG_RELATION) &&
     800             :         lockmode > RowExclusiveLock)
     801           0 :         ereport(ERROR,
     802             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
     803             :                  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
     804             :                         lockMethodTable->lockModeNames[lockmode]),
     805             :                  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
     806             : 
     807             : #ifdef LOCK_DEBUG
     808             :     if (LOCK_DEBUG_ENABLED(locktag))
     809             :         elog(LOG, "LockAcquire: lock [%u,%u] %s",
     810             :              locktag->locktag_field1, locktag->locktag_field2,
     811             :              lockMethodTable->lockModeNames[lockmode]);
     812             : #endif
     813             : 
     814             :     /* Identify owner for lock */
     815    29225936 :     if (sessionLock)
     816       44878 :         owner = NULL;
     817             :     else
     818    29181058 :         owner = CurrentResourceOwner;
     819             : 
     820             :     /*
     821             :      * Find or create a LOCALLOCK entry for this lock and lockmode
     822             :      */
     823    29225936 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     824    29225936 :     localtag.lock = *locktag;
     825    29225936 :     localtag.mode = lockmode;
     826             : 
     827    29225936 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     828             :                                           (void *) &localtag,
     829             :                                           HASH_ENTER, &found);
     830             : 
     831             :     /*
     832             :      * if it's a new locallock object, initialize it
     833             :      */
     834    29225936 :     if (!found)
     835             :     {
     836    27450044 :         locallock->lock = NULL;
     837    27450044 :         locallock->proclock = NULL;
     838    27450044 :         locallock->hashcode = LockTagHashCode(&(localtag.lock));
     839    27450044 :         locallock->nLocks = 0;
     840    27450044 :         locallock->holdsStrongLockCount = false;
     841    27450044 :         locallock->lockCleared = false;
     842    27450044 :         locallock->numLockOwners = 0;
     843    27450044 :         locallock->maxLockOwners = 8;
     844    27450044 :         locallock->lockOwners = NULL;    /* in case next line fails */
     845    27450044 :         locallock->lockOwners = (LOCALLOCKOWNER *)
     846    27450044 :             MemoryContextAlloc(TopMemoryContext,
     847    27450044 :                                locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
     848             :     }
     849             :     else
     850             :     {
     851             :         /* Make sure there will be room to remember the lock */
     852     1775892 :         if (locallock->numLockOwners >= locallock->maxLockOwners)
     853             :         {
     854          34 :             int         newsize = locallock->maxLockOwners * 2;
     855             : 
     856          34 :             locallock->lockOwners = (LOCALLOCKOWNER *)
     857          34 :                 repalloc(locallock->lockOwners,
     858             :                          newsize * sizeof(LOCALLOCKOWNER));
     859          34 :             locallock->maxLockOwners = newsize;
     860             :         }
     861             :     }
     862    29225936 :     hashcode = locallock->hashcode;
     863             : 
     864    29225936 :     if (locallockp)
     865    28265940 :         *locallockp = locallock;
     866             : 
     867             :     /*
     868             :      * If we already hold the lock, we can just increase the count locally.
     869             :      *
     870             :      * If lockCleared is already set, caller need not worry about absorbing
     871             :      * sinval messages related to the lock's object.
     872             :      */
     873    29225936 :     if (locallock->nLocks > 0)
     874             :     {
     875     1775892 :         GrantLockLocal(locallock, owner);
     876     1775892 :         if (locallock->lockCleared)
     877     1642620 :             return LOCKACQUIRE_ALREADY_CLEAR;
     878             :         else
     879      133272 :             return LOCKACQUIRE_ALREADY_HELD;
     880             :     }
     881             : 
     882             :     /*
     883             :      * We don't acquire any other heavyweight lock while holding the relation
     884             :      * extension lock.  We do allow to acquire the same relation extension
     885             :      * lock more than once but that case won't reach here.
     886             :      */
     887             :     Assert(!IsRelationExtensionLockHeld);
     888             : 
     889             :     /*
     890             :      * We don't acquire any other heavyweight lock while holding the page lock
     891             :      * except for relation extension.
     892             :      */
     893             :     Assert(!IsPageLockHeld ||
     894             :            (locktag->locktag_type == LOCKTAG_RELATION_EXTEND));
     895             : 
     896             :     /*
     897             :      * Prepare to emit a WAL record if acquisition of this lock needs to be
     898             :      * replayed in a standby server.
     899             :      *
     900             :      * Here we prepare to log; after lock is acquired we'll issue log record.
     901             :      * This arrangement simplifies error recovery in case the preparation step
     902             :      * fails.
     903             :      *
     904             :      * Only AccessExclusiveLocks can conflict with lock types that read-only
     905             :      * transactions can acquire in a standby server. Make sure this definition
     906             :      * matches the one in GetRunningTransactionLocks().
     907             :      */
     908    27450044 :     if (lockmode >= AccessExclusiveLock &&
     909      289660 :         locktag->locktag_type == LOCKTAG_RELATION &&
     910      218850 :         !RecoveryInProgress() &&
     911      218710 :         XLogStandbyInfoActive())
     912             :     {
     913      212556 :         LogAccessExclusiveLockPrepare();
     914      212556 :         log_lock = true;
     915             :     }
     916             : 
     917             :     /*
     918             :      * Attempt to take lock via fast path, if eligible.  But if we remember
     919             :      * having filled up the fast path array, we don't attempt to make any
     920             :      * further use of it until we release some locks.  It's possible that some
     921             :      * other backend has transferred some of those locks to the shared hash
     922             :      * table, leaving space free, but it's not worth acquiring the LWLock just
     923             :      * to check.  It's also possible that we're acquiring a second or third
     924             :      * lock type on a relation we have already locked using the fast-path, but
     925             :      * for now we don't worry about that case either.
     926             :      */
     927    27450044 :     if (EligibleForRelationFastPath(locktag, lockmode) &&
     928    25346370 :         FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
     929             :     {
     930    25048684 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
     931             :         bool        acquired;
     932             : 
     933             :         /*
     934             :          * LWLockAcquire acts as a memory sequencing point, so it's safe to
     935             :          * assume that any strong locker whose increment to
     936             :          * FastPathStrongRelationLocks->counts becomes visible after we test
     937             :          * it has yet to begin to transfer fast-path locks.
     938             :          */
     939    25048684 :         LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
     940    25048684 :         if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
     941      570848 :             acquired = false;
     942             :         else
     943    24477836 :             acquired = FastPathGrantRelationLock(locktag->locktag_field2,
     944             :                                                  lockmode);
     945    25048684 :         LWLockRelease(&MyProc->fpInfoLock);
     946    25048684 :         if (acquired)
     947             :         {
     948             :             /*
     949             :              * The locallock might contain stale pointers to some old shared
     950             :              * objects; we MUST reset these to null before considering the
     951             :              * lock to be acquired via fast-path.
     952             :              */
     953    24477836 :             locallock->lock = NULL;
     954    24477836 :             locallock->proclock = NULL;
     955    24477836 :             GrantLockLocal(locallock, owner);
     956    24477836 :             return LOCKACQUIRE_OK;
     957             :         }
     958             :     }
     959             : 
     960             :     /*
     961             :      * If this lock could potentially have been taken via the fast-path by
     962             :      * some other backend, we must (temporarily) disable further use of the
     963             :      * fast-path for this lock tag, and migrate any locks already taken via
     964             :      * this method to the main lock table.
     965             :      */
     966     2972208 :     if (ConflictsWithRelationFastPath(locktag, lockmode))
     967             :     {
     968      265140 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
     969             : 
     970      265140 :         BeginStrongLockAcquire(locallock, fasthashcode);
     971      265140 :         if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
     972             :                                            hashcode))
     973             :         {
     974           0 :             AbortStrongLockAcquire();
     975           0 :             if (locallock->nLocks == 0)
     976           0 :                 RemoveLocalLock(locallock);
     977           0 :             if (locallockp)
     978           0 :                 *locallockp = NULL;
     979           0 :             if (reportMemoryError)
     980           0 :                 ereport(ERROR,
     981             :                         (errcode(ERRCODE_OUT_OF_MEMORY),
     982             :                          errmsg("out of shared memory"),
     983             :                          errhint("You might need to increase max_locks_per_transaction.")));
     984             :             else
     985           0 :                 return LOCKACQUIRE_NOT_AVAIL;
     986             :         }
     987             :     }
     988             : 
     989             :     /*
     990             :      * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
     991             :      * take it via the fast-path, either, so we've got to mess with the shared
     992             :      * lock table.
     993             :      */
     994     2972208 :     partitionLock = LockHashPartitionLock(hashcode);
     995             : 
     996     2972208 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
     997             : 
     998             :     /*
     999             :      * Find or create lock and proclock entries with this tag
    1000             :      *
    1001             :      * Note: if the locallock object already existed, it might have a pointer
    1002             :      * to the lock already ... but we should not assume that that pointer is
    1003             :      * valid, since a lock object with zero hold and request counts can go
    1004             :      * away anytime.  So we have to use SetupLockInTable() to recompute the
    1005             :      * lock and proclock pointers, even if they're already set.
    1006             :      */
    1007     2972208 :     proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
    1008             :                                 hashcode, lockmode);
    1009     2972208 :     if (!proclock)
    1010             :     {
    1011           0 :         AbortStrongLockAcquire();
    1012           0 :         LWLockRelease(partitionLock);
    1013           0 :         if (locallock->nLocks == 0)
    1014           0 :             RemoveLocalLock(locallock);
    1015           0 :         if (locallockp)
    1016           0 :             *locallockp = NULL;
    1017           0 :         if (reportMemoryError)
    1018           0 :             ereport(ERROR,
    1019             :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    1020             :                      errmsg("out of shared memory"),
    1021             :                      errhint("You might need to increase max_locks_per_transaction.")));
    1022             :         else
    1023           0 :             return LOCKACQUIRE_NOT_AVAIL;
    1024             :     }
    1025     2972208 :     locallock->proclock = proclock;
    1026     2972208 :     lock = proclock->tag.myLock;
    1027     2972208 :     locallock->lock = lock;
    1028             : 
    1029             :     /*
    1030             :      * If lock requested conflicts with locks requested by waiters, must join
    1031             :      * wait queue.  Otherwise, check for conflict with already-held locks.
    1032             :      * (That's last because most complex check.)
    1033             :      */
    1034     2972208 :     if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
    1035          76 :         found_conflict = true;
    1036             :     else
    1037     2972132 :         found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
    1038             :                                             lock, proclock);
    1039             : 
    1040     2972208 :     if (!found_conflict)
    1041             :     {
    1042             :         /* No conflict with held or previously requested locks */
    1043     2970384 :         GrantLock(lock, proclock, lockmode);
    1044     2970384 :         GrantLockLocal(locallock, owner);
    1045             :     }
    1046             :     else
    1047             :     {
    1048             :         /*
    1049             :          * We can't acquire the lock immediately.  If caller specified no
    1050             :          * blocking, remove useless table entries and return
    1051             :          * LOCKACQUIRE_NOT_AVAIL without waiting.
    1052             :          */
    1053        1824 :         if (dontWait)
    1054             :         {
    1055         656 :             AbortStrongLockAcquire();
    1056         656 :             if (proclock->holdMask == 0)
    1057             :             {
    1058             :                 uint32      proclock_hashcode;
    1059             : 
    1060         250 :                 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
    1061         250 :                 SHMQueueDelete(&proclock->lockLink);
    1062         250 :                 SHMQueueDelete(&proclock->procLink);
    1063         250 :                 if (!hash_search_with_hash_value(LockMethodProcLockHash,
    1064         250 :                                                  (void *) &(proclock->tag),
    1065             :                                                  proclock_hashcode,
    1066             :                                                  HASH_REMOVE,
    1067             :                                                  NULL))
    1068           0 :                     elog(PANIC, "proclock table corrupted");
    1069             :             }
    1070             :             else
    1071             :                 PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
    1072         656 :             lock->nRequested--;
    1073         656 :             lock->requested[lockmode]--;
    1074             :             LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
    1075             :             Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
    1076             :             Assert(lock->nGranted <= lock->nRequested);
    1077         656 :             LWLockRelease(partitionLock);
    1078         656 :             if (locallock->nLocks == 0)
    1079         656 :                 RemoveLocalLock(locallock);
    1080         656 :             if (locallockp)
    1081         438 :                 *locallockp = NULL;
    1082         656 :             return LOCKACQUIRE_NOT_AVAIL;
    1083             :         }
    1084             : 
    1085             :         /*
    1086             :          * Set bitmask of locks this process already holds on this object.
    1087             :          */
    1088        1168 :         MyProc->heldLocks = proclock->holdMask;
    1089             : 
    1090             :         /*
    1091             :          * Sleep till someone wakes me up.
    1092             :          */
    1093             : 
    1094             :         TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
    1095             :                                          locktag->locktag_field2,
    1096             :                                          locktag->locktag_field3,
    1097             :                                          locktag->locktag_field4,
    1098             :                                          locktag->locktag_type,
    1099             :                                          lockmode);
    1100             : 
    1101        1168 :         WaitOnLock(locallock, owner);
    1102             : 
    1103             :         TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
    1104             :                                         locktag->locktag_field2,
    1105             :                                         locktag->locktag_field3,
    1106             :                                         locktag->locktag_field4,
    1107             :                                         locktag->locktag_type,
    1108             :                                         lockmode);
    1109             : 
    1110             :         /*
    1111             :          * NOTE: do not do any material change of state between here and
    1112             :          * return.  All required changes in locktable state must have been
    1113             :          * done when the lock was granted to us --- see notes in WaitOnLock.
    1114             :          */
    1115             : 
    1116             :         /*
    1117             :          * Check the proclock entry status, in case something in the ipc
    1118             :          * communication doesn't work correctly.
    1119             :          */
    1120        1148 :         if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    1121             :         {
    1122           0 :             AbortStrongLockAcquire();
    1123             :             PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
    1124             :             LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
    1125             :             /* Should we retry ? */
    1126           0 :             LWLockRelease(partitionLock);
    1127           0 :             elog(ERROR, "LockAcquire failed");
    1128             :         }
    1129             :         PROCLOCK_PRINT("LockAcquire: granted", proclock);
    1130             :         LOCK_PRINT("LockAcquire: granted", lock, lockmode);
    1131             :     }
    1132             : 
    1133             :     /*
    1134             :      * Lock state is fully up-to-date now; if we error out after this, no
    1135             :      * special error cleanup is required.
    1136             :      */
    1137     2971532 :     FinishStrongLockAcquire();
    1138             : 
    1139     2971532 :     LWLockRelease(partitionLock);
    1140             : 
    1141             :     /*
    1142             :      * Emit a WAL record if acquisition of this lock needs to be replayed in a
    1143             :      * standby server.
    1144             :      */
    1145     2971532 :     if (log_lock)
    1146             :     {
    1147             :         /*
    1148             :          * Decode the locktag back to the original values, to avoid sending
    1149             :          * lots of empty bytes with every message.  See lock.h to check how a
    1150             :          * locktag is defined for LOCKTAG_RELATION
    1151             :          */
    1152      212132 :         LogAccessExclusiveLock(locktag->locktag_field1,
    1153             :                                locktag->locktag_field2);
    1154             :     }
    1155             : 
    1156     2971532 :     return LOCKACQUIRE_OK;
    1157             : }
    1158             : 
    1159             : /*
    1160             :  * Find or create LOCK and PROCLOCK objects as needed for a new lock
    1161             :  * request.
    1162             :  *
    1163             :  * Returns the PROCLOCK object, or NULL if we failed to create the objects
    1164             :  * for lack of shared memory.
    1165             :  *
    1166             :  * The appropriate partition lock must be held at entry, and will be
    1167             :  * held at exit.
    1168             :  */
    1169             : static PROCLOCK *
    1170     2973752 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
    1171             :                  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
    1172             : {
    1173             :     LOCK       *lock;
    1174             :     PROCLOCK   *proclock;
    1175             :     PROCLOCKTAG proclocktag;
    1176             :     uint32      proclock_hashcode;
    1177             :     bool        found;
    1178             : 
    1179             :     /*
    1180             :      * Find or create a lock with this tag.
    1181             :      */
    1182     2973752 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    1183             :                                                 (const void *) locktag,
    1184             :                                                 hashcode,
    1185             :                                                 HASH_ENTER_NULL,
    1186             :                                                 &found);
    1187     2973752 :     if (!lock)
    1188           0 :         return NULL;
    1189             : 
    1190             :     /*
    1191             :      * if it's a new lock object, initialize it
    1192             :      */
    1193     2973752 :     if (!found)
    1194             :     {
    1195     2405460 :         lock->grantMask = 0;
    1196     2405460 :         lock->waitMask = 0;
    1197     2405460 :         SHMQueueInit(&(lock->procLocks));
    1198     2405460 :         ProcQueueInit(&(lock->waitProcs));
    1199     2405460 :         lock->nRequested = 0;
    1200     2405460 :         lock->nGranted = 0;
    1201    14432760 :         MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
    1202     2405460 :         MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
    1203             :         LOCK_PRINT("LockAcquire: new", lock, lockmode);
    1204             :     }
    1205             :     else
    1206             :     {
    1207             :         LOCK_PRINT("LockAcquire: found", lock, lockmode);
    1208             :         Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
    1209             :         Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
    1210             :         Assert(lock->nGranted <= lock->nRequested);
    1211             :     }
    1212             : 
    1213             :     /*
    1214             :      * Create the hash key for the proclock table.
    1215             :      */
    1216     2973752 :     proclocktag.myLock = lock;
    1217     2973752 :     proclocktag.myProc = proc;
    1218             : 
    1219     2973752 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    1220             : 
    1221             :     /*
    1222             :      * Find or create a proclock entry with this tag
    1223             :      */
    1224     2973752 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    1225             :                                                         (void *) &proclocktag,
    1226             :                                                         proclock_hashcode,
    1227             :                                                         HASH_ENTER_NULL,
    1228             :                                                         &found);
    1229     2973752 :     if (!proclock)
    1230             :     {
    1231             :         /* Oops, not enough shmem for the proclock */
    1232           0 :         if (lock->nRequested == 0)
    1233             :         {
    1234             :             /*
    1235             :              * There are no other requestors of this lock, so garbage-collect
    1236             :              * the lock object.  We *must* do this to avoid a permanent leak
    1237             :              * of shared memory, because there won't be anything to cause
    1238             :              * anyone to release the lock object later.
    1239             :              */
    1240             :             Assert(SHMQueueEmpty(&(lock->procLocks)));
    1241           0 :             if (!hash_search_with_hash_value(LockMethodLockHash,
    1242           0 :                                              (void *) &(lock->tag),
    1243             :                                              hashcode,
    1244             :                                              HASH_REMOVE,
    1245             :                                              NULL))
    1246           0 :                 elog(PANIC, "lock table corrupted");
    1247             :         }
    1248           0 :         return NULL;
    1249             :     }
    1250             : 
    1251             :     /*
    1252             :      * If new, initialize the new entry
    1253             :      */
    1254     2973752 :     if (!found)
    1255             :     {
    1256     2415596 :         uint32      partition = LockHashPartition(hashcode);
    1257             : 
    1258             :         /*
    1259             :          * It might seem unsafe to access proclock->groupLeader without a
    1260             :          * lock, but it's not really.  Either we are initializing a proclock
    1261             :          * on our own behalf, in which case our group leader isn't changing
    1262             :          * because the group leader for a process can only ever be changed by
    1263             :          * the process itself; or else we are transferring a fast-path lock to
    1264             :          * the main lock table, in which case that process can't change it's
    1265             :          * lock group leader without first releasing all of its locks (and in
    1266             :          * particular the one we are currently transferring).
    1267             :          */
    1268     4831192 :         proclock->groupLeader = proc->lockGroupLeader != NULL ?
    1269     2415596 :             proc->lockGroupLeader : proc;
    1270     2415596 :         proclock->holdMask = 0;
    1271     2415596 :         proclock->releaseMask = 0;
    1272             :         /* Add proclock to appropriate lists */
    1273     2415596 :         SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
    1274     2415596 :         SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
    1275             :                              &proclock->procLink);
    1276             :         PROCLOCK_PRINT("LockAcquire: new", proclock);
    1277             :     }
    1278             :     else
    1279             :     {
    1280             :         PROCLOCK_PRINT("LockAcquire: found", proclock);
    1281             :         Assert((proclock->holdMask & ~lock->grantMask) == 0);
    1282             : 
    1283             : #ifdef CHECK_DEADLOCK_RISK
    1284             : 
    1285             :         /*
    1286             :          * Issue warning if we already hold a lower-level lock on this object
    1287             :          * and do not hold a lock of the requested level or higher. This
    1288             :          * indicates a deadlock-prone coding practice (eg, we'd have a
    1289             :          * deadlock if another backend were following the same code path at
    1290             :          * about the same time).
    1291             :          *
    1292             :          * This is not enabled by default, because it may generate log entries
    1293             :          * about user-level coding practices that are in fact safe in context.
    1294             :          * It can be enabled to help find system-level problems.
    1295             :          *
    1296             :          * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
    1297             :          * better to use a table.  For now, though, this works.
    1298             :          */
    1299             :         {
    1300             :             int         i;
    1301             : 
    1302             :             for (i = lockMethodTable->numLockModes; i > 0; i--)
    1303             :             {
    1304             :                 if (proclock->holdMask & LOCKBIT_ON(i))
    1305             :                 {
    1306             :                     if (i >= (int) lockmode)
    1307             :                         break;  /* safe: we have a lock >= req level */
    1308             :                     elog(LOG, "deadlock risk: raising lock level"
    1309             :                          " from %s to %s on object %u/%u/%u",
    1310             :                          lockMethodTable->lockModeNames[i],
    1311             :                          lockMethodTable->lockModeNames[lockmode],
    1312             :                          lock->tag.locktag_field1, lock->tag.locktag_field2,
    1313             :                          lock->tag.locktag_field3);
    1314             :                     break;
    1315             :                 }
    1316             :             }
    1317             :         }
    1318             : #endif                          /* CHECK_DEADLOCK_RISK */
    1319             :     }
    1320             : 
    1321             :     /*
    1322             :      * lock->nRequested and lock->requested[] count the total number of
    1323             :      * requests, whether granted or waiting, so increment those immediately.
    1324             :      * The other counts don't increment till we get the lock.
    1325             :      */
    1326     2973752 :     lock->nRequested++;
    1327     2973752 :     lock->requested[lockmode]++;
    1328             :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    1329             : 
    1330             :     /*
    1331             :      * We shouldn't already hold the desired lock; else locallock table is
    1332             :      * broken.
    1333             :      */
    1334     2973752 :     if (proclock->holdMask & LOCKBIT_ON(lockmode))
    1335           0 :         elog(ERROR, "lock %s on object %u/%u/%u is already held",
    1336             :              lockMethodTable->lockModeNames[lockmode],
    1337             :              lock->tag.locktag_field1, lock->tag.locktag_field2,
    1338             :              lock->tag.locktag_field3);
    1339             : 
    1340     2973752 :     return proclock;
    1341             : }
    1342             : 
    1343             : /*
    1344             :  * Check and set/reset the flag that we hold the relation extension/page lock.
    1345             :  *
    1346             :  * It is callers responsibility that this function is called after
    1347             :  * acquiring/releasing the relation extension/page lock.
    1348             :  *
    1349             :  * Pass acquired as true if lock is acquired, false otherwise.
    1350             :  */
    1351             : static inline void
    1352    55261442 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
    1353             : {
    1354             : #ifdef USE_ASSERT_CHECKING
    1355             :     if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
    1356             :         IsRelationExtensionLockHeld = acquired;
    1357             :     else if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_PAGE)
    1358             :         IsPageLockHeld = acquired;
    1359             : 
    1360             : #endif
    1361    55261442 : }
    1362             : 
    1363             : /*
    1364             :  * Subroutine to free a locallock entry
    1365             :  */
    1366             : static void
    1367    27450044 : RemoveLocalLock(LOCALLOCK *locallock)
    1368             : {
    1369             :     int         i;
    1370             : 
    1371    27516518 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    1372             :     {
    1373       66474 :         if (locallock->lockOwners[i].owner != NULL)
    1374       66436 :             ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
    1375             :     }
    1376    27450044 :     locallock->numLockOwners = 0;
    1377    27450044 :     if (locallock->lockOwners != NULL)
    1378    27450044 :         pfree(locallock->lockOwners);
    1379    27450044 :     locallock->lockOwners = NULL;
    1380             : 
    1381    27450044 :     if (locallock->holdsStrongLockCount)
    1382             :     {
    1383             :         uint32      fasthashcode;
    1384             : 
    1385      264686 :         fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    1386             : 
    1387      264686 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1388             :         Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    1389      264686 :         FastPathStrongRelationLocks->count[fasthashcode]--;
    1390      264686 :         locallock->holdsStrongLockCount = false;
    1391      264686 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1392             :     }
    1393             : 
    1394    27450044 :     if (!hash_search(LockMethodLocalHash,
    1395    27450044 :                      (void *) &(locallock->tag),
    1396             :                      HASH_REMOVE, NULL))
    1397           0 :         elog(WARNING, "locallock table corrupted");
    1398             : 
    1399             :     /*
    1400             :      * Indicate that the lock is released for certain types of locks
    1401             :      */
    1402    27450044 :     CheckAndSetLockHeld(locallock, false);
    1403    27450044 : }
    1404             : 
    1405             : /*
    1406             :  * LockCheckConflicts -- test whether requested lock conflicts
    1407             :  *      with those already granted
    1408             :  *
    1409             :  * Returns true if conflict, false if no conflict.
    1410             :  *
    1411             :  * NOTES:
    1412             :  *      Here's what makes this complicated: one process's locks don't
    1413             :  * conflict with one another, no matter what purpose they are held for
    1414             :  * (eg, session and transaction locks do not conflict).  Nor do the locks
    1415             :  * of one process in a lock group conflict with those of another process in
    1416             :  * the same group.  So, we must subtract off these locks when determining
    1417             :  * whether the requested new lock conflicts with those already held.
    1418             :  */
    1419             : bool
    1420     2973344 : LockCheckConflicts(LockMethod lockMethodTable,
    1421             :                    LOCKMODE lockmode,
    1422             :                    LOCK *lock,
    1423             :                    PROCLOCK *proclock)
    1424             : {
    1425     2973344 :     int         numLockModes = lockMethodTable->numLockModes;
    1426             :     LOCKMASK    myLocks;
    1427     2973344 :     int         conflictMask = lockMethodTable->conflictTab[lockmode];
    1428             :     int         conflictsRemaining[MAX_LOCKMODES];
    1429     2973344 :     int         totalConflictsRemaining = 0;
    1430             :     int         i;
    1431             :     SHM_QUEUE  *procLocks;
    1432             :     PROCLOCK   *otherproclock;
    1433             : 
    1434             :     /*
    1435             :      * first check for global conflicts: If no locks conflict with my request,
    1436             :      * then I get the lock.
    1437             :      *
    1438             :      * Checking for conflict: lock->grantMask represents the types of
    1439             :      * currently held locks.  conflictTable[lockmode] has a bit set for each
    1440             :      * type of lock that conflicts with request.   Bitwise compare tells if
    1441             :      * there is a conflict.
    1442             :      */
    1443     2973344 :     if (!(conflictMask & lock->grantMask))
    1444             :     {
    1445             :         PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
    1446     2659182 :         return false;
    1447             :     }
    1448             : 
    1449             :     /*
    1450             :      * Rats.  Something conflicts.  But it could still be my own lock, or a
    1451             :      * lock held by another member of my locking group.  First, figure out how
    1452             :      * many conflicts remain after subtracting out any locks I hold myself.
    1453             :      */
    1454      314162 :     myLocks = proclock->holdMask;
    1455     2827458 :     for (i = 1; i <= numLockModes; i++)
    1456             :     {
    1457     2513296 :         if ((conflictMask & LOCKBIT_ON(i)) == 0)
    1458             :         {
    1459     1407624 :             conflictsRemaining[i] = 0;
    1460     1407624 :             continue;
    1461             :         }
    1462     1105672 :         conflictsRemaining[i] = lock->granted[i];
    1463     1105672 :         if (myLocks & LOCKBIT_ON(i))
    1464      317424 :             --conflictsRemaining[i];
    1465     1105672 :         totalConflictsRemaining += conflictsRemaining[i];
    1466             :     }
    1467             : 
    1468             :     /* If no conflicts remain, we get the lock. */
    1469      314162 :     if (totalConflictsRemaining == 0)
    1470             :     {
    1471             :         PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
    1472      311816 :         return false;
    1473             :     }
    1474             : 
    1475             :     /* If no group locking, it's definitely a conflict. */
    1476        2346 :     if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
    1477             :     {
    1478             :         Assert(proclock->tag.myProc == MyProc);
    1479             :         PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
    1480             :                        proclock);
    1481        1744 :         return true;
    1482             :     }
    1483             : 
    1484             :     /*
    1485             :      * The relation extension or page lock conflict even between the group
    1486             :      * members.
    1487             :      */
    1488         602 :     if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
    1489         568 :         (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
    1490             :     {
    1491             :         PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
    1492             :                        proclock);
    1493          34 :         return true;
    1494             :     }
    1495             : 
    1496             :     /*
    1497             :      * Locks held in conflicting modes by members of our own lock group are
    1498             :      * not real conflicts; we can subtract those out and see if we still have
    1499             :      * a conflict.  This is O(N) in the number of processes holding or
    1500             :      * awaiting locks on this object.  We could improve that by making the
    1501             :      * shared memory state more complex (and larger) but it doesn't seem worth
    1502             :      * it.
    1503             :      */
    1504         568 :     procLocks = &(lock->procLocks);
    1505             :     otherproclock = (PROCLOCK *)
    1506         568 :         SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
    1507         652 :     while (otherproclock != NULL)
    1508             :     {
    1509         622 :         if (proclock != otherproclock &&
    1510         592 :             proclock->groupLeader == otherproclock->groupLeader &&
    1511         542 :             (otherproclock->holdMask & conflictMask) != 0)
    1512             :         {
    1513         538 :             int         intersectMask = otherproclock->holdMask & conflictMask;
    1514             : 
    1515        4842 :             for (i = 1; i <= numLockModes; i++)
    1516             :             {
    1517        4304 :                 if ((intersectMask & LOCKBIT_ON(i)) != 0)
    1518             :                 {
    1519         544 :                     if (conflictsRemaining[i] <= 0)
    1520           0 :                         elog(PANIC, "proclocks held do not match lock");
    1521         544 :                     conflictsRemaining[i]--;
    1522         544 :                     totalConflictsRemaining--;
    1523             :                 }
    1524             :             }
    1525             : 
    1526         538 :             if (totalConflictsRemaining == 0)
    1527             :             {
    1528             :                 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
    1529             :                                proclock);
    1530         538 :                 return false;
    1531             :             }
    1532             :         }
    1533             :         otherproclock = (PROCLOCK *)
    1534          84 :             SHMQueueNext(procLocks, &otherproclock->lockLink,
    1535             :                          offsetof(PROCLOCK, lockLink));
    1536             :     }
    1537             : 
    1538             :     /* Nope, it's a real conflict. */
    1539             :     PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
    1540          30 :     return true;
    1541             : }
    1542             : 
    1543             : /*
    1544             :  * GrantLock -- update the lock and proclock data structures to show
    1545             :  *      the lock request has been granted.
    1546             :  *
    1547             :  * NOTE: if proc was blocked, it also needs to be removed from the wait list
    1548             :  * and have its waitLock/waitProcLock fields cleared.  That's not done here.
    1549             :  *
    1550             :  * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
    1551             :  * table entry; but since we may be awaking some other process, we can't do
    1552             :  * that here; it's done by GrantLockLocal, instead.
    1553             :  */
    1554             : void
    1555     2973172 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
    1556             : {
    1557     2973172 :     lock->nGranted++;
    1558     2973172 :     lock->granted[lockmode]++;
    1559     2973172 :     lock->grantMask |= LOCKBIT_ON(lockmode);
    1560     2973172 :     if (lock->granted[lockmode] == lock->requested[lockmode])
    1561     2973084 :         lock->waitMask &= LOCKBIT_OFF(lockmode);
    1562     2973172 :     proclock->holdMask |= LOCKBIT_ON(lockmode);
    1563             :     LOCK_PRINT("GrantLock", lock, lockmode);
    1564             :     Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    1565             :     Assert(lock->nGranted <= lock->nRequested);
    1566     2973172 : }
    1567             : 
    1568             : /*
    1569             :  * UnGrantLock -- opposite of GrantLock.
    1570             :  *
    1571             :  * Updates the lock and proclock data structures to show that the lock
    1572             :  * is no longer held nor requested by the current holder.
    1573             :  *
    1574             :  * Returns true if there were any waiters waiting on the lock that
    1575             :  * should now be woken up with ProcLockWakeup.
    1576             :  */
    1577             : static bool
    1578     2973100 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
    1579             :             PROCLOCK *proclock, LockMethod lockMethodTable)
    1580             : {
    1581     2973100 :     bool        wakeupNeeded = false;
    1582             : 
    1583             :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    1584             :     Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    1585             :     Assert(lock->nGranted <= lock->nRequested);
    1586             : 
    1587             :     /*
    1588             :      * fix the general lock stats
    1589             :      */
    1590     2973100 :     lock->nRequested--;
    1591     2973100 :     lock->requested[lockmode]--;
    1592     2973100 :     lock->nGranted--;
    1593     2973100 :     lock->granted[lockmode]--;
    1594             : 
    1595     2973100 :     if (lock->granted[lockmode] == 0)
    1596             :     {
    1597             :         /* change the conflict mask.  No more of this lock type. */
    1598     2966264 :         lock->grantMask &= LOCKBIT_OFF(lockmode);
    1599             :     }
    1600             : 
    1601             :     LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
    1602             : 
    1603             :     /*
    1604             :      * We need only run ProcLockWakeup if the released lock conflicts with at
    1605             :      * least one of the lock types requested by waiter(s).  Otherwise whatever
    1606             :      * conflict made them wait must still exist.  NOTE: before MVCC, we could
    1607             :      * skip wakeup if lock->granted[lockmode] was still positive. But that's
    1608             :      * not true anymore, because the remaining granted locks might belong to
    1609             :      * some waiter, who could now be awakened because he doesn't conflict with
    1610             :      * his own locks.
    1611             :      */
    1612     2973100 :     if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
    1613        1128 :         wakeupNeeded = true;
    1614             : 
    1615             :     /*
    1616             :      * Now fix the per-proclock state.
    1617             :      */
    1618     2973100 :     proclock->holdMask &= LOCKBIT_OFF(lockmode);
    1619             :     PROCLOCK_PRINT("UnGrantLock: updated", proclock);
    1620             : 
    1621     2973100 :     return wakeupNeeded;
    1622             : }
    1623             : 
    1624             : /*
    1625             :  * CleanUpLock -- clean up after releasing a lock.  We garbage-collect the
    1626             :  * proclock and lock objects if possible, and call ProcLockWakeup if there
    1627             :  * are remaining requests and the caller says it's OK.  (Normally, this
    1628             :  * should be called after UnGrantLock, and wakeupNeeded is the result from
    1629             :  * UnGrantLock.)
    1630             :  *
    1631             :  * The appropriate partition lock must be held at entry, and will be
    1632             :  * held at exit.
    1633             :  */
    1634             : static void
    1635     2935422 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
    1636             :             LockMethod lockMethodTable, uint32 hashcode,
    1637             :             bool wakeupNeeded)
    1638             : {
    1639             :     /*
    1640             :      * If this was my last hold on this lock, delete my entry in the proclock
    1641             :      * table.
    1642             :      */
    1643     2935422 :     if (proclock->holdMask == 0)
    1644             :     {
    1645             :         uint32      proclock_hashcode;
    1646             : 
    1647             :         PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
    1648     2415366 :         SHMQueueDelete(&proclock->lockLink);
    1649     2415366 :         SHMQueueDelete(&proclock->procLink);
    1650     2415366 :         proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
    1651     2415366 :         if (!hash_search_with_hash_value(LockMethodProcLockHash,
    1652     2415366 :                                          (void *) &(proclock->tag),
    1653             :                                          proclock_hashcode,
    1654             :                                          HASH_REMOVE,
    1655             :                                          NULL))
    1656           0 :             elog(PANIC, "proclock table corrupted");
    1657             :     }
    1658             : 
    1659     2935422 :     if (lock->nRequested == 0)
    1660             :     {
    1661             :         /*
    1662             :          * The caller just released the last lock, so garbage-collect the lock
    1663             :          * object.
    1664             :          */
    1665             :         LOCK_PRINT("CleanUpLock: deleting", lock, 0);
    1666             :         Assert(SHMQueueEmpty(&(lock->procLocks)));
    1667     2405474 :         if (!hash_search_with_hash_value(LockMethodLockHash,
    1668     2405474 :                                          (void *) &(lock->tag),
    1669             :                                          hashcode,
    1670             :                                          HASH_REMOVE,
    1671             :                                          NULL))
    1672           0 :             elog(PANIC, "lock table corrupted");
    1673             :     }
    1674      529948 :     else if (wakeupNeeded)
    1675             :     {
    1676             :         /* There are waiters on this lock, so wake them up. */
    1677        1148 :         ProcLockWakeup(lockMethodTable, lock);
    1678             :     }
    1679     2935422 : }
    1680             : 
    1681             : /*
    1682             :  * GrantLockLocal -- update the locallock data structures to show
    1683             :  *      the lock request has been granted.
    1684             :  *
    1685             :  * We expect that LockAcquire made sure there is room to add a new
    1686             :  * ResourceOwner entry.
    1687             :  */
    1688             : static void
    1689    29225260 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
    1690             : {
    1691    29225260 :     LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    1692             :     int         i;
    1693             : 
    1694             :     Assert(locallock->numLockOwners < locallock->maxLockOwners);
    1695             :     /* Count the total */
    1696    29225260 :     locallock->nLocks++;
    1697             :     /* Count the per-owner lock */
    1698    29788612 :     for (i = 0; i < locallock->numLockOwners; i++)
    1699             :     {
    1700     1977214 :         if (lockOwners[i].owner == owner)
    1701             :         {
    1702     1413862 :             lockOwners[i].nLocks++;
    1703     1413862 :             return;
    1704             :         }
    1705             :     }
    1706    27811398 :     lockOwners[i].owner = owner;
    1707    27811398 :     lockOwners[i].nLocks = 1;
    1708    27811398 :     locallock->numLockOwners++;
    1709    27811398 :     if (owner != NULL)
    1710    27766616 :         ResourceOwnerRememberLock(owner, locallock);
    1711             : 
    1712             :     /* Indicate that the lock is acquired for certain types of locks. */
    1713    27811398 :     CheckAndSetLockHeld(locallock, true);
    1714             : }
    1715             : 
    1716             : /*
    1717             :  * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
    1718             :  * and arrange for error cleanup if it fails
    1719             :  */
    1720             : static void
    1721      265140 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
    1722             : {
    1723             :     Assert(StrongLockInProgress == NULL);
    1724             :     Assert(locallock->holdsStrongLockCount == false);
    1725             : 
    1726             :     /*
    1727             :      * Adding to a memory location is not atomic, so we take a spinlock to
    1728             :      * ensure we don't collide with someone else trying to bump the count at
    1729             :      * the same time.
    1730             :      *
    1731             :      * XXX: It might be worth considering using an atomic fetch-and-add
    1732             :      * instruction here, on architectures where that is supported.
    1733             :      */
    1734             : 
    1735      265140 :     SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1736      265140 :     FastPathStrongRelationLocks->count[fasthashcode]++;
    1737      265140 :     locallock->holdsStrongLockCount = true;
    1738      265140 :     StrongLockInProgress = locallock;
    1739      265140 :     SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1740      265140 : }
    1741             : 
    1742             : /*
    1743             :  * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
    1744             :  * acquisition once it's no longer needed
    1745             :  */
    1746             : static void
    1747     2971532 : FinishStrongLockAcquire(void)
    1748             : {
    1749     2971532 :     StrongLockInProgress = NULL;
    1750     2971532 : }
    1751             : 
    1752             : /*
    1753             :  * AbortStrongLockAcquire - undo strong lock state changes performed by
    1754             :  * BeginStrongLockAcquire.
    1755             :  */
    1756             : void
    1757      519704 : AbortStrongLockAcquire(void)
    1758             : {
    1759             :     uint32      fasthashcode;
    1760      519704 :     LOCALLOCK  *locallock = StrongLockInProgress;
    1761             : 
    1762      519704 :     if (locallock == NULL)
    1763      519280 :         return;
    1764             : 
    1765         424 :     fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    1766             :     Assert(locallock->holdsStrongLockCount == true);
    1767         424 :     SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1768             :     Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    1769         424 :     FastPathStrongRelationLocks->count[fasthashcode]--;
    1770         424 :     locallock->holdsStrongLockCount = false;
    1771         424 :     StrongLockInProgress = NULL;
    1772         424 :     SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1773             : }
    1774             : 
    1775             : /*
    1776             :  * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
    1777             :  *      WaitOnLock on.
    1778             :  *
    1779             :  * proc.c needs this for the case where we are booted off the lock by
    1780             :  * timeout, but discover that someone granted us the lock anyway.
    1781             :  *
    1782             :  * We could just export GrantLockLocal, but that would require including
    1783             :  * resowner.h in lock.h, which creates circularity.
    1784             :  */
    1785             : void
    1786        1148 : GrantAwaitedLock(void)
    1787             : {
    1788        1148 :     GrantLockLocal(awaitedLock, awaitedOwner);
    1789        1148 : }
    1790             : 
    1791             : /*
    1792             :  * MarkLockClear -- mark an acquired lock as "clear"
    1793             :  *
    1794             :  * This means that we know we have absorbed all sinval messages that other
    1795             :  * sessions generated before we acquired this lock, and so we can confidently
    1796             :  * assume we know about any catalog changes protected by this lock.
    1797             :  */
    1798             : void
    1799    26667144 : MarkLockClear(LOCALLOCK *locallock)
    1800             : {
    1801             :     Assert(locallock->nLocks > 0);
    1802    26667144 :     locallock->lockCleared = true;
    1803    26667144 : }
    1804             : 
    1805             : /*
    1806             :  * WaitOnLock -- wait to acquire a lock
    1807             :  *
    1808             :  * Caller must have set MyProc->heldLocks to reflect locks already held
    1809             :  * on the lockable object by this process.
    1810             :  *
    1811             :  * The appropriate partition lock must be held at entry.
    1812             :  */
    1813             : static void
    1814        1168 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
    1815             : {
    1816        1168 :     LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
    1817        1168 :     LockMethod  lockMethodTable = LockMethods[lockmethodid];
    1818        1168 :     char       *volatile new_status = NULL;
    1819             : 
    1820             :     LOCK_PRINT("WaitOnLock: sleeping on lock",
    1821             :                locallock->lock, locallock->tag.mode);
    1822             : 
    1823             :     /* Report change to waiting status */
    1824        1168 :     if (update_process_title)
    1825             :     {
    1826             :         const char *old_status;
    1827             :         int         len;
    1828             : 
    1829        1168 :         old_status = get_ps_display(&len);
    1830        1168 :         new_status = (char *) palloc(len + 8 + 1);
    1831        1168 :         memcpy(new_status, old_status, len);
    1832        1168 :         strcpy(new_status + len, " waiting");
    1833        1168 :         set_ps_display(new_status);
    1834        1168 :         new_status[len] = '\0'; /* truncate off " waiting" */
    1835             :     }
    1836             : 
    1837        1168 :     awaitedLock = locallock;
    1838        1168 :     awaitedOwner = owner;
    1839             : 
    1840             :     /*
    1841             :      * NOTE: Think not to put any shared-state cleanup after the call to
    1842             :      * ProcSleep, in either the normal or failure path.  The lock state must
    1843             :      * be fully set by the lock grantor, or by CheckDeadLock if we give up
    1844             :      * waiting for the lock.  This is necessary because of the possibility
    1845             :      * that a cancel/die interrupt will interrupt ProcSleep after someone else
    1846             :      * grants us the lock, but before we've noticed it. Hence, after granting,
    1847             :      * the locktable state must fully reflect the fact that we own the lock;
    1848             :      * we can't do additional work on return.
    1849             :      *
    1850             :      * We can and do use a PG_TRY block to try to clean up after failure, but
    1851             :      * this still has a major limitation: elog(FATAL) can occur while waiting
    1852             :      * (eg, a "die" interrupt), and then control won't come back here. So all
    1853             :      * cleanup of essential state should happen in LockErrorCleanup, not here.
    1854             :      * We can use PG_TRY to clear the "waiting" status flags, since doing that
    1855             :      * is unimportant if the process exits.
    1856             :      */
    1857        1168 :     PG_TRY();
    1858             :     {
    1859        1168 :         if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
    1860             :         {
    1861             :             /*
    1862             :              * We failed as a result of a deadlock, see CheckDeadLock(). Quit
    1863             :              * now.
    1864             :              */
    1865           4 :             awaitedLock = NULL;
    1866             :             LOCK_PRINT("WaitOnLock: aborting on lock",
    1867             :                        locallock->lock, locallock->tag.mode);
    1868           4 :             LWLockRelease(LockHashPartitionLock(locallock->hashcode));
    1869             : 
    1870             :             /*
    1871             :              * Now that we aren't holding the partition lock, we can give an
    1872             :              * error report including details about the detected deadlock.
    1873             :              */
    1874           4 :             DeadLockReport();
    1875             :             /* not reached */
    1876             :         }
    1877             :     }
    1878          20 :     PG_CATCH();
    1879             :     {
    1880             :         /* In this path, awaitedLock remains set until LockErrorCleanup */
    1881             : 
    1882             :         /* Report change to non-waiting status */
    1883          20 :         if (update_process_title)
    1884             :         {
    1885          20 :             set_ps_display(new_status);
    1886          20 :             pfree(new_status);
    1887             :         }
    1888             : 
    1889             :         /* and propagate the error */
    1890          20 :         PG_RE_THROW();
    1891             :     }
    1892        1148 :     PG_END_TRY();
    1893             : 
    1894        1148 :     awaitedLock = NULL;
    1895             : 
    1896             :     /* Report change to non-waiting status */
    1897        1148 :     if (update_process_title)
    1898             :     {
    1899        1148 :         set_ps_display(new_status);
    1900        1148 :         pfree(new_status);
    1901             :     }
    1902             : 
    1903             :     LOCK_PRINT("WaitOnLock: wakeup on lock",
    1904             :                locallock->lock, locallock->tag.mode);
    1905        1148 : }
    1906             : 
    1907             : /*
    1908             :  * Remove a proc from the wait-queue it is on (caller must know it is on one).
    1909             :  * This is only used when the proc has failed to get the lock, so we set its
    1910             :  * waitStatus to STATUS_ERROR.
    1911             :  *
    1912             :  * Appropriate partition lock must be held by caller.  Also, caller is
    1913             :  * responsible for signaling the proc if needed.
    1914             :  *
    1915             :  * NB: this does not clean up any locallock object that may exist for the lock.
    1916             :  */
    1917             : void
    1918          20 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
    1919             : {
    1920          20 :     LOCK       *waitLock = proc->waitLock;
    1921          20 :     PROCLOCK   *proclock = proc->waitProcLock;
    1922          20 :     LOCKMODE    lockmode = proc->waitLockMode;
    1923          20 :     LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
    1924             : 
    1925             :     /* Make sure proc is waiting */
    1926             :     Assert(proc->waitStatus == STATUS_WAITING);
    1927             :     Assert(proc->links.next != NULL);
    1928             :     Assert(waitLock);
    1929             :     Assert(waitLock->waitProcs.size > 0);
    1930             :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
    1931             : 
    1932             :     /* Remove proc from lock's wait queue */
    1933          20 :     SHMQueueDelete(&(proc->links));
    1934          20 :     waitLock->waitProcs.size--;
    1935             : 
    1936             :     /* Undo increments of request counts by waiting process */
    1937             :     Assert(waitLock->nRequested > 0);
    1938             :     Assert(waitLock->nRequested > proc->waitLock->nGranted);
    1939          20 :     waitLock->nRequested--;
    1940             :     Assert(waitLock->requested[lockmode] > 0);
    1941          20 :     waitLock->requested[lockmode]--;
    1942             :     /* don't forget to clear waitMask bit if appropriate */
    1943          20 :     if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
    1944          18 :         waitLock->waitMask &= LOCKBIT_OFF(lockmode);
    1945             : 
    1946             :     /* Clean up the proc's own state, and pass it the ok/fail signal */
    1947          20 :     proc->waitLock = NULL;
    1948          20 :     proc->waitProcLock = NULL;
    1949          20 :     proc->waitStatus = STATUS_ERROR;
    1950             : 
    1951             :     /*
    1952             :      * Delete the proclock immediately if it represents no already-held locks.
    1953             :      * (This must happen now because if the owner of the lock decides to
    1954             :      * release it, and the requested/granted counts then go to zero,
    1955             :      * LockRelease expects there to be no remaining proclocks.) Then see if
    1956             :      * any other waiters for the lock can be woken up now.
    1957             :      */
    1958          20 :     CleanUpLock(waitLock, proclock,
    1959             :                 LockMethods[lockmethodid], hashcode,
    1960             :                 true);
    1961          20 : }
    1962             : 
    1963             : /*
    1964             :  * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
    1965             :  *      Release a session lock if 'sessionLock' is true, else release a
    1966             :  *      regular transaction lock.
    1967             :  *
    1968             :  * Side Effects: find any waiting processes that are now wakable,
    1969             :  *      grant them their requested locks and awaken them.
    1970             :  *      (We have to grant the lock here to avoid a race between
    1971             :  *      the waking process and any new process to
    1972             :  *      come along and request the lock.)
    1973             :  */
    1974             : bool
    1975    26760588 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
    1976             : {
    1977    26760588 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    1978             :     LockMethod  lockMethodTable;
    1979             :     LOCALLOCKTAG localtag;
    1980             :     LOCALLOCK  *locallock;
    1981             :     LOCK       *lock;
    1982             :     PROCLOCK   *proclock;
    1983             :     LWLock     *partitionLock;
    1984             :     bool        wakeupNeeded;
    1985             : 
    1986    26760588 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    1987           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    1988    26760588 :     lockMethodTable = LockMethods[lockmethodid];
    1989    26760588 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
    1990           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
    1991             : 
    1992             : #ifdef LOCK_DEBUG
    1993             :     if (LOCK_DEBUG_ENABLED(locktag))
    1994             :         elog(LOG, "LockRelease: lock [%u,%u] %s",
    1995             :              locktag->locktag_field1, locktag->locktag_field2,
    1996             :              lockMethodTable->lockModeNames[lockmode]);
    1997             : #endif
    1998             : 
    1999             :     /*
    2000             :      * Find the LOCALLOCK entry for this lock and lockmode
    2001             :      */
    2002    26760588 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
    2003    26760588 :     localtag.lock = *locktag;
    2004    26760588 :     localtag.mode = lockmode;
    2005             : 
    2006    26760588 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
    2007             :                                           (void *) &localtag,
    2008             :                                           HASH_FIND, NULL);
    2009             : 
    2010             :     /*
    2011             :      * let the caller print its own error message, too. Do not ereport(ERROR).
    2012             :      */
    2013    26760588 :     if (!locallock || locallock->nLocks <= 0)
    2014             :     {
    2015          16 :         elog(WARNING, "you don't own a lock of type %s",
    2016             :              lockMethodTable->lockModeNames[lockmode]);
    2017          16 :         return false;
    2018             :     }
    2019             : 
    2020             :     /*
    2021             :      * Decrease the count for the resource owner.
    2022             :      */
    2023             :     {
    2024    26760572 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    2025             :         ResourceOwner owner;
    2026             :         int         i;
    2027             : 
    2028             :         /* Identify owner for lock */
    2029    26760572 :         if (sessionLock)
    2030       44776 :             owner = NULL;
    2031             :         else
    2032    26715796 :             owner = CurrentResourceOwner;
    2033             : 
    2034    26760658 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2035             :         {
    2036    26760642 :             if (lockOwners[i].owner == owner)
    2037             :             {
    2038             :                 Assert(lockOwners[i].nLocks > 0);
    2039    26760556 :                 if (--lockOwners[i].nLocks == 0)
    2040             :                 {
    2041    26186406 :                     if (owner != NULL)
    2042    26141662 :                         ResourceOwnerForgetLock(owner, locallock);
    2043             :                     /* compact out unused slot */
    2044    26186406 :                     locallock->numLockOwners--;
    2045    26186406 :                     if (i < locallock->numLockOwners)
    2046          26 :                         lockOwners[i] = lockOwners[locallock->numLockOwners];
    2047             :                 }
    2048    26760556 :                 break;
    2049             :             }
    2050             :         }
    2051    26760572 :         if (i < 0)
    2052             :         {
    2053             :             /* don't release a lock belonging to another owner */
    2054          16 :             elog(WARNING, "you don't own a lock of type %s",
    2055             :                  lockMethodTable->lockModeNames[lockmode]);
    2056          16 :             return false;
    2057             :         }
    2058             :     }
    2059             : 
    2060             :     /*
    2061             :      * Decrease the total local count.  If we're still holding the lock, we're
    2062             :      * done.
    2063             :      */
    2064    26760556 :     locallock->nLocks--;
    2065             : 
    2066    26760556 :     if (locallock->nLocks > 0)
    2067      777462 :         return true;
    2068             : 
    2069             :     /*
    2070             :      * At this point we can no longer suppose we are clear of invalidation
    2071             :      * messages related to this lock.  Although we'll delete the LOCALLOCK
    2072             :      * object before any intentional return from this routine, it seems worth
    2073             :      * the trouble to explicitly reset lockCleared right now, just in case
    2074             :      * some error prevents us from deleting the LOCALLOCK.
    2075             :      */
    2076    25983094 :     locallock->lockCleared = false;
    2077             : 
    2078             :     /* Attempt fast release of any lock eligible for the fast path. */
    2079    25983094 :     if (EligibleForRelationFastPath(locktag, lockmode) &&
    2080    24708230 :         FastPathLocalUseCount > 0)
    2081             :     {
    2082             :         bool        released;
    2083             : 
    2084             :         /*
    2085             :          * We might not find the lock here, even if we originally entered it
    2086             :          * here.  Another backend may have moved it to the main table.
    2087             :          */
    2088    24239514 :         LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2089    24239514 :         released = FastPathUnGrantRelationLock(locktag->locktag_field2,
    2090             :                                                lockmode);
    2091    24239514 :         LWLockRelease(&MyProc->fpInfoLock);
    2092    24239514 :         if (released)
    2093             :         {
    2094    23877996 :             RemoveLocalLock(locallock);
    2095    23877996 :             return true;
    2096             :         }
    2097             :     }
    2098             : 
    2099             :     /*
    2100             :      * Otherwise we've got to mess with the shared lock table.
    2101             :      */
    2102     2105098 :     partitionLock = LockHashPartitionLock(locallock->hashcode);
    2103             : 
    2104     2105098 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2105             : 
    2106             :     /*
    2107             :      * Normally, we don't need to re-find the lock or proclock, since we kept
    2108             :      * their addresses in the locallock table, and they couldn't have been
    2109             :      * removed while we were holding a lock on them.  But it's possible that
    2110             :      * the lock was taken fast-path and has since been moved to the main hash
    2111             :      * table by another backend, in which case we will need to look up the
    2112             :      * objects here.  We assume the lock field is NULL if so.
    2113             :      */
    2114     2105098 :     lock = locallock->lock;
    2115     2105098 :     if (!lock)
    2116             :     {
    2117             :         PROCLOCKTAG proclocktag;
    2118             : 
    2119             :         Assert(EligibleForRelationFastPath(locktag, lockmode));
    2120           6 :         lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    2121             :                                                     (const void *) locktag,
    2122             :                                                     locallock->hashcode,
    2123             :                                                     HASH_FIND,
    2124             :                                                     NULL);
    2125           6 :         if (!lock)
    2126           0 :             elog(ERROR, "failed to re-find shared lock object");
    2127           6 :         locallock->lock = lock;
    2128             : 
    2129           6 :         proclocktag.myLock = lock;
    2130           6 :         proclocktag.myProc = MyProc;
    2131           6 :         locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
    2132             :                                                        (void *) &proclocktag,
    2133             :                                                        HASH_FIND,
    2134             :                                                        NULL);
    2135           6 :         if (!locallock->proclock)
    2136           0 :             elog(ERROR, "failed to re-find shared proclock object");
    2137             :     }
    2138             :     LOCK_PRINT("LockRelease: found", lock, lockmode);
    2139     2105098 :     proclock = locallock->proclock;
    2140             :     PROCLOCK_PRINT("LockRelease: found", proclock);
    2141             : 
    2142             :     /*
    2143             :      * Double-check that we are actually holding a lock of the type we want to
    2144             :      * release.
    2145             :      */
    2146     2105098 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    2147             :     {
    2148             :         PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
    2149           0 :         LWLockRelease(partitionLock);
    2150           0 :         elog(WARNING, "you don't own a lock of type %s",
    2151             :              lockMethodTable->lockModeNames[lockmode]);
    2152           0 :         RemoveLocalLock(locallock);
    2153           0 :         return false;
    2154             :     }
    2155             : 
    2156             :     /*
    2157             :      * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
    2158             :      */
    2159     2105098 :     wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
    2160             : 
    2161     2105098 :     CleanUpLock(lock, proclock,
    2162             :                 lockMethodTable, locallock->hashcode,
    2163             :                 wakeupNeeded);
    2164             : 
    2165     2105098 :     LWLockRelease(partitionLock);
    2166             : 
    2167     2105098 :     RemoveLocalLock(locallock);
    2168     2105098 :     return true;
    2169             : }
    2170             : 
    2171             : /*
    2172             :  * LockReleaseAll -- Release all locks of the specified lock method that
    2173             :  *      are held by the current process.
    2174             :  *
    2175             :  * Well, not necessarily *all* locks.  The available behaviors are:
    2176             :  *      allLocks == true: release all locks including session locks.
    2177             :  *      allLocks == false: release all non-session locks.
    2178             :  */
    2179             : void
    2180     1001650 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
    2181             : {
    2182             :     HASH_SEQ_STATUS status;
    2183             :     LockMethod  lockMethodTable;
    2184             :     int         i,
    2185             :                 numLockModes;
    2186             :     LOCALLOCK  *locallock;
    2187             :     LOCK       *lock;
    2188             :     PROCLOCK   *proclock;
    2189             :     int         partition;
    2190     1001650 :     bool        have_fast_path_lwlock = false;
    2191             : 
    2192     1001650 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2193           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2194     1001650 :     lockMethodTable = LockMethods[lockmethodid];
    2195             : 
    2196             : #ifdef LOCK_DEBUG
    2197             :     if (*(lockMethodTable->trace_flag))
    2198             :         elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
    2199             : #endif
    2200             : 
    2201             :     /*
    2202             :      * Get rid of our fast-path VXID lock, if appropriate.  Note that this is
    2203             :      * the only way that the lock we hold on our own VXID can ever get
    2204             :      * released: it is always and only released when a toplevel transaction
    2205             :      * ends.
    2206             :      */
    2207     1001650 :     if (lockmethodid == DEFAULT_LOCKMETHOD)
    2208      495284 :         VirtualXactLockTableCleanup();
    2209             : 
    2210     1001650 :     numLockModes = lockMethodTable->numLockModes;
    2211             : 
    2212             :     /*
    2213             :      * First we run through the locallock table and get rid of unwanted
    2214             :      * entries, then we scan the process's proclocks and get rid of those. We
    2215             :      * do this separately because we may have multiple locallock entries
    2216             :      * pointing to the same proclock, and we daren't end up with any dangling
    2217             :      * pointers.  Fast-path locks are cleaned up during the locallock table
    2218             :      * scan, though.
    2219             :      */
    2220     1001650 :     hash_seq_init(&status, LockMethodLocalHash);
    2221             : 
    2222     2598632 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2223             :     {
    2224             :         /*
    2225             :          * If the LOCALLOCK entry is unused, we must've run out of shared
    2226             :          * memory while trying to set up this lock.  Just forget the local
    2227             :          * entry.
    2228             :          */
    2229     1596982 :         if (locallock->nLocks == 0)
    2230             :         {
    2231          20 :             RemoveLocalLock(locallock);
    2232          20 :             continue;
    2233             :         }
    2234             : 
    2235             :         /* Ignore items that are not of the lockmethod to be removed */
    2236     1596962 :         if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
    2237       65504 :             continue;
    2238             : 
    2239             :         /*
    2240             :          * If we are asked to release all locks, we can just zap the entry.
    2241             :          * Otherwise, must scan to see if there are session locks. We assume
    2242             :          * there is at most one lockOwners entry for session locks.
    2243             :          */
    2244     1531458 :         if (!allLocks)
    2245             :         {
    2246     1465838 :             LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    2247             : 
    2248             :             /* If session lock is above array position 0, move it down to 0 */
    2249     2978842 :             for (i = 0; i < locallock->numLockOwners; i++)
    2250             :             {
    2251     1513004 :                 if (lockOwners[i].owner == NULL)
    2252       65362 :                     lockOwners[0] = lockOwners[i];
    2253             :                 else
    2254     1447642 :                     ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
    2255             :             }
    2256             : 
    2257     1465838 :             if (locallock->numLockOwners > 0 &&
    2258     1465838 :                 lockOwners[0].owner == NULL &&
    2259       65362 :                 lockOwners[0].nLocks > 0)
    2260             :             {
    2261             :                 /* Fix the locallock to show just the session locks */
    2262       65362 :                 locallock->nLocks = lockOwners[0].nLocks;
    2263       65362 :                 locallock->numLockOwners = 1;
    2264             :                 /* We aren't deleting this locallock, so done */
    2265       65362 :                 continue;
    2266             :             }
    2267             :             else
    2268     1400476 :                 locallock->numLockOwners = 0;
    2269             :         }
    2270             : 
    2271             :         /*
    2272             :          * If the lock or proclock pointers are NULL, this lock was taken via
    2273             :          * the relation fast-path (and is not known to have been transferred).
    2274             :          */
    2275     1466096 :         if (locallock->proclock == NULL || locallock->lock == NULL)
    2276             :         {
    2277      599774 :             LOCKMODE    lockmode = locallock->tag.mode;
    2278             :             Oid         relid;
    2279             : 
    2280             :             /* Verify that a fast-path lock is what we've got. */
    2281      599774 :             if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
    2282           0 :                 elog(PANIC, "locallock table corrupted");
    2283             : 
    2284             :             /*
    2285             :              * If we don't currently hold the LWLock that protects our
    2286             :              * fast-path data structures, we must acquire it before attempting
    2287             :              * to release the lock via the fast-path.  We will continue to
    2288             :              * hold the LWLock until we're done scanning the locallock table,
    2289             :              * unless we hit a transferred fast-path lock.  (XXX is this
    2290             :              * really such a good idea?  There could be a lot of entries ...)
    2291             :              */
    2292      599774 :             if (!have_fast_path_lwlock)
    2293             :             {
    2294      223020 :                 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2295      223020 :                 have_fast_path_lwlock = true;
    2296             :             }
    2297             : 
    2298             :             /* Attempt fast-path release. */
    2299      599774 :             relid = locallock->tag.lock.locktag_field2;
    2300      599774 :             if (FastPathUnGrantRelationLock(relid, lockmode))
    2301             :             {
    2302      598346 :                 RemoveLocalLock(locallock);
    2303      598346 :                 continue;
    2304             :             }
    2305             : 
    2306             :             /*
    2307             :              * Our lock, originally taken via the fast path, has been
    2308             :              * transferred to the main lock table.  That's going to require
    2309             :              * some extra work, so release our fast-path lock before starting.
    2310             :              */
    2311        1428 :             LWLockRelease(&MyProc->fpInfoLock);
    2312        1428 :             have_fast_path_lwlock = false;
    2313             : 
    2314             :             /*
    2315             :              * Now dump the lock.  We haven't got a pointer to the LOCK or
    2316             :              * PROCLOCK in this case, so we have to handle this a bit
    2317             :              * differently than a normal lock release.  Unfortunately, this
    2318             :              * requires an extra LWLock acquire-and-release cycle on the
    2319             :              * partitionLock, but hopefully it shouldn't happen often.
    2320             :              */
    2321        1428 :             LockRefindAndRelease(lockMethodTable, MyProc,
    2322             :                                  &locallock->tag.lock, lockmode, false);
    2323        1428 :             RemoveLocalLock(locallock);
    2324        1428 :             continue;
    2325             :         }
    2326             : 
    2327             :         /* Mark the proclock to show we need to release this lockmode */
    2328      866322 :         if (locallock->nLocks > 0)
    2329      866322 :             locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
    2330             : 
    2331             :         /* And remove the locallock hashtable entry */
    2332      866322 :         RemoveLocalLock(locallock);
    2333             :     }
    2334             : 
    2335             :     /* Done with the fast-path data structures */
    2336     1001650 :     if (have_fast_path_lwlock)
    2337      221592 :         LWLockRelease(&MyProc->fpInfoLock);
    2338             : 
    2339             :     /*
    2340             :      * Now, scan each lock partition separately.
    2341             :      */
    2342    17028050 :     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
    2343             :     {
    2344             :         LWLock     *partitionLock;
    2345    16026400 :         SHM_QUEUE  *procLocks = &(MyProc->myProcLocks[partition]);
    2346             :         PROCLOCK   *nextplock;
    2347             : 
    2348    16026400 :         partitionLock = LockHashPartitionLockByIndex(partition);
    2349             : 
    2350             :         /*
    2351             :          * If the proclock list for this partition is empty, we can skip
    2352             :          * acquiring the partition lock.  This optimization is trickier than
    2353             :          * it looks, because another backend could be in process of adding
    2354             :          * something to our proclock list due to promoting one of our
    2355             :          * fast-path locks.  However, any such lock must be one that we
    2356             :          * decided not to delete above, so it's okay to skip it again now;
    2357             :          * we'd just decide not to delete it again.  We must, however, be
    2358             :          * careful to re-fetch the list header once we've acquired the
    2359             :          * partition lock, to be sure we have a valid, up-to-date pointer.
    2360             :          * (There is probably no significant risk if pointer fetch/store is
    2361             :          * atomic, but we don't wish to assume that.)
    2362             :          *
    2363             :          * XXX This argument assumes that the locallock table correctly
    2364             :          * represents all of our fast-path locks.  While allLocks mode
    2365             :          * guarantees to clean up all of our normal locks regardless of the
    2366             :          * locallock situation, we lose that guarantee for fast-path locks.
    2367             :          * This is not ideal.
    2368             :          */
    2369    16026400 :         if (SHMQueueNext(procLocks, procLocks,
    2370             :                          offsetof(PROCLOCK, procLink)) == NULL)
    2371    15257654 :             continue;           /* needn't examine this partition */
    2372             : 
    2373      768746 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2374             : 
    2375     1727692 :         for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
    2376             :                                                   offsetof(PROCLOCK, procLink));
    2377             :              proclock;
    2378      958946 :              proclock = nextplock)
    2379             :         {
    2380      958946 :             bool        wakeupNeeded = false;
    2381             : 
    2382             :             /* Get link first, since we may unlink/delete this proclock */
    2383             :             nextplock = (PROCLOCK *)
    2384      958946 :                 SHMQueueNext(procLocks, &proclock->procLink,
    2385             :                              offsetof(PROCLOCK, procLink));
    2386             : 
    2387             :             Assert(proclock->tag.myProc == MyProc);
    2388             : 
    2389      958946 :             lock = proclock->tag.myLock;
    2390             : 
    2391             :             /* Ignore items that are not of the lockmethod to be removed */
    2392      958946 :             if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
    2393       65504 :                 continue;
    2394             : 
    2395             :             /*
    2396             :              * In allLocks mode, force release of all locks even if locallock
    2397             :              * table had problems
    2398             :              */
    2399      893442 :             if (allLocks)
    2400       43006 :                 proclock->releaseMask = proclock->holdMask;
    2401             :             else
    2402             :                 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
    2403             : 
    2404             :             /*
    2405             :              * Ignore items that have nothing to be released, unless they have
    2406             :              * holdMask == 0 and are therefore recyclable
    2407             :              */
    2408      893442 :             if (proclock->releaseMask == 0 && proclock->holdMask != 0)
    2409       64818 :                 continue;
    2410             : 
    2411             :             PROCLOCK_PRINT("LockReleaseAll", proclock);
    2412             :             LOCK_PRINT("LockReleaseAll", lock, 0);
    2413             :             Assert(lock->nRequested >= 0);
    2414             :             Assert(lock->nGranted >= 0);
    2415             :             Assert(lock->nGranted <= lock->nRequested);
    2416             :             Assert((proclock->holdMask & ~lock->grantMask) == 0);
    2417             : 
    2418             :             /*
    2419             :              * Release the previously-marked lock modes
    2420             :              */
    2421     7457616 :             for (i = 1; i <= numLockModes; i++)
    2422             :             {
    2423     6628992 :                 if (proclock->releaseMask & LOCKBIT_ON(i))
    2424      866322 :                     wakeupNeeded |= UnGrantLock(lock, i, proclock,
    2425             :                                                 lockMethodTable);
    2426             :             }
    2427             :             Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
    2428             :             Assert(lock->nGranted <= lock->nRequested);
    2429             :             LOCK_PRINT("LockReleaseAll: updated", lock, 0);
    2430             : 
    2431      828624 :             proclock->releaseMask = 0;
    2432             : 
    2433             :             /* CleanUpLock will wake up waiters if needed. */
    2434      828624 :             CleanUpLock(lock, proclock,
    2435             :                         lockMethodTable,
    2436      828624 :                         LockTagHashCode(&lock->tag),
    2437             :                         wakeupNeeded);
    2438             :         }                       /* loop over PROCLOCKs within this partition */
    2439             : 
    2440      768746 :         LWLockRelease(partitionLock);
    2441             :     }                           /* loop over partitions */
    2442             : 
    2443             : #ifdef LOCK_DEBUG
    2444             :     if (*(lockMethodTable->trace_flag))
    2445             :         elog(LOG, "LockReleaseAll done");
    2446             : #endif
    2447     1001650 : }
    2448             : 
    2449             : /*
    2450             :  * LockReleaseSession -- Release all session locks of the specified lock method
    2451             :  *      that are held by the current process.
    2452             :  */
    2453             : void
    2454         210 : LockReleaseSession(LOCKMETHODID lockmethodid)
    2455             : {
    2456             :     HASH_SEQ_STATUS status;
    2457             :     LOCALLOCK  *locallock;
    2458             : 
    2459         210 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2460           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2461             : 
    2462         210 :     hash_seq_init(&status, LockMethodLocalHash);
    2463             : 
    2464         372 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2465             :     {
    2466             :         /* Ignore items that are not of the specified lock method */
    2467         162 :         if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
    2468          18 :             continue;
    2469             : 
    2470         144 :         ReleaseLockIfHeld(locallock, true);
    2471             :     }
    2472         210 : }
    2473             : 
    2474             : /*
    2475             :  * LockReleaseCurrentOwner
    2476             :  *      Release all locks belonging to CurrentResourceOwner
    2477             :  *
    2478             :  * If the caller knows what those locks are, it can pass them as an array.
    2479             :  * That speeds up the call significantly, when a lot of locks are held.
    2480             :  * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
    2481             :  * table to find them.
    2482             :  */
    2483             : void
    2484        3086 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
    2485             : {
    2486        3086 :     if (locallocks == NULL)
    2487             :     {
    2488             :         HASH_SEQ_STATUS status;
    2489             :         LOCALLOCK  *locallock;
    2490             : 
    2491           6 :         hash_seq_init(&status, LockMethodLocalHash);
    2492             : 
    2493         366 :         while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2494         360 :             ReleaseLockIfHeld(locallock, false);
    2495             :     }
    2496             :     else
    2497             :     {
    2498             :         int         i;
    2499             : 
    2500        4508 :         for (i = nlocks - 1; i >= 0; i--)
    2501        1428 :             ReleaseLockIfHeld(locallocks[i], false);
    2502             :     }
    2503        3086 : }
    2504             : 
    2505             : /*
    2506             :  * ReleaseLockIfHeld
    2507             :  *      Release any session-level locks on this lockable object if sessionLock
    2508             :  *      is true; else, release any locks held by CurrentResourceOwner.
    2509             :  *
    2510             :  * It is tempting to pass this a ResourceOwner pointer (or NULL for session
    2511             :  * locks), but without refactoring LockRelease() we cannot support releasing
    2512             :  * locks belonging to resource owners other than CurrentResourceOwner.
    2513             :  * If we were to refactor, it'd be a good idea to fix it so we don't have to
    2514             :  * do a hashtable lookup of the locallock, too.  However, currently this
    2515             :  * function isn't used heavily enough to justify refactoring for its
    2516             :  * convenience.
    2517             :  */
    2518             : static void
    2519        1932 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
    2520             : {
    2521             :     ResourceOwner owner;
    2522             :     LOCALLOCKOWNER *lockOwners;
    2523             :     int         i;
    2524             : 
    2525             :     /* Identify owner for lock (must match LockRelease!) */
    2526        1932 :     if (sessionLock)
    2527         144 :         owner = NULL;
    2528             :     else
    2529        1788 :         owner = CurrentResourceOwner;
    2530             : 
    2531             :     /* Scan to see if there are any locks belonging to the target owner */
    2532        1932 :     lockOwners = locallock->lockOwners;
    2533        2186 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2534             :     {
    2535        1932 :         if (lockOwners[i].owner == owner)
    2536             :         {
    2537             :             Assert(lockOwners[i].nLocks > 0);
    2538        1678 :             if (lockOwners[i].nLocks < locallock->nLocks)
    2539             :             {
    2540             :                 /*
    2541             :                  * We will still hold this lock after forgetting this
    2542             :                  * ResourceOwner.
    2543             :                  */
    2544         438 :                 locallock->nLocks -= lockOwners[i].nLocks;
    2545             :                 /* compact out unused slot */
    2546         438 :                 locallock->numLockOwners--;
    2547         438 :                 if (owner != NULL)
    2548         438 :                     ResourceOwnerForgetLock(owner, locallock);
    2549         438 :                 if (i < locallock->numLockOwners)
    2550           0 :                     lockOwners[i] = lockOwners[locallock->numLockOwners];
    2551             :             }
    2552             :             else
    2553             :             {
    2554             :                 Assert(lockOwners[i].nLocks == locallock->nLocks);
    2555             :                 /* We want to call LockRelease just once */
    2556        1240 :                 lockOwners[i].nLocks = 1;
    2557        1240 :                 locallock->nLocks = 1;
    2558        1240 :                 if (!LockRelease(&locallock->tag.lock,
    2559             :                                  locallock->tag.mode,
    2560             :                                  sessionLock))
    2561           0 :                     elog(WARNING, "ReleaseLockIfHeld: failed??");
    2562             :             }
    2563        1678 :             break;
    2564             :         }
    2565             :     }
    2566        1932 : }
    2567             : 
    2568             : /*
    2569             :  * LockReassignCurrentOwner
    2570             :  *      Reassign all locks belonging to CurrentResourceOwner to belong
    2571             :  *      to its parent resource owner.
    2572             :  *
    2573             :  * If the caller knows what those locks are, it can pass them as an array.
    2574             :  * That speeds up the call significantly, when a lot of locks are held
    2575             :  * (e.g pg_dump with a large schema).  Otherwise, pass NULL for locallocks,
    2576             :  * and we'll traverse through our hash table to find them.
    2577             :  */
    2578             : void
    2579      507458 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
    2580             : {
    2581      507458 :     ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
    2582             : 
    2583             :     Assert(parent != NULL);
    2584             : 
    2585      507458 :     if (locallocks == NULL)
    2586             :     {
    2587             :         HASH_SEQ_STATUS status;
    2588             :         LOCALLOCK  *locallock;
    2589             : 
    2590        4276 :         hash_seq_init(&status, LockMethodLocalHash);
    2591             : 
    2592       92236 :         while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2593       87960 :             LockReassignOwner(locallock, parent);
    2594             :     }
    2595             :     else
    2596             :     {
    2597             :         int         i;
    2598             : 
    2599     1065264 :         for (i = nlocks - 1; i >= 0; i--)
    2600      562082 :             LockReassignOwner(locallocks[i], parent);
    2601             :     }
    2602      507458 : }
    2603             : 
    2604             : /*
    2605             :  * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
    2606             :  * CurrentResourceOwner to its parent.
    2607             :  */
    2608             : static void
    2609      650042 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
    2610             : {
    2611             :     LOCALLOCKOWNER *lockOwners;
    2612             :     int         i;
    2613      650042 :     int         ic = -1;
    2614      650042 :     int         ip = -1;
    2615             : 
    2616             :     /*
    2617             :      * Scan to see if there are any locks belonging to current owner or its
    2618             :      * parent
    2619             :      */
    2620      650042 :     lockOwners = locallock->lockOwners;
    2621     1486366 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2622             :     {
    2623      836324 :         if (lockOwners[i].owner == CurrentResourceOwner)
    2624      641374 :             ic = i;
    2625      194950 :         else if (lockOwners[i].owner == parent)
    2626      119064 :             ip = i;
    2627             :     }
    2628             : 
    2629      650042 :     if (ic < 0)
    2630        8668 :         return;                 /* no current locks */
    2631             : 
    2632      641374 :     if (ip < 0)
    2633             :     {
    2634             :         /* Parent has no slot, so just give it the child's slot */
    2635      530936 :         lockOwners[ic].owner = parent;
    2636      530936 :         ResourceOwnerRememberLock(parent, locallock);
    2637             :     }
    2638             :     else
    2639             :     {
    2640             :         /* Merge child's count with parent's */
    2641      110438 :         lockOwners[ip].nLocks += lockOwners[ic].nLocks;
    2642             :         /* compact out unused slot */
    2643      110438 :         locallock->numLockOwners--;
    2644      110438 :         if (ic < locallock->numLockOwners)
    2645        1176 :             lockOwners[ic] = lockOwners[locallock->numLockOwners];
    2646             :     }
    2647      641374 :     ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
    2648             : }
    2649             : 
    2650             : /*
    2651             :  * FastPathGrantRelationLock
    2652             :  *      Grant lock using per-backend fast-path array, if there is space.
    2653             :  */
    2654             : static bool
    2655    24477836 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
    2656             : {
    2657             :     uint32      f;
    2658    24477836 :     uint32      unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
    2659             : 
    2660             :     /* Scan for existing entry for this relid, remembering empty slot. */
    2661   407692090 :     for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2662             :     {
    2663   384988208 :         if (FAST_PATH_GET_BITS(MyProc, f) == 0)
    2664   326476716 :             unused_slot = f;
    2665    58511492 :         else if (MyProc->fpRelId[f] == relid)
    2666             :         {
    2667             :             Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
    2668     1773954 :             FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
    2669     1773954 :             return true;
    2670             :         }
    2671             :     }
    2672             : 
    2673             :     /* If no existing entry, use any empty slot. */
    2674    22703882 :     if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
    2675             :     {
    2676    22703882 :         MyProc->fpRelId[unused_slot] = relid;
    2677    22703882 :         FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
    2678    22703882 :         ++FastPathLocalUseCount;
    2679    22703882 :         return true;
    2680             :     }
    2681             : 
    2682             :     /* No existing entry, and no empty slot. */
    2683           0 :     return false;
    2684             : }
    2685             : 
    2686             : /*
    2687             :  * FastPathUnGrantRelationLock
    2688             :  *      Release fast-path lock, if present.  Update backend-private local
    2689             :  *      use count, while we're at it.
    2690             :  */
    2691             : static bool
    2692    24839288 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
    2693             : {
    2694             :     uint32      f;
    2695    24839288 :     bool        result = false;
    2696             : 
    2697    24839288 :     FastPathLocalUseCount = 0;
    2698   422267896 :     for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2699             :     {
    2700   397428608 :         if (MyProc->fpRelId[f] == relid
    2701    33602448 :             && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
    2702             :         {
    2703             :             Assert(!result);
    2704    24476342 :             FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
    2705    24476342 :             result = true;
    2706             :             /* we continue iterating so as to update FastPathLocalUseCount */
    2707             :         }
    2708   397428608 :         if (FAST_PATH_GET_BITS(MyProc, f) != 0)
    2709    69708044 :             ++FastPathLocalUseCount;
    2710             :     }
    2711    24839288 :     return result;
    2712             : }
    2713             : 
    2714             : /*
    2715             :  * FastPathTransferRelationLocks
    2716             :  *      Transfer locks matching the given lock tag from per-backend fast-path
    2717             :  *      arrays to the shared hash table.
    2718             :  *
    2719             :  * Returns true if successful, false if ran out of shared memory.
    2720             :  */
    2721             : static bool
    2722      265140 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
    2723             :                               uint32 hashcode)
    2724             : {
    2725      265140 :     LWLock     *partitionLock = LockHashPartitionLock(hashcode);
    2726      265140 :     Oid         relid = locktag->locktag_field2;
    2727             :     uint32      i;
    2728             : 
    2729             :     /*
    2730             :      * Every PGPROC that can potentially hold a fast-path lock is present in
    2731             :      * ProcGlobal->allProcs.  Prepared transactions are not, but any
    2732             :      * outstanding fast-path locks held by prepared transactions are
    2733             :      * transferred to the main lock table.
    2734             :      */
    2735    33371510 :     for (i = 0; i < ProcGlobal->allProcCount; i++)
    2736             :     {
    2737    33106370 :         PGPROC     *proc = &ProcGlobal->allProcs[i];
    2738             :         uint32      f;
    2739             : 
    2740    33106370 :         LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
    2741             : 
    2742             :         /*
    2743             :          * If the target backend isn't referencing the same database as the
    2744             :          * lock, then we needn't examine the individual relation IDs at all;
    2745             :          * none of them can be relevant.
    2746             :          *
    2747             :          * proc->databaseId is set at backend startup time and never changes
    2748             :          * thereafter, so it might be safe to perform this test before
    2749             :          * acquiring &proc->fpInfoLock.  In particular, it's certainly safe to
    2750             :          * assume that if the target backend holds any fast-path locks, it
    2751             :          * must have performed a memory-fencing operation (in particular, an
    2752             :          * LWLock acquisition) since setting proc->databaseId.  However, it's
    2753             :          * less clear that our backend is certain to have performed a memory
    2754             :          * fencing operation since the other backend set proc->databaseId.  So
    2755             :          * for now, we test it after acquiring the LWLock just to be safe.
    2756             :          */
    2757    33106370 :         if (proc->databaseId != locktag->locktag_field1)
    2758             :         {
    2759    31524006 :             LWLockRelease(&proc->fpInfoLock);
    2760    31524006 :             continue;
    2761             :         }
    2762             : 
    2763    26898462 :         for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2764             :         {
    2765             :             uint32      lockmode;
    2766             : 
    2767             :             /* Look for an allocated slot matching the given relid. */
    2768    25317436 :             if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
    2769    25316098 :                 continue;
    2770             : 
    2771             :             /* Find or create lock object. */
    2772        1338 :             LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2773        5352 :             for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
    2774             :                  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
    2775        4014 :                  ++lockmode)
    2776             :             {
    2777             :                 PROCLOCK   *proclock;
    2778             : 
    2779        4014 :                 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
    2780        2578 :                     continue;
    2781        1436 :                 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
    2782             :                                             hashcode, lockmode);
    2783        1436 :                 if (!proclock)
    2784             :                 {
    2785           0 :                     LWLockRelease(partitionLock);
    2786           0 :                     LWLockRelease(&proc->fpInfoLock);
    2787           0 :                     return false;
    2788             :                 }
    2789        1436 :                 GrantLock(proclock->tag.myLock, proclock, lockmode);
    2790        1436 :                 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
    2791             :             }
    2792        1338 :             LWLockRelease(partitionLock);
    2793             : 
    2794             :             /* No need to examine remaining slots. */
    2795        1338 :             break;
    2796             :         }
    2797     1582364 :         LWLockRelease(&proc->fpInfoLock);
    2798             :     }
    2799      265140 :     return true;
    2800             : }
    2801             : 
    2802             : /*
    2803             :  * FastPathGetRelationLockEntry
    2804             :  *      Return the PROCLOCK for a lock originally taken via the fast-path,
    2805             :  *      transferring it to the primary lock table if necessary.
    2806             :  *
    2807             :  * Note: caller takes care of updating the locallock object.
    2808             :  */
    2809             : static PROCLOCK *
    2810          60 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
    2811             : {
    2812          60 :     LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
    2813          60 :     LOCKTAG    *locktag = &locallock->tag.lock;
    2814          60 :     PROCLOCK   *proclock = NULL;
    2815          60 :     LWLock     *partitionLock = LockHashPartitionLock(locallock->hashcode);
    2816          60 :     Oid         relid = locktag->locktag_field2;
    2817             :     uint32      f;
    2818             : 
    2819          60 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2820             : 
    2821         962 :     for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2822             :     {
    2823             :         uint32      lockmode;
    2824             : 
    2825             :         /* Look for an allocated slot matching the given relid. */
    2826         960 :         if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
    2827         902 :             continue;
    2828             : 
    2829             :         /* If we don't have a lock of the given mode, forget it! */
    2830          58 :         lockmode = locallock->tag.mode;
    2831          58 :         if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
    2832           0 :             break;
    2833             : 
    2834             :         /* Find or create lock object. */
    2835          58 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2836             : 
    2837          58 :         proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
    2838             :                                     locallock->hashcode, lockmode);
    2839          58 :         if (!proclock)
    2840             :         {
    2841           0 :             LWLockRelease(partitionLock);
    2842           0 :             LWLockRelease(&MyProc->fpInfoLock);
    2843           0 :             ereport(ERROR,
    2844             :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    2845             :                      errmsg("out of shared memory"),
    2846             :                      errhint("You might need to increase max_locks_per_transaction.")));
    2847             :         }
    2848          58 :         GrantLock(proclock->tag.myLock, proclock, lockmode);
    2849          58 :         FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
    2850             : 
    2851          58 :         LWLockRelease(partitionLock);
    2852             : 
    2853             :         /* No need to examine remaining slots. */
    2854          58 :         break;
    2855             :     }
    2856             : 
    2857          60 :     LWLockRelease(&MyProc->fpInfoLock);
    2858             : 
    2859             :     /* Lock may have already been transferred by some other backend. */
    2860          60 :     if (proclock == NULL)
    2861             :     {
    2862             :         LOCK       *lock;
    2863             :         PROCLOCKTAG proclocktag;
    2864             :         uint32      proclock_hashcode;
    2865             : 
    2866           2 :         LWLockAcquire(partitionLock, LW_SHARED);
    2867             : 
    2868           2 :         lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    2869             :                                                     (void *) locktag,
    2870             :                                                     locallock->hashcode,
    2871             :                                                     HASH_FIND,
    2872             :                                                     NULL);
    2873           2 :         if (!lock)
    2874           0 :             elog(ERROR, "failed to re-find shared lock object");
    2875             : 
    2876           2 :         proclocktag.myLock = lock;
    2877           2 :         proclocktag.myProc = MyProc;
    2878             : 
    2879           2 :         proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
    2880             :         proclock = (PROCLOCK *)
    2881           2 :             hash_search_with_hash_value(LockMethodProcLockHash,
    2882             :                                         (void *) &proclocktag,
    2883             :                                         proclock_hashcode,
    2884             :                                         HASH_FIND,
    2885             :                                         NULL);
    2886           2 :         if (!proclock)
    2887           0 :             elog(ERROR, "failed to re-find shared proclock object");
    2888           2 :         LWLockRelease(partitionLock);
    2889             :     }
    2890             : 
    2891          60 :     return proclock;
    2892             : }
    2893             : 
    2894             : /*
    2895             :  * GetLockConflicts
    2896             :  *      Get an array of VirtualTransactionIds of xacts currently holding locks
    2897             :  *      that would conflict with the specified lock/lockmode.
    2898             :  *      xacts merely awaiting such a lock are NOT reported.
    2899             :  *
    2900             :  * The result array is palloc'd and is terminated with an invalid VXID.
    2901             :  * *countp, if not null, is updated to the number of items set.
    2902             :  *
    2903             :  * Of course, the result could be out of date by the time it's returned,
    2904             :  * so use of this function has to be thought about carefully.
    2905             :  *
    2906             :  * Note we never include the current xact's vxid in the result array,
    2907             :  * since an xact never blocks itself.  Also, prepared transactions are
    2908             :  * ignored, which is a bit more debatable but is appropriate for current
    2909             :  * uses of the result.
    2910             :  */
    2911             : VirtualTransactionId *
    2912         712 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
    2913             : {
    2914             :     static VirtualTransactionId *vxids;
    2915         712 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    2916             :     LockMethod  lockMethodTable;
    2917             :     LOCK       *lock;
    2918             :     LOCKMASK    conflictMask;
    2919             :     SHM_QUEUE  *procLocks;
    2920             :     PROCLOCK   *proclock;
    2921             :     uint32      hashcode;
    2922             :     LWLock     *partitionLock;
    2923         712 :     int         count = 0;
    2924         712 :     int         fast_count = 0;
    2925             : 
    2926         712 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2927           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2928         712 :     lockMethodTable = LockMethods[lockmethodid];
    2929         712 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
    2930           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
    2931             : 
    2932             :     /*
    2933             :      * Allocate memory to store results, and fill with InvalidVXID.  We only
    2934             :      * need enough space for MaxBackends + a terminator, since prepared xacts
    2935             :      * don't count. InHotStandby allocate once in TopMemoryContext.
    2936             :      */
    2937         712 :     if (InHotStandby)
    2938             :     {
    2939           0 :         if (vxids == NULL)
    2940           0 :             vxids = (VirtualTransactionId *)
    2941           0 :                 MemoryContextAlloc(TopMemoryContext,
    2942           0 :                                    sizeof(VirtualTransactionId) * (MaxBackends + 1));
    2943             :     }
    2944             :     else
    2945         712 :         vxids = (VirtualTransactionId *)
    2946         712 :             palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
    2947             : 
    2948             :     /* Compute hash code and partition lock, and look up conflicting modes. */
    2949         712 :     hashcode = LockTagHashCode(locktag);
    2950         712 :     partitionLock = LockHashPartitionLock(hashcode);
    2951         712 :     conflictMask = lockMethodTable->conflictTab[lockmode];
    2952             : 
    2953             :     /*
    2954             :      * Fast path locks might not have been entered in the primary lock table.
    2955             :      * If the lock we're dealing with could conflict with such a lock, we must
    2956             :      * examine each backend's fast-path array for conflicts.
    2957             :      */
    2958         712 :     if (ConflictsWithRelationFastPath(locktag, lockmode))
    2959             :     {
    2960             :         int         i;
    2961         712 :         Oid         relid = locktag->locktag_field2;
    2962             :         VirtualTransactionId vxid;
    2963             : 
    2964             :         /*
    2965             :          * Iterate over relevant PGPROCs.  Anything held by a prepared
    2966             :          * transaction will have been transferred to the primary lock table,
    2967             :          * so we need not worry about those.  This is all a bit fuzzy, because
    2968             :          * new locks could be taken after we've visited a particular
    2969             :          * partition, but the callers had better be prepared to deal with that
    2970             :          * anyway, since the locks could equally well be taken between the
    2971             :          * time we return the value and the time the caller does something
    2972             :          * with it.
    2973             :          */
    2974       88584 :         for (i = 0; i < ProcGlobal->allProcCount; i++)
    2975             :         {
    2976       87872 :             PGPROC     *proc = &ProcGlobal->allProcs[i];
    2977             :             uint32      f;
    2978             : 
    2979             :             /* A backend never blocks itself */
    2980       87872 :             if (proc == MyProc)
    2981         712 :                 continue;
    2982             : 
    2983       87160 :             LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
    2984             : 
    2985             :             /*
    2986             :              * If the target backend isn't referencing the same database as
    2987             :              * the lock, then we needn't examine the individual relation IDs
    2988             :              * at all; none of them can be relevant.
    2989             :              *
    2990             :              * See FastPathTransferRelationLocks() for discussion of why we do
    2991             :              * this test after acquiring the lock.
    2992             :              */
    2993       87160 :             if (proc->databaseId != locktag->locktag_field1)
    2994             :             {
    2995       82862 :                 LWLockRelease(&proc->fpInfoLock);
    2996       82862 :                 continue;
    2997             :             }
    2998             : 
    2999       73040 :             for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    3000             :             {
    3001             :                 uint32      lockmask;
    3002             : 
    3003             :                 /* Look for an allocated slot matching the given relid. */
    3004       68768 :                 if (relid != proc->fpRelId[f])
    3005       68686 :                     continue;
    3006          82 :                 lockmask = FAST_PATH_GET_BITS(proc, f);
    3007          82 :                 if (!lockmask)
    3008          56 :                     continue;
    3009          26 :                 lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
    3010             : 
    3011             :                 /*
    3012             :                  * There can only be one entry per relation, so if we found it
    3013             :                  * and it doesn't conflict, we can skip the rest of the slots.
    3014             :                  */
    3015          26 :                 if ((lockmask & conflictMask) == 0)
    3016          10 :                     break;
    3017             : 
    3018             :                 /* Conflict! */
    3019          16 :                 GET_VXID_FROM_PGPROC(vxid, *proc);
    3020             : 
    3021             :                 /*
    3022             :                  * If we see an invalid VXID, then either the xact has already
    3023             :                  * committed (or aborted), or it's a prepared xact.  In either
    3024             :                  * case we may ignore it.
    3025             :                  */
    3026          16 :                 if (VirtualTransactionIdIsValid(vxid))
    3027          16 :                     vxids[count++] = vxid;
    3028             : 
    3029             :                 /* No need to examine remaining slots. */
    3030          16 :                 break;
    3031             :             }
    3032             : 
    3033        4298 :             LWLockRelease(&proc->fpInfoLock);
    3034             :         }
    3035             :     }
    3036             : 
    3037             :     /* Remember how many fast-path conflicts we found. */
    3038         712 :     fast_count = count;
    3039             : 
    3040             :     /*
    3041             :      * Look up the lock object matching the tag.
    3042             :      */
    3043         712 :     LWLockAcquire(partitionLock, LW_SHARED);
    3044             : 
    3045         712 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    3046             :                                                 (const void *) locktag,
    3047             :                                                 hashcode,
    3048             :                                                 HASH_FIND,
    3049             :                                                 NULL);
    3050         712 :     if (!lock)
    3051             :     {
    3052             :         /*
    3053             :          * If the lock object doesn't exist, there is nothing holding a lock
    3054             :          * on this lockable object.
    3055             :          */
    3056           0 :         LWLockRelease(partitionLock);
    3057           0 :         vxids[count].backendId = InvalidBackendId;
    3058           0 :         vxids[count].localTransactionId = InvalidLocalTransactionId;
    3059           0 :         if (countp)
    3060           0 :             *countp = count;
    3061           0 :         return vxids;
    3062             :     }
    3063             : 
    3064             :     /*
    3065             :      * Examine each existing holder (or awaiter) of the lock.
    3066             :      */
    3067             : 
    3068         712 :     procLocks = &(lock->procLocks);
    3069             : 
    3070         712 :     proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
    3071             :                                          offsetof(PROCLOCK, lockLink));
    3072             : 
    3073        1424 :     while (proclock)
    3074             :     {
    3075         712 :         if (conflictMask & proclock->holdMask)
    3076             :         {
    3077         712 :             PGPROC     *proc = proclock->tag.myProc;
    3078             : 
    3079             :             /* A backend never blocks itself */
    3080         712 :             if (proc != MyProc)
    3081             :             {
    3082             :                 VirtualTransactionId vxid;
    3083             : 
    3084           0 :                 GET_VXID_FROM_PGPROC(vxid, *proc);
    3085             : 
    3086             :                 /*
    3087             :                  * If we see an invalid VXID, then either the xact has already
    3088             :                  * committed (or aborted), or it's a prepared xact.  In either
    3089             :                  * case we may ignore it.
    3090             :                  */
    3091           0 :                 if (VirtualTransactionIdIsValid(vxid))
    3092             :                 {
    3093             :                     int         i;
    3094             : 
    3095             :                     /* Avoid duplicate entries. */
    3096           0 :                     for (i = 0; i < fast_count; ++i)
    3097           0 :                         if (VirtualTransactionIdEquals(vxids[i], vxid))
    3098           0 :                             break;
    3099           0 :                     if (i >= fast_count)
    3100           0 :                         vxids[count++] = vxid;
    3101             :                 }
    3102             :             }
    3103             :         }
    3104             : 
    3105         712 :         proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
    3106             :                                              offsetof(PROCLOCK, lockLink));
    3107             :     }
    3108             : 
    3109         712 :     LWLockRelease(partitionLock);
    3110             : 
    3111         712 :     if (count > MaxBackends) /* should never happen */
    3112           0 :         elog(PANIC, "too many conflicting locks found");
    3113             : 
    3114         712 :     vxids[count].backendId = InvalidBackendId;
    3115         712 :     vxids[count].localTransactionId = InvalidLocalTransactionId;
    3116         712 :     if (countp)
    3117         712 :         *countp = count;
    3118         712 :     return vxids;
    3119             : }
    3120             : 
    3121             : /*
    3122             :  * Find a lock in the shared lock table and release it.  It is the caller's
    3123             :  * responsibility to verify that this is a sane thing to do.  (For example, it
    3124             :  * would be bad to release a lock here if there might still be a LOCALLOCK
    3125             :  * object with pointers to it.)
    3126             :  *
    3127             :  * We currently use this in two situations: first, to release locks held by
    3128             :  * prepared transactions on commit (see lock_twophase_postcommit); and second,
    3129             :  * to release locks taken via the fast-path, transferred to the main hash
    3130             :  * table, and then released (see LockReleaseAll).
    3131             :  */
    3132             : static void
    3133        1680 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
    3134             :                      LOCKTAG *locktag, LOCKMODE lockmode,
    3135             :                      bool decrement_strong_lock_count)
    3136             : {
    3137             :     LOCK       *lock;
    3138             :     PROCLOCK   *proclock;
    3139             :     PROCLOCKTAG proclocktag;
    3140             :     uint32      hashcode;
    3141             :     uint32      proclock_hashcode;
    3142             :     LWLock     *partitionLock;
    3143             :     bool        wakeupNeeded;
    3144             : 
    3145        1680 :     hashcode = LockTagHashCode(locktag);
    3146        1680 :     partitionLock = LockHashPartitionLock(hashcode);
    3147             : 
    3148        1680 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    3149             : 
    3150             :     /*
    3151             :      * Re-find the lock object (it had better be there).
    3152             :      */
    3153        1680 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    3154             :                                                 (void *) locktag,
    3155             :                                                 hashcode,
    3156             :                                                 HASH_FIND,
    3157             :                                                 NULL);
    3158        1680 :     if (!lock)
    3159           0 :         elog(PANIC, "failed to re-find shared lock object");
    3160             : 
    3161             :     /*
    3162             :      * Re-find the proclock object (ditto).
    3163             :      */
    3164        1680 :     proclocktag.myLock = lock;
    3165        1680 :     proclocktag.myProc = proc;
    3166             : 
    3167        1680 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    3168             : 
    3169        1680 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    3170             :                                                         (void *) &proclocktag,
    3171             :                                                         proclock_hashcode,
    3172             :                                                         HASH_FIND,
    3173             :                                                         NULL);
    3174        1680 :     if (!proclock)
    3175           0 :         elog(PANIC, "failed to re-find shared proclock object");
    3176             : 
    3177             :     /*
    3178             :      * Double-check that we are actually holding a lock of the type we want to
    3179             :      * release.
    3180             :      */
    3181        1680 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    3182             :     {
    3183             :         PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
    3184           0 :         LWLockRelease(partitionLock);
    3185           0 :         elog(WARNING, "you don't own a lock of type %s",
    3186             :              lockMethodTable->lockModeNames[lockmode]);
    3187           0 :         return;
    3188             :     }
    3189             : 
    3190             :     /*
    3191             :      * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
    3192             :      */
    3193        1680 :     wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
    3194             : 
    3195        1680 :     CleanUpLock(lock, proclock,
    3196             :                 lockMethodTable, hashcode,
    3197             :                 wakeupNeeded);
    3198             : 
    3199        1680 :     LWLockRelease(partitionLock);
    3200             : 
    3201             :     /*
    3202             :      * Decrement strong lock count.  This logic is needed only for 2PC.
    3203             :      */
    3204        1680 :     if (decrement_strong_lock_count
    3205         202 :         && ConflictsWithRelationFastPath(locktag, lockmode))
    3206             :     {
    3207          42 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
    3208             : 
    3209          42 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    3210             :         Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    3211          42 :         FastPathStrongRelationLocks->count[fasthashcode]--;
    3212          42 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    3213             :     }
    3214             : }
    3215             : 
    3216             : /*
    3217             :  * AtPrepare_Locks
    3218             :  *      Do the preparatory work for a PREPARE: make 2PC state file records
    3219             :  *      for all locks currently held.
    3220             :  *
    3221             :  * Session-level locks are ignored, as are VXID locks.
    3222             :  *
    3223             :  * There are some special cases that we error out on: we can't be holding any
    3224             :  * locks at both session and transaction level (since we must either keep or
    3225             :  * give away the PROCLOCK object), and we can't be holding any locks on
    3226             :  * temporary objects (since that would mess up the current backend if it tries
    3227             :  * to exit before the prepared xact is committed).
    3228             :  */
    3229             : void
    3230          62 : AtPrepare_Locks(void)
    3231             : {
    3232             :     HASH_SEQ_STATUS status;
    3233             :     LOCALLOCK  *locallock;
    3234             : 
    3235             :     /*
    3236             :      * For the most part, we don't need to touch shared memory for this ---
    3237             :      * all the necessary state information is in the locallock table.
    3238             :      * Fast-path locks are an exception, however: we move any such locks to
    3239             :      * the main table before allowing PREPARE TRANSACTION to succeed.
    3240             :      */
    3241          62 :     hash_seq_init(&status, LockMethodLocalHash);
    3242             : 
    3243         240 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3244             :     {
    3245             :         TwoPhaseLockRecord record;
    3246         178 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3247             :         bool        haveSessionLock;
    3248             :         bool        haveXactLock;
    3249             :         int         i;
    3250             : 
    3251             :         /*
    3252             :          * Ignore VXID locks.  We don't want those to be held by prepared
    3253             :          * transactions, since they aren't meaningful after a restart.
    3254             :          */
    3255         178 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3256           0 :             continue;
    3257             : 
    3258             :         /* Ignore it if we don't actually hold the lock */
    3259         178 :         if (locallock->nLocks <= 0)
    3260           0 :             continue;
    3261             : 
    3262             :         /* Scan to see whether we hold it at session or transaction level */
    3263         178 :         haveSessionLock = haveXactLock = false;
    3264         356 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3265             :         {
    3266         178 :             if (lockOwners[i].owner == NULL)
    3267           0 :                 haveSessionLock = true;
    3268             :             else
    3269         178 :                 haveXactLock = true;
    3270             :         }
    3271             : 
    3272             :         /* Ignore it if we have only session lock */
    3273         178 :         if (!haveXactLock)
    3274           0 :             continue;
    3275             : 
    3276             :         /*
    3277             :          * If we have both session- and transaction-level locks, fail.  This
    3278             :          * should never happen with regular locks, since we only take those at
    3279             :          * session level in some special operations like VACUUM.  It's
    3280             :          * possible to hit this with advisory locks, though.
    3281             :          *
    3282             :          * It would be nice if we could keep the session hold and give away
    3283             :          * the transactional hold to the prepared xact.  However, that would
    3284             :          * require two PROCLOCK objects, and we cannot be sure that another
    3285             :          * PROCLOCK will be available when it comes time for PostPrepare_Locks
    3286             :          * to do the deed.  So for now, we error out while we can still do so
    3287             :          * safely.
    3288             :          */
    3289         178 :         if (haveSessionLock)
    3290           0 :             ereport(ERROR,
    3291             :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3292             :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3293             : 
    3294             :         /*
    3295             :          * If the local lock was taken via the fast-path, we need to move it
    3296             :          * to the primary lock table, or just get a pointer to the existing
    3297             :          * primary lock table entry if by chance it's already been
    3298             :          * transferred.
    3299             :          */
    3300         178 :         if (locallock->proclock == NULL)
    3301             :         {
    3302          60 :             locallock->proclock = FastPathGetRelationLockEntry(locallock);
    3303          60 :             locallock->lock = locallock->proclock->tag.myLock;
    3304             :         }
    3305             : 
    3306             :         /*
    3307             :          * Arrange to not release any strong lock count held by this lock
    3308             :          * entry.  We must retain the count until the prepared transaction is
    3309             :          * committed or rolled back.
    3310             :          */
    3311         178 :         locallock->holdsStrongLockCount = false;
    3312             : 
    3313             :         /*
    3314             :          * Create a 2PC record.
    3315             :          */
    3316         178 :         memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
    3317         178 :         record.lockmode = locallock->tag.mode;
    3318             : 
    3319         178 :         RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
    3320             :                                &record, sizeof(TwoPhaseLockRecord));
    3321             :     }
    3322          62 : }
    3323             : 
    3324             : /*
    3325             :  * PostPrepare_Locks
    3326             :  *      Clean up after successful PREPARE
    3327             :  *
    3328             :  * Here, we want to transfer ownership of our locks to a dummy PGPROC
    3329             :  * that's now associated with the prepared transaction, and we want to
    3330             :  * clean out the corresponding entries in the LOCALLOCK table.
    3331             :  *
    3332             :  * Note: by removing the LOCALLOCK entries, we are leaving dangling
    3333             :  * pointers in the transaction's resource owner.  This is OK at the
    3334             :  * moment since resowner.c doesn't try to free locks retail at a toplevel
    3335             :  * transaction commit or abort.  We could alternatively zero out nLocks
    3336             :  * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
    3337             :  * but that probably costs more cycles.
    3338             :  */
    3339             : void
    3340          62 : PostPrepare_Locks(TransactionId xid)
    3341             : {
    3342          62 :     PGPROC     *newproc = TwoPhaseGetDummyProc(xid, false);
    3343             :     HASH_SEQ_STATUS status;
    3344             :     LOCALLOCK  *locallock;
    3345             :     LOCK       *lock;
    3346             :     PROCLOCK   *proclock;
    3347             :     PROCLOCKTAG proclocktag;
    3348             :     int         partition;
    3349             : 
    3350             :     /* Can't prepare a lock group follower. */
    3351             :     Assert(MyProc->lockGroupLeader == NULL ||
    3352             :            MyProc->lockGroupLeader == MyProc);
    3353             : 
    3354             :     /* This is a critical section: any error means big trouble */
    3355          62 :     START_CRIT_SECTION();
    3356             : 
    3357             :     /*
    3358             :      * First we run through the locallock table and get rid of unwanted
    3359             :      * entries, then we scan the process's proclocks and transfer them to the
    3360             :      * target proc.
    3361             :      *
    3362             :      * We do this separately because we may have multiple locallock entries
    3363             :      * pointing to the same proclock, and we daren't end up with any dangling
    3364             :      * pointers.
    3365             :      */
    3366          62 :     hash_seq_init(&status, LockMethodLocalHash);
    3367             : 
    3368         240 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3369             :     {
    3370         178 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3371             :         bool        haveSessionLock;
    3372             :         bool        haveXactLock;
    3373             :         int         i;
    3374             : 
    3375         178 :         if (locallock->proclock == NULL || locallock->lock == NULL)
    3376             :         {
    3377             :             /*
    3378             :              * We must've run out of shared memory while trying to set up this
    3379             :              * lock.  Just forget the local entry.
    3380             :              */
    3381             :             Assert(locallock->nLocks == 0);
    3382           0 :             RemoveLocalLock(locallock);
    3383           0 :             continue;
    3384             :         }
    3385             : 
    3386             :         /* Ignore VXID locks */
    3387         178 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3388           0 :             continue;
    3389             : 
    3390             :         /* Scan to see whether we hold it at session or transaction level */
    3391         178 :         haveSessionLock = haveXactLock = false;
    3392         356 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3393             :         {
    3394         178 :             if (lockOwners[i].owner == NULL)
    3395           0 :                 haveSessionLock = true;
    3396             :             else
    3397         178 :                 haveXactLock = true;
    3398             :         }
    3399             : 
    3400             :         /* Ignore it if we have only session lock */
    3401         178 :         if (!haveXactLock)
    3402           0 :             continue;
    3403             : 
    3404             :         /* This can't happen, because we already checked it */
    3405         178 :         if (haveSessionLock)
    3406           0 :             ereport(PANIC,
    3407             :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3408             :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3409             : 
    3410             :         /* Mark the proclock to show we need to release this lockmode */
    3411         178 :         if (locallock->nLocks > 0)
    3412         178 :             locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
    3413             : 
    3414             :         /* And remove the locallock hashtable entry */
    3415         178 :         RemoveLocalLock(locallock);
    3416             :     }
    3417             : 
    3418             :     /*
    3419             :      * Now, scan each lock partition separately.
    3420             :      */
    3421        1054 :     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
    3422             :     {
    3423             :         LWLock     *partitionLock;
    3424         992 :         SHM_QUEUE  *procLocks = &(MyProc->myProcLocks[partition]);
    3425             :         PROCLOCK   *nextplock;
    3426             : 
    3427         992 :         partitionLock = LockHashPartitionLockByIndex(partition);
    3428             : 
    3429             :         /*
    3430             :          * If the proclock list for this partition is empty, we can skip
    3431             :          * acquiring the partition lock.  This optimization is safer than the
    3432             :          * situation in LockReleaseAll, because we got rid of any fast-path
    3433             :          * locks during AtPrepare_Locks, so there cannot be any case where
    3434             :          * another backend is adding something to our lists now.  For safety,
    3435             :          * though, we code this the same way as in LockReleaseAll.
    3436             :          */
    3437         992 :         if (SHMQueueNext(procLocks, procLocks,
    3438             :                          offsetof(PROCLOCK, procLink)) == NULL)
    3439         842 :             continue;           /* needn't examine this partition */
    3440             : 
    3441         150 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    3442             : 
    3443         306 :         for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
    3444             :                                                   offsetof(PROCLOCK, procLink));
    3445             :              proclock;
    3446         156 :              proclock = nextplock)
    3447             :         {
    3448             :             /* Get link first, since we may unlink/relink this proclock */
    3449             :             nextplock = (PROCLOCK *)
    3450         156 :                 SHMQueueNext(procLocks, &proclock->procLink,
    3451             :                              offsetof(PROCLOCK, procLink));
    3452             : 
    3453             :             Assert(proclock->tag.myProc == MyProc);
    3454             : 
    3455         156 :             lock = proclock->tag.myLock;
    3456             : 
    3457             :             /* Ignore VXID locks */
    3458         156 :             if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3459           0 :                 continue;
    3460             : 
    3461             :             PROCLOCK_PRINT("PostPrepare_Locks", proclock);
    3462             :             LOCK_PRINT("PostPrepare_Locks", lock, 0);
    3463             :             Assert(lock->nRequested >= 0);
    3464             :             Assert(lock->nGranted >= 0);
    3465             :             Assert(lock->nGranted <= lock->nRequested);
    3466             :             Assert((proclock->holdMask & ~lock->grantMask) == 0);
    3467             : 
    3468             :             /* Ignore it if nothing to release (must be a session lock) */
    3469         156 :             if (proclock->releaseMask == 0)
    3470           0 :                 continue;
    3471             : 
    3472             :             /* Else we should be releasing all locks */
    3473         156 :             if (proclock->releaseMask != proclock->holdMask)
    3474           0 :                 elog(PANIC, "we seem to have dropped a bit somewhere");
    3475             : 
    3476             :             /*
    3477             :              * We cannot simply modify proclock->tag.myProc to reassign
    3478             :              * ownership of the lock, because that's part of the hash key and
    3479             :              * the proclock would then be in the wrong hash chain.  Instead
    3480             :              * use hash_update_hash_key.  (We used to create a new hash entry,
    3481             :              * but that risks out-of-memory failure if other processes are
    3482             :              * busy making proclocks too.)  We must unlink the proclock from
    3483             :              * our procLink chain and put it into the new proc's chain, too.
    3484             :              *
    3485             :              * Note: the updated proclock hash key will still belong to the
    3486             :              * same hash partition, cf proclock_hash().  So the partition lock
    3487             :              * we already hold is sufficient for this.
    3488             :              */
    3489         156 :             SHMQueueDelete(&proclock->procLink);
    3490             : 
    3491             :             /*
    3492             :              * Create the new hash key for the proclock.
    3493             :              */
    3494         156 :             proclocktag.myLock = lock;
    3495         156 :             proclocktag.myProc = newproc;
    3496             : 
    3497             :             /*
    3498             :              * Update groupLeader pointer to point to the new proc.  (We'd
    3499             :              * better not be a member of somebody else's lock group!)
    3500             :              */
    3501             :             Assert(proclock->groupLeader == proclock->tag.myProc);
    3502         156 :             proclock->groupLeader = newproc;
    3503             : 
    3504             :             /*
    3505             :              * Update the proclock.  We should not find any existing entry for
    3506             :              * the same hash key, since there can be only one entry for any
    3507             :              * given lock with my own proc.
    3508             :              */
    3509         156 :             if (!hash_update_hash_key(LockMethodProcLockHash,
    3510             :                                       (void *) proclock,
    3511             :                                       (void *) &proclocktag))
    3512           0 :                 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
    3513             : 
    3514             :             /* Re-link into the new proc's proclock list */
    3515         156 :             SHMQueueInsertBefore(&(newproc->myProcLocks[partition]),
    3516             :                                  &proclock->procLink);
    3517             : 
    3518             :             PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
    3519             :         }                       /* loop over PROCLOCKs within this partition */
    3520             : 
    3521         150 :         LWLockRelease(partitionLock);
    3522             :     }                           /* loop over partitions */
    3523             : 
    3524          62 :     END_CRIT_SECTION();
    3525          62 : }
    3526             : 
    3527             : 
    3528             : /*
    3529             :  * Estimate shared-memory space used for lock tables
    3530             :  */
    3531             : Size
    3532        2174 : LockShmemSize(void)
    3533             : {
    3534        2174 :     Size        size = 0;
    3535             :     long        max_table_size;
    3536             : 
    3537             :     /* lock hash table */
    3538        2174 :     max_table_size = NLOCKENTS();
    3539        2174 :     size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
    3540             : 
    3541             :     /* proclock hash table */
    3542        2174 :     max_table_size *= 2;
    3543        2174 :     size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
    3544             : 
    3545             :     /*
    3546             :      * Since NLOCKENTS is only an estimate, add 10% safety margin.
    3547             :      */
    3548        2174 :     size = add_size(size, size / 10);
    3549             : 
    3550        2174 :     return size;
    3551             : }
    3552             : 
    3553             : /*
    3554             :  * GetLockStatusData - Return a summary of the lock manager's internal
    3555             :  * status, for use in a user-level reporting function.
    3556             :  *
    3557             :  * The return data consists of an array of LockInstanceData objects,
    3558             :  * which are a lightly abstracted version of the PROCLOCK data structures,
    3559             :  * i.e. there is one entry for each unique lock and interested PGPROC.
    3560             :  * It is the caller's responsibility to match up related items (such as
    3561             :  * references to the same lockable object or PGPROC) if wanted.
    3562             :  *
    3563             :  * The design goal is to hold the LWLocks for as short a time as possible;
    3564             :  * thus, this function simply makes a copy of the necessary data and releases
    3565             :  * the locks, allowing the caller to contemplate and format the data for as
    3566             :  * long as it pleases.
    3567             :  */
    3568             : LockData *
    3569         244 : GetLockStatusData(void)
    3570             : {
    3571             :     LockData   *data;
    3572             :     PROCLOCK   *proclock;
    3573             :     HASH_SEQ_STATUS seqstat;
    3574             :     int         els;
    3575             :     int         el;
    3576             :     int         i;
    3577             : 
    3578         244 :     data = (LockData *) palloc(sizeof(LockData));
    3579             : 
    3580             :     /* Guess how much space we'll need. */
    3581         244 :     els = MaxBackends;
    3582         244 :     el = 0;
    3583         244 :     data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
    3584             : 
    3585             :     /*
    3586             :      * First, we iterate through the per-backend fast-path arrays, locking
    3587             :      * them one at a time.  This might produce an inconsistent picture of the
    3588             :      * system state, but taking all of those LWLocks at the same time seems
    3589             :      * impractical (in particular, note MAX_SIMUL_LWLOCKS).  It shouldn't
    3590             :      * matter too much, because none of these locks can be involved in lock
    3591             :      * conflicts anyway - anything that might must be present in the main lock
    3592             :      * table.  (For the same reason, we don't sweat about making leaderPid
    3593             :      * completely valid.  We cannot safely dereference another backend's
    3594             :      * lockGroupLeader field without holding all lock partition locks, and
    3595             :      * it's not worth that.)
    3596             :      */
    3597       30988 :     for (i = 0; i < ProcGlobal->allProcCount; ++i)
    3598             :     {
    3599       30744 :         PGPROC     *proc = &ProcGlobal->allProcs[i];
    3600             :         uint32      f;
    3601             : 
    3602       30744 :         LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
    3603             : 
    3604      522648 :         for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
    3605             :         {
    3606             :             LockInstanceData *instance;
    3607      491904 :             uint32      lockbits = FAST_PATH_GET_BITS(proc, f);
    3608             : 
    3609             :             /* Skip unallocated slots. */
    3610      491904 :             if (!lockbits)
    3611      489266 :                 continue;
    3612             : 
    3613        2638 :             if (el >= els)
    3614             :             {
    3615           0 :                 els += MaxBackends;
    3616           0 :                 data->locks = (LockInstanceData *)
    3617           0 :                     repalloc(data->locks, sizeof(LockInstanceData) * els);
    3618             :             }
    3619             : 
    3620        2638 :             instance = &data->locks[el];
    3621        2638 :             SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
    3622             :                                  proc->fpRelId[f]);
    3623        2638 :             instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
    3624        2638 :             instance->waitLockMode = NoLock;
    3625        2638 :             instance->backend = proc->backendId;
    3626        2638 :             instance->lxid = proc->lxid;
    3627        2638 :             instance->pid = proc->pid;
    3628        2638 :             instance->leaderPid = proc->pid;
    3629        2638 :             instance->fastpath = true;
    3630             : 
    3631        2638 :             el++;
    3632             :         }
    3633             : 
    3634       30744 :         if (proc->fpVXIDLock)
    3635             :         {
    3636             :             VirtualTransactionId vxid;
    3637             :             LockInstanceData *instance;
    3638             : 
    3639         696 :             if (el >= els)
    3640             :             {
    3641           0 :                 els += MaxBackends;
    3642           0 :                 data->locks = (LockInstanceData *)
    3643           0 :                     repalloc(data->locks, sizeof(LockInstanceData) * els);
    3644             :             }
    3645             : 
    3646         696 :             vxid.backendId = proc->backendId;
    3647         696 :             vxid.localTransactionId = proc->fpLocalTransactionId;
    3648             : 
    3649         696 :             instance = &data->locks[el];
    3650         696 :             SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
    3651         696 :             instance->holdMask = LOCKBIT_ON(ExclusiveLock);
    3652         696 :             instance->waitLockMode = NoLock;
    3653         696 :             instance->backend = proc->backendId;
    3654         696 :             instance->lxid = proc->lxid;
    3655         696 :             instance->pid = proc->pid;
    3656         696 :             instance->leaderPid = proc->pid;
    3657         696 :             instance->fastpath = true;
    3658             : 
    3659         696 :             el++;
    3660             :         }
    3661             : 
    3662       30744 :         LWLockRelease(&proc->fpInfoLock);
    3663             :     }
    3664             : 
    3665             :     /*
    3666             :      * Next, acquire lock on the entire shared lock data structure.  We do
    3667             :      * this so that, at least for locks in the primary lock table, the state
    3668             :      * will be self-consistent.
    3669             :      *
    3670             :      * Since this is a read-only operation, we take shared instead of
    3671             :      * exclusive lock.  There's not a whole lot of point to this, because all
    3672             :      * the normal operations require exclusive lock, but it doesn't hurt
    3673             :      * anything either. It will at least allow two backends to do
    3674             :      * GetLockStatusData in parallel.
    3675             :      *
    3676             :      * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
    3677             :      */
    3678        4148 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3679        3904 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3680             : 
    3681             :     /* Now we can safely count the number of proclocks */
    3682         244 :     data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
    3683         244 :     if (data->nelements > els)
    3684             :     {
    3685           0 :         els = data->nelements;
    3686           0 :         data->locks = (LockInstanceData *)
    3687           0 :             repalloc(data->locks, sizeof(LockInstanceData) * els);
    3688             :     }
    3689             : 
    3690             :     /* Now scan the tables to copy the data */
    3691         244 :     hash_seq_init(&seqstat, LockMethodProcLockHash);
    3692             : 
    3693        2086 :     while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
    3694             :     {
    3695        1842 :         PGPROC     *proc = proclock->tag.myProc;
    3696        1842 :         LOCK       *lock = proclock->tag.myLock;
    3697        1842 :         LockInstanceData *instance = &data->locks[el];
    3698             : 
    3699        1842 :         memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
    3700        1842 :         instance->holdMask = proclock->holdMask;
    3701        1842 :         if (proc->waitLock == proclock->tag.myLock)
    3702           6 :             instance->waitLockMode = proc->waitLockMode;
    3703             :         else
    3704        1836 :             instance->waitLockMode = NoLock;
    3705        1842 :         instance->backend = proc->backendId;
    3706        1842 :         instance->lxid = proc->lxid;
    3707        1842 :         instance->pid = proc->pid;
    3708        1842 :         instance->leaderPid = proclock->groupLeader->pid;
    3709        1842 :         instance->fastpath = false;
    3710             : 
    3711        1842 :         el++;
    3712             :     }
    3713             : 
    3714             :     /*
    3715             :      * And release locks.  We do this in reverse order for two reasons: (1)
    3716             :      * Anyone else who needs more than one of the locks will be trying to lock
    3717             :      * them in increasing order; we don't want to release the other process
    3718             :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    3719             :      * behavior inside LWLockRelease.
    3720             :      */
    3721        4148 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    3722        3904 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    3723             : 
    3724             :     Assert(el == data->nelements);
    3725             : 
    3726         244 :     return data;
    3727             : }
    3728             : 
    3729             : /*
    3730             :  * GetBlockerStatusData - Return a summary of the lock manager's state
    3731             :  * concerning locks that are blocking the specified PID or any member of
    3732             :  * the PID's lock group, for use in a user-level reporting function.
    3733             :  *
    3734             :  * For each PID within the lock group that is awaiting some heavyweight lock,
    3735             :  * the return data includes an array of LockInstanceData objects, which are
    3736             :  * the same data structure used by GetLockStatusData; but unlike that function,
    3737             :  * this one reports only the PROCLOCKs associated with the lock that that PID
    3738             :  * is blocked on.  (Hence, all the locktags should be the same for any one
    3739             :  * blocked PID.)  In addition, we return an array of the PIDs of those backends
    3740             :  * that are ahead of the blocked PID in the lock's wait queue.  These can be
    3741             :  * compared with the PIDs in the LockInstanceData objects to determine which
    3742             :  * waiters are ahead of or behind the blocked PID in the queue.
    3743             :  *
    3744             :  * If blocked_pid isn't a valid backend PID or nothing in its lock group is
    3745             :  * waiting on any heavyweight lock, return empty arrays.
    3746             :  *
    3747             :  * The design goal is to hold the LWLocks for as short a time as possible;
    3748             :  * thus, this function simply makes a copy of the necessary data and releases
    3749             :  * the locks, allowing the caller to contemplate and format the data for as
    3750             :  * long as it pleases.
    3751             :  */
    3752             : BlockedProcsData *
    3753       15434 : GetBlockerStatusData(int blocked_pid)
    3754             : {
    3755             :     BlockedProcsData *data;
    3756             :     PGPROC     *proc;
    3757             :     int         i;
    3758             : 
    3759       15434 :     data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
    3760             : 
    3761             :     /*
    3762             :      * Guess how much space we'll need, and preallocate.  Most of the time
    3763             :      * this will avoid needing to do repalloc while holding the LWLocks.  (We
    3764             :      * assume, but check with an Assert, that MaxBackends is enough entries
    3765             :      * for the procs[] array; the other two could need enlargement, though.)
    3766             :      */
    3767       15434 :     data->nprocs = data->nlocks = data->npids = 0;
    3768       15434 :     data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
    3769       15434 :     data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
    3770       15434 :     data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
    3771       15434 :     data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
    3772             : 
    3773             :     /*
    3774             :      * In order to search the ProcArray for blocked_pid and assume that that
    3775             :      * entry won't immediately disappear under us, we must hold ProcArrayLock.
    3776             :      * In addition, to examine the lock grouping fields of any other backend,
    3777             :      * we must hold all the hash partition locks.  (Only one of those locks is
    3778             :      * actually relevant for any one lock group, but we can't know which one
    3779             :      * ahead of time.)  It's fairly annoying to hold all those locks
    3780             :      * throughout this, but it's no worse than GetLockStatusData(), and it
    3781             :      * does have the advantage that we're guaranteed to return a
    3782             :      * self-consistent instantaneous state.
    3783             :      */
    3784       15434 :     LWLockAcquire(ProcArrayLock, LW_SHARED);
    3785             : 
    3786       15434 :     proc = BackendPidGetProcWithLock(blocked_pid);
    3787             : 
    3788             :     /* Nothing to do if it's gone */
    3789       15434 :     if (proc != NULL)
    3790             :     {
    3791             :         /*
    3792             :          * Acquire lock on the entire shared lock data structure.  See notes
    3793             :          * in GetLockStatusData().
    3794             :          */
    3795      262378 :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3796      246944 :             LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3797             : 
    3798       15434 :         if (proc->lockGroupLeader == NULL)
    3799             :         {
    3800             :             /* Easy case, proc is not a lock group member */
    3801       15280 :             GetSingleProcBlockerStatusData(proc, data);
    3802             :         }
    3803             :         else
    3804             :         {
    3805             :             /* Examine all procs in proc's lock group */
    3806             :             dlist_iter  iter;
    3807             : 
    3808         388 :             dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
    3809             :             {
    3810             :                 PGPROC     *memberProc;
    3811             : 
    3812         234 :                 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
    3813         234 :                 GetSingleProcBlockerStatusData(memberProc, data);
    3814             :             }
    3815             :         }
    3816             : 
    3817             :         /*
    3818             :          * And release locks.  See notes in GetLockStatusData().
    3819             :          */
    3820      262378 :         for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    3821      246944 :             LWLockRelease(LockHashPartitionLockByIndex(i));
    3822             : 
    3823             :         Assert(data->nprocs <= data->maxprocs);
    3824             :     }
    3825             : 
    3826       15434 :     LWLockRelease(ProcArrayLock);
    3827             : 
    3828       15434 :     return data;
    3829             : }
    3830             : 
    3831             : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
    3832             : static void
    3833       15514 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
    3834             : {
    3835       15514 :     LOCK       *theLock = blocked_proc->waitLock;
    3836             :     BlockedProcData *bproc;
    3837             :     SHM_QUEUE  *procLocks;
    3838             :     PROCLOCK   *proclock;
    3839             :     PROC_QUEUE *waitQueue;
    3840             :     PGPROC     *proc;
    3841             :     int         queue_size;
    3842             :     int         i;
    3843             : 
    3844             :     /* Nothing to do if this proc is not blocked */
    3845       15514 :     if (theLock == NULL)
    3846       13858 :         return;
    3847             : 
    3848             :     /* Set up a procs[] element */
    3849        1656 :     bproc = &data->procs[data->nprocs++];
    3850        1656 :     bproc->pid = blocked_proc->pid;
    3851        1656 :     bproc->first_lock = data->nlocks;
    3852        1656 :     bproc->first_waiter = data->npids;
    3853             : 
    3854             :     /*
    3855             :      * We may ignore the proc's fast-path arrays, since nothing in those could
    3856             :      * be related to a contended lock.
    3857             :      */
    3858             : 
    3859             :     /* Collect all PROCLOCKs associated with theLock */
    3860        1656 :     procLocks = &(theLock->procLocks);
    3861        1656 :     proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
    3862             :                                          offsetof(PROCLOCK, lockLink));
    3863        5060 :     while (proclock)
    3864             :     {
    3865        3404 :         PGPROC     *proc = proclock->tag.myProc;
    3866        3404 :         LOCK       *lock = proclock->tag.myLock;
    3867             :         LockInstanceData *instance;
    3868             : 
    3869        3404 :         if (data->nlocks >= data->maxlocks)
    3870             :         {
    3871           0 :             data->maxlocks += MaxBackends;
    3872           0 :             data->locks = (LockInstanceData *)
    3873           0 :                 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
    3874             :         }
    3875             : 
    3876        3404 :         instance = &data->locks[data->nlocks];
    3877        3404 :         memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
    3878        3404 :         instance->holdMask = proclock->holdMask;
    3879        3404 :         if (proc->waitLock == lock)
    3880        1736 :             instance->waitLockMode = proc->waitLockMode;
    3881             :         else
    3882        1668 :             instance->waitLockMode = NoLock;
    3883        3404 :         instance->backend = proc->backendId;
    3884        3404 :         instance->lxid = proc->lxid;
    3885        3404 :         instance->pid = proc->pid;
    3886        3404 :         instance->leaderPid = proclock->groupLeader->pid;
    3887        3404 :         instance->fastpath = false;
    3888        3404 :         data->nlocks++;
    3889             : 
    3890        3404 :         proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
    3891             :                                              offsetof(PROCLOCK, lockLink));
    3892             :     }
    3893             : 
    3894             :     /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
    3895        1656 :     waitQueue = &(theLock->waitProcs);
    3896        1656 :     queue_size = waitQueue->size;
    3897             : 
    3898        1656 :     if (queue_size > data->maxpids - data->npids)
    3899             :     {
    3900           0 :         data->maxpids = Max(data->maxpids + MaxBackends,
    3901             :                             data->npids + queue_size);
    3902           0 :         data->waiter_pids = (int *) repalloc(data->waiter_pids,
    3903           0 :                                              sizeof(int) * data->maxpids);
    3904             :     }
    3905             : 
    3906             :     /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
    3907        1656 :     proc = (PGPROC *) waitQueue->links.next;
    3908        1696 :     for (i = 0; i < queue_size; i++)
    3909             :     {
    3910        1696 :         if (proc == blocked_proc)
    3911        1656 :             break;
    3912          40 :         data->waiter_pids[data->npids++] = proc->pid;
    3913          40 :         proc = (PGPROC *) proc->links.next;
    3914             :     }
    3915             : 
    3916        1656 :     bproc->num_locks = data->nlocks - bproc->first_lock;
    3917        1656 :     bproc->num_waiters = data->npids - bproc->first_waiter;
    3918             : }
    3919             : 
    3920             : /*
    3921             :  * Returns a list of currently held AccessExclusiveLocks, for use by
    3922             :  * LogStandbySnapshot().  The result is a palloc'd array,
    3923             :  * with the number of elements returned into *nlocks.
    3924             :  *
    3925             :  * XXX This currently takes a lock on all partitions of the lock table,
    3926             :  * but it's possible to do better.  By reference counting locks and storing
    3927             :  * the value in the ProcArray entry for each backend we could tell if any
    3928             :  * locks need recording without having to acquire the partition locks and
    3929             :  * scan the lock table.  Whether that's worth the additional overhead
    3930             :  * is pretty dubious though.
    3931             :  */
    3932             : xl_standby_lock *
    3933        2248 : GetRunningTransactionLocks(int *nlocks)
    3934             : {
    3935             :     xl_standby_lock *accessExclusiveLocks;
    3936             :     PROCLOCK   *proclock;
    3937             :     HASH_SEQ_STATUS seqstat;
    3938             :     int         i;
    3939             :     int         index;
    3940             :     int         els;
    3941             : 
    3942             :     /*
    3943             :      * Acquire lock on the entire shared lock data structure.
    3944             :      *
    3945             :      * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
    3946             :      */
    3947       38216 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3948       35968 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3949             : 
    3950             :     /* Now we can safely count the number of proclocks */
    3951        2248 :     els = hash_get_num_entries(LockMethodProcLockHash);
    3952             : 
    3953             :     /*
    3954             :      * Allocating enough space for all locks in the lock table is overkill,
    3955             :      * but it's more convenient and faster than having to enlarge the array.
    3956             :      */
    3957        2248 :     accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
    3958             : 
    3959             :     /* Now scan the tables to copy the data */
    3960        2248 :     hash_seq_init(&seqstat, LockMethodProcLockHash);
    3961             : 
    3962             :     /*
    3963             :      * If lock is a currently granted AccessExclusiveLock then it will have
    3964             :      * just one proclock holder, so locks are never accessed twice in this
    3965             :      * particular case. Don't copy this code for use elsewhere because in the
    3966             :      * general case this will give you duplicate locks when looking at
    3967             :      * non-exclusive lock types.
    3968             :      */
    3969        2248 :     index = 0;
    3970        8102 :     while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
    3971             :     {
    3972             :         /* make sure this definition matches the one used in LockAcquire */
    3973        5854 :         if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
    3974         188 :             proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
    3975             :         {
    3976         148 :             PGPROC     *proc = proclock->tag.myProc;
    3977         148 :             PGXACT     *pgxact = &ProcGlobal->allPgXact[proc->pgprocno];
    3978         148 :             LOCK       *lock = proclock->tag.myLock;
    3979         148 :             TransactionId xid = pgxact->xid;
    3980             : 
    3981             :             /*
    3982             :              * Don't record locks for transactions if we know they have
    3983             :              * already issued their WAL record for commit but not yet released
    3984             :              * lock. It is still possible that we see locks held by already
    3985             :              * complete transactions, if they haven't yet zeroed their xids.
    3986             :              */
    3987         148 :             if (!TransactionIdIsValid(xid))
    3988           6 :                 continue;
    3989             : 
    3990         142 :             accessExclusiveLocks[index].xid = xid;
    3991         142 :             accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
    3992         142 :             accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
    3993             : 
    3994         142 :             index++;
    3995             :         }
    3996             :     }
    3997             : 
    3998             :     Assert(index <= els);
    3999             : 
    4000             :     /*
    4001             :      * And release locks.  We do this in reverse order for two reasons: (1)
    4002             :      * Anyone else who needs more than one of the locks will be trying to lock
    4003             :      * them in increasing order; we don't want to release the other process
    4004             :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    4005             :      * behavior inside LWLockRelease.
    4006             :      */
    4007       38216 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    4008       35968 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    4009             : 
    4010        2248 :     *nlocks = index;
    4011        2248 :     return accessExclusiveLocks;
    4012             : }
    4013             : 
    4014             : /* Provide the textual name of any lock mode */
    4015             : const char *
    4016        5346 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
    4017             : {
    4018             :     Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
    4019             :     Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
    4020        5346 :     return LockMethods[lockmethodid]->lockModeNames[mode];
    4021             : }
    4022             : 
    4023             : #ifdef LOCK_DEBUG
    4024             : /*
    4025             :  * Dump all locks in the given proc's myProcLocks lists.
    4026             :  *
    4027             :  * Caller is responsible for having acquired appropriate LWLocks.
    4028             :  */
    4029             : void
    4030             : DumpLocks(PGPROC *proc)
    4031             : {
    4032             :     SHM_QUEUE  *procLocks;
    4033             :     PROCLOCK   *proclock;
    4034             :     LOCK       *lock;
    4035             :     int         i;
    4036             : 
    4037             :     if (proc == NULL)
    4038             :         return;
    4039             : 
    4040             :     if (proc->waitLock)
    4041             :         LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
    4042             : 
    4043             :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    4044             :     {
    4045             :         procLocks = &(proc->myProcLocks[i]);
    4046             : 
    4047             :         proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
    4048             :                                              offsetof(PROCLOCK, procLink));
    4049             : 
    4050             :         while (proclock)
    4051             :         {
    4052             :             Assert(proclock->tag.myProc == proc);
    4053             : 
    4054             :             lock = proclock->tag.myLock;
    4055             : 
    4056             :             PROCLOCK_PRINT("DumpLocks", proclock);
    4057             :             LOCK_PRINT("DumpLocks", lock, 0);
    4058             : 
    4059             :             proclock = (PROCLOCK *)
    4060             :                 SHMQueueNext(procLocks, &proclock->procLink,
    4061             :                              offsetof(PROCLOCK, procLink));
    4062             :         }
    4063             :     }
    4064             : }
    4065             : 
    4066             : /*
    4067             :  * Dump all lmgr locks.
    4068             :  *
    4069             :  * Caller is responsible for having acquired appropriate LWLocks.
    4070             :  */
    4071             : void
    4072             : DumpAllLocks(void)
    4073             : {
    4074             :     PGPROC     *proc;
    4075             :     PROCLOCK   *proclock;
    4076             :     LOCK       *lock;
    4077             :     HASH_SEQ_STATUS status;
    4078             : 
    4079             :     proc = MyProc;
    4080             : 
    4081             :     if (proc && proc->waitLock)
    4082             :         LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
    4083             : 
    4084             :     hash_seq_init(&status, LockMethodProcLockHash);
    4085             : 
    4086             :     while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
    4087             :     {
    4088             :         PROCLOCK_PRINT("DumpAllLocks", proclock);
    4089             : 
    4090             :         lock = proclock->tag.myLock;
    4091             :         if (lock)
    4092             :             LOCK_PRINT("DumpAllLocks", lock, 0);
    4093             :         else
    4094             :             elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
    4095             :     }
    4096             : }
    4097             : #endif                          /* LOCK_DEBUG */
    4098             : 
    4099             : /*
    4100             :  * LOCK 2PC resource manager's routines
    4101             :  */
    4102             : 
    4103             : /*
    4104             :  * Re-acquire a lock belonging to a transaction that was prepared.
    4105             :  *
    4106             :  * Because this function is run at db startup, re-acquiring the locks should
    4107             :  * never conflict with running transactions because there are none.  We
    4108             :  * assume that the lock state represented by the stored 2PC files is legal.
    4109             :  *
    4110             :  * When switching from Hot Standby mode to normal operation, the locks will
    4111             :  * be already held by the startup process. The locks are acquired for the new
    4112             :  * procs without checking for conflicts, so we don't get a conflict between the
    4113             :  * startup process and the dummy procs, even though we will momentarily have
    4114             :  * a situation where two procs are holding the same AccessExclusiveLock,
    4115             :  * which isn't normally possible because the conflict. If we're in standby
    4116             :  * mode, but a recovery snapshot hasn't been established yet, it's possible
    4117             :  * that some but not all of the locks are already held by the startup process.
    4118             :  *
    4119             :  * This approach is simple, but also a bit dangerous, because if there isn't
    4120             :  * enough shared memory to acquire the locks, an error will be thrown, which
    4121             :  * is promoted to FATAL and recovery will abort, bringing down postmaster.
    4122             :  * A safer approach would be to transfer the locks like we do in
    4123             :  * AtPrepare_Locks, but then again, in hot standby mode it's possible for
    4124             :  * read-only backends to use up all the shared lock memory anyway, so that
    4125             :  * replaying the WAL record that needs to acquire a lock will throw an error
    4126             :  * and PANIC anyway.
    4127             :  */
    4128             : void
    4129          92 : lock_twophase_recover(TransactionId xid, uint16 info,
    4130             :                       void *recdata, uint32 len)
    4131             : {
    4132          92 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4133          92 :     PGPROC     *proc = TwoPhaseGetDummyProc(xid, false);
    4134             :     LOCKTAG    *locktag;
    4135             :     LOCKMODE    lockmode;
    4136             :     LOCKMETHODID lockmethodid;
    4137             :     LOCK       *lock;
    4138             :     PROCLOCK   *proclock;
    4139             :     PROCLOCKTAG proclocktag;
    4140             :     bool        found;
    4141             :     uint32      hashcode;
    4142             :     uint32      proclock_hashcode;
    4143             :     int         partition;
    4144             :     LWLock     *partitionLock;
    4145             :     LockMethod  lockMethodTable;
    4146             : 
    4147             :     Assert(len == sizeof(TwoPhaseLockRecord));
    4148          92 :     locktag = &rec->locktag;
    4149          92 :     lockmode = rec->lockmode;
    4150          92 :     lockmethodid = locktag->locktag_lockmethodid;
    4151             : 
    4152          92 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4153           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4154          92 :     lockMethodTable = LockMethods[lockmethodid];
    4155             : 
    4156          92 :     hashcode = LockTagHashCode(locktag);
    4157          92 :     partition = LockHashPartition(hashcode);
    4158          92 :     partitionLock = LockHashPartitionLock(hashcode);
    4159             : 
    4160          92 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4161             : 
    4162             :     /*
    4163             :      * Find or create a lock with this tag.
    4164             :      */
    4165          92 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    4166             :                                                 (void *) locktag,
    4167             :                                                 hashcode,
    4168             :                                                 HASH_ENTER_NULL,
    4169             :                                                 &found);
    4170          92 :     if (!lock)
    4171             :     {
    4172           0 :         LWLockRelease(partitionLock);
    4173           0 :         ereport(ERROR,
    4174             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
    4175             :                  errmsg("out of shared memory"),
    4176             :                  errhint("You might need to increase max_locks_per_transaction.")));
    4177             :     }
    4178             : 
    4179             :     /*
    4180             :      * if it's a new lock object, initialize it
    4181             :      */
    4182          92 :     if (!found)
    4183             :     {
    4184          76 :         lock->grantMask = 0;
    4185          76 :         lock->waitMask = 0;
    4186          76 :         SHMQueueInit(&(lock->procLocks));
    4187          76 :         ProcQueueInit(&(lock->waitProcs));
    4188          76 :         lock->nRequested = 0;
    4189          76 :         lock->nGranted = 0;
    4190         456 :         MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
    4191          76 :         MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
    4192             :         LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
    4193             :     }
    4194             :     else
    4195             :     {
    4196             :         LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
    4197             :         Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
    4198             :         Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
    4199             :         Assert(lock->nGranted <= lock->nRequested);
    4200             :     }
    4201             : 
    4202             :     /*
    4203             :      * Create the hash key for the proclock table.
    4204             :      */
    4205          92 :     proclocktag.myLock = lock;
    4206          92 :     proclocktag.myProc = proc;
    4207             : 
    4208          92 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    4209             : 
    4210             :     /*
    4211             :      * Find or create a proclock entry with this tag
    4212             :      */
    4213          92 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    4214             :                                                         (void *) &proclocktag,
    4215             :                                                         proclock_hashcode,
    4216             :                                                         HASH_ENTER_NULL,
    4217             :                                                         &found);
    4218          92 :     if (!proclock)
    4219             :     {
    4220             :         /* Oops, not enough shmem for the proclock */
    4221           0 :         if (lock->nRequested == 0)
    4222             :         {
    4223             :             /*
    4224             :              * There are no other requestors of this lock, so garbage-collect
    4225             :              * the lock object.  We *must* do this to avoid a permanent leak
    4226             :              * of shared memory, because there won't be anything to cause
    4227             :              * anyone to release the lock object later.
    4228             :              */
    4229             :             Assert(SHMQueueEmpty(&(lock->procLocks)));
    4230           0 :             if (!hash_search_with_hash_value(LockMethodLockHash,
    4231           0 :                                              (void *) &(lock->tag),
    4232             :                                              hashcode,
    4233             :                                              HASH_REMOVE,
    4234             :                                              NULL))
    4235           0 :                 elog(PANIC, "lock table corrupted");
    4236             :         }
    4237           0 :         LWLockRelease(partitionLock);
    4238           0 :         ereport(ERROR,
    4239             :                 (errcode(ERRCODE_OUT_OF_MEMORY),
    4240             :                  errmsg("out of shared memory"),
    4241             :                  errhint("You might need to increase max_locks_per_transaction.")));
    4242             :     }
    4243             : 
    4244             :     /*
    4245             :      * If new, initialize the new entry
    4246             :      */
    4247          92 :     if (!found)
    4248             :     {
    4249             :         Assert(proc->lockGroupLeader == NULL);
    4250          84 :         proclock->groupLeader = proc;
    4251          84 :         proclock->holdMask = 0;
    4252          84 :         proclock->releaseMask = 0;
    4253             :         /* Add proclock to appropriate lists */
    4254          84 :         SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
    4255          84 :         SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
    4256             :                              &proclock->procLink);
    4257             :         PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
    4258             :     }
    4259             :     else
    4260             :     {
    4261             :         PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
    4262             :         Assert((proclock->holdMask & ~lock->grantMask) == 0);
    4263             :     }
    4264             : 
    4265             :     /*
    4266             :      * lock->nRequested and lock->requested[] count the total number of
    4267             :      * requests, whether granted or waiting, so increment those immediately.
    4268             :      */
    4269          92 :     lock->nRequested++;
    4270          92 :     lock->requested[lockmode]++;
    4271             :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    4272             : 
    4273             :     /*
    4274             :      * We shouldn't already hold the desired lock.
    4275             :      */
    4276          92 :     if (proclock->holdMask & LOCKBIT_ON(lockmode))
    4277           0 :         elog(ERROR, "lock %s on object %u/%u/%u is already held",
    4278             :              lockMethodTable->lockModeNames[lockmode],
    4279             :              lock->tag.locktag_field1, lock->tag.locktag_field2,
    4280             :              lock->tag.locktag_field3);
    4281             : 
    4282             :     /*
    4283             :      * We ignore any possible conflicts and just grant ourselves the lock. Not
    4284             :      * only because we don't bother, but also to avoid deadlocks when
    4285             :      * switching from standby to normal mode. See function comment.
    4286             :      */
    4287          92 :     GrantLock(lock, proclock, lockmode);
    4288             : 
    4289             :     /*
    4290             :      * Bump strong lock count, to make sure any fast-path lock requests won't
    4291             :      * be granted without consulting the primary lock table.
    4292             :      */
    4293          92 :     if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
    4294             :     {
    4295          24 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
    4296             : 
    4297          24 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    4298          24 :         FastPathStrongRelationLocks->count[fasthashcode]++;
    4299          24 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    4300             :     }
    4301             : 
    4302          92 :     LWLockRelease(partitionLock);
    4303          92 : }
    4304             : 
    4305             : /*
    4306             :  * Re-acquire a lock belonging to a transaction that was prepared, when
    4307             :  * starting up into hot standby mode.
    4308             :  */
    4309             : void
    4310           0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
    4311             :                               void *recdata, uint32 len)
    4312             : {
    4313           0 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4314             :     LOCKTAG    *locktag;
    4315             :     LOCKMODE    lockmode;
    4316             :     LOCKMETHODID lockmethodid;
    4317             : 
    4318             :     Assert(len == sizeof(TwoPhaseLockRecord));
    4319           0 :     locktag = &rec->locktag;
    4320           0 :     lockmode = rec->lockmode;
    4321           0 :     lockmethodid = locktag->locktag_lockmethodid;
    4322             : 
    4323           0 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4324           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4325             : 
    4326           0 :     if (lockmode == AccessExclusiveLock &&
    4327           0 :         locktag->locktag_type == LOCKTAG_RELATION)
    4328             :     {
    4329           0 :         StandbyAcquireAccessExclusiveLock(xid,
    4330             :                                           locktag->locktag_field1 /* dboid */ ,
    4331             :                                           locktag->locktag_field2 /* reloid */ );
    4332             :     }
    4333           0 : }
    4334             : 
    4335             : 
    4336             : /*
    4337             :  * 2PC processing routine for COMMIT PREPARED case.
    4338             :  *
    4339             :  * Find and release the lock indicated by the 2PC record.
    4340             :  */
    4341             : void
    4342         202 : lock_twophase_postcommit(TransactionId xid, uint16 info,
    4343             :                          void *recdata, uint32 len)
    4344             : {
    4345         202 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4346         202 :     PGPROC     *proc = TwoPhaseGetDummyProc(xid, true);
    4347             :     LOCKTAG    *locktag;
    4348             :     LOCKMETHODID lockmethodid;
    4349             :     LockMethod  lockMethodTable;
    4350             : 
    4351             :     Assert(len == sizeof(TwoPhaseLockRecord));
    4352         202 :     locktag = &rec->locktag;
    4353         202 :     lockmethodid = locktag->locktag_lockmethodid;
    4354             : 
    4355         202 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4356           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4357         202 :     lockMethodTable = LockMethods[lockmethodid];
    4358             : 
    4359         202 :     LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
    4360         202 : }
    4361             : 
    4362             : /*
    4363             :  * 2PC processing routine for ROLLBACK PREPARED case.
    4364             :  *
    4365             :  * This is actually just the same as the COMMIT case.
    4366             :  */
    4367             : void
    4368          62 : lock_twophase_postabort(TransactionId xid, uint16 info,
    4369             :                         void *recdata, uint32 len)
    4370             : {
    4371          62 :     lock_twophase_postcommit(xid, info, recdata, len);
    4372          62 : }
    4373             : 
    4374             : /*
    4375             :  *      VirtualXactLockTableInsert
    4376             :  *
    4377             :  *      Take vxid lock via the fast-path.  There can't be any pre-existing
    4378             :  *      lockers, as we haven't advertised this vxid via the ProcArray yet.
    4379             :  *
    4380             :  *      Since MyProc->fpLocalTransactionId will normally contain the same data
    4381             :  *      as MyProc->lxid, you might wonder if we really need both.  The
    4382             :  *      difference is that MyProc->lxid is set and cleared unlocked, and
    4383             :  *      examined by procarray.c, while fpLocalTransactionId is protected by
    4384             :  *      fpInfoLock and is used only by the locking subsystem.  Doing it this
    4385             :  *      way makes it easier to verify that there are no funny race conditions.
    4386             :  *
    4387             :  *      We don't bother recording this lock in the local lock table, since it's
    4388             :  *      only ever released at the end of a transaction.  Instead,
    4389             :  *      LockReleaseAll() calls VirtualXactLockTableCleanup().
    4390             :  */
    4391             : void
    4392      495366 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
    4393             : {
    4394             :     Assert(VirtualTransactionIdIsValid(vxid));
    4395             : 
    4396      495366 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    4397             : 
    4398             :     Assert(MyProc->backendId == vxid.backendId);
    4399             :     Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
    4400             :     Assert(MyProc->fpVXIDLock == false);
    4401             : 
    4402      495366 :     MyProc->fpVXIDLock = true;
    4403      495366 :     MyProc->fpLocalTransactionId = vxid.localTransactionId;
    4404             : 
    4405      495366 :     LWLockRelease(&MyProc->fpInfoLock);
    4406      495366 : }
    4407             : 
    4408             : /*
    4409             :  *      VirtualXactLockTableCleanup
    4410             :  *
    4411             :  *      Check whether a VXID lock has been materialized; if so, release it,
    4412             :  *      unblocking waiters.
    4413             :  */
    4414             : void
    4415      495332 : VirtualXactLockTableCleanup(void)
    4416             : {
    4417             :     bool        fastpath;
    4418             :     LocalTransactionId lxid;
    4419             : 
    4420             :     Assert(MyProc->backendId != InvalidBackendId);
    4421             : 
    4422             :     /*
    4423             :      * Clean up shared memory state.
    4424             :      */
    4425      495332 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    4426             : 
    4427      495332 :     fastpath = MyProc->fpVXIDLock;
    4428      495332 :     lxid = MyProc->fpLocalTransactionId;
    4429      495332 :     MyProc->fpVXIDLock = false;
    4430      495332 :     MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
    4431             : 
    4432      495332 :     LWLockRelease(&MyProc->fpInfoLock);
    4433             : 
    4434             :     /*
    4435             :      * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
    4436             :      * that means someone transferred the lock to the main lock table.
    4437             :      */
    4438      495332 :     if (!fastpath && LocalTransactionIdIsValid(lxid))
    4439             :     {
    4440             :         VirtualTransactionId vxid;
    4441             :         LOCKTAG     locktag;
    4442             : 
    4443          50 :         vxid.backendId = MyBackendId;
    4444          50 :         vxid.localTransactionId = lxid;
    4445          50 :         SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
    4446             : 
    4447          50 :         LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
    4448             :                              &locktag, ExclusiveLock, false);
    4449             :     }
    4450      495332 : }
    4451             : 
    4452             : /*
    4453             :  *      VirtualXactLock
    4454             :  *
    4455             :  * If wait = true, wait until the given VXID has been released, and then
    4456             :  * return true.
    4457             :  *
    4458             :  * If wait = false, just check whether the VXID is still running, and return
    4459             :  * true or false.
    4460             :  */
    4461             : bool
    4462          50 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
    4463             : {
    4464             :     LOCKTAG     tag;
    4465             :     PGPROC     *proc;
    4466             : 
    4467             :     Assert(VirtualTransactionIdIsValid(vxid));
    4468             : 
    4469          50 :     SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
    4470             : 
    4471             :     /*
    4472             :      * If a lock table entry must be made, this is the PGPROC on whose behalf
    4473             :      * it must be done.  Note that the transaction might end or the PGPROC
    4474             :      * might be reassigned to a new backend before we get around to examining
    4475             :      * it, but it doesn't matter.  If we find upon examination that the
    4476             :      * relevant lxid is no longer running here, that's enough to prove that
    4477             :      * it's no longer running anywhere.
    4478             :      */
    4479          50 :     proc = BackendIdGetProc(vxid.backendId);
    4480          50 :     if (proc == NULL)
    4481           0 :         return true;
    4482             : 
    4483             :     /*
    4484             :      * We must acquire this lock before checking the backendId and lxid
    4485             :      * against the ones we're waiting for.  The target backend will only set
    4486             :      * or clear lxid while holding this lock.
    4487             :      */
    4488          50 :     LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
    4489             : 
    4490             :     /* If the transaction has ended, our work here is done. */
    4491          50 :     if (proc->backendId != vxid.backendId
    4492          50 :         || proc->fpLocalTransactionId != vxid.localTransactionId)
    4493             :     {
    4494           0 :         LWLockRelease(&proc->fpInfoLock);
    4495           0 :         return true;
    4496             :     }
    4497             : 
    4498             :     /*
    4499             :      * If we aren't asked to wait, there's no need to set up a lock table
    4500             :      * entry.  The transaction is still in progress, so just return false.
    4501             :      */
    4502          50 :     if (!wait)
    4503             :     {
    4504           0 :         LWLockRelease(&proc->fpInfoLock);
    4505           0 :         return false;
    4506             :     }
    4507             : 
    4508             :     /*
    4509             :      * OK, we're going to need to sleep on the VXID.  But first, we must set
    4510             :      * up the primary lock table entry, if needed (ie, convert the proc's
    4511             :      * fast-path lock on its VXID to a regular lock).
    4512             :      */
    4513          50 :     if (proc->fpVXIDLock)
    4514             :     {
    4515             :         PROCLOCK   *proclock;
    4516             :         uint32      hashcode;
    4517             :         LWLock     *partitionLock;
    4518             : 
    4519          50 :         hashcode = LockTagHashCode(&tag);
    4520             : 
    4521          50 :         partitionLock = LockHashPartitionLock(hashcode);
    4522          50 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4523             : 
    4524          50 :         proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
    4525             :                                     &tag, hashcode, ExclusiveLock);
    4526          50 :         if (!proclock)
    4527             :         {
    4528           0 :             LWLockRelease(partitionLock);
    4529           0 :             LWLockRelease(&proc->fpInfoLock);
    4530           0 :             ereport(ERROR,
    4531             :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    4532             :                      errmsg("out of shared memory"),
    4533             :                      errhint("You might need to increase max_locks_per_transaction.")));
    4534             :         }
    4535          50 :         GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
    4536             : 
    4537          50 :         LWLockRelease(partitionLock);
    4538             : 
    4539          50 :         proc->fpVXIDLock = false;
    4540             :     }
    4541             : 
    4542             :     /* Done with proc->fpLockBits */
    4543          50 :     LWLockRelease(&proc->fpInfoLock);
    4544             : 
    4545             :     /* Time to wait. */
    4546          50 :     (void) LockAcquire(&tag, ShareLock, false, false);
    4547             : 
    4548          50 :     LockRelease(&tag, ShareLock, false);
    4549          50 :     return true;
    4550             : }
    4551             : 
    4552             : /*
    4553             :  * LockWaiterCount
    4554             :  *
    4555             :  * Find the number of lock requester on this locktag
    4556             :  */
    4557             : int
    4558          72 : LockWaiterCount(const LOCKTAG *locktag)
    4559             : {
    4560          72 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    4561             :     LOCK       *lock;
    4562             :     bool        found;
    4563             :     uint32      hashcode;
    4564             :     LWLock     *partitionLock;
    4565          72 :     int         waiters = 0;
    4566             : 
    4567          72 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4568           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4569             : 
    4570          72 :     hashcode = LockTagHashCode(locktag);
    4571          72 :     partitionLock = LockHashPartitionLock(hashcode);
    4572          72 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4573             : 
    4574          72 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    4575             :                                                 (const void *) locktag,
    4576             :                                                 hashcode,
    4577             :                                                 HASH_FIND,
    4578             :                                                 &found);
    4579          72 :     if (found)
    4580             :     {
    4581             :         Assert(lock != NULL);
    4582          72 :         waiters = lock->nRequested;
    4583             :     }
    4584          72 :     LWLockRelease(partitionLock);
    4585             : 
    4586          72 :     return waiters;
    4587             : }

Generated by: LCOV version 1.13