Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * lock.c
4 : * POSTGRES primary lock mechanism
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/lmgr/lock.c
12 : *
13 : * NOTES
14 : * A lock table is a shared memory hash table. When
15 : * a process tries to acquire a lock of a type that conflicts
16 : * with existing locks, it is put to sleep using the routines
17 : * in storage/lmgr/proc.c.
18 : *
19 : * For the most part, this code should be invoked via lmgr.c
20 : * or another lock-management module, not directly.
21 : *
22 : * Interface:
23 : *
24 : * InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 : * LockAcquire(), LockRelease(), LockReleaseAll(),
26 : * LockCheckConflicts(), GrantLock()
27 : *
28 : *-------------------------------------------------------------------------
29 : */
30 : #include "postgres.h"
31 :
32 : #include <signal.h>
33 : #include <unistd.h>
34 :
35 : #include "access/transam.h"
36 : #include "access/twophase.h"
37 : #include "access/twophase_rmgr.h"
38 : #include "access/xlog.h"
39 : #include "access/xlogutils.h"
40 : #include "miscadmin.h"
41 : #include "pg_trace.h"
42 : #include "storage/proc.h"
43 : #include "storage/procarray.h"
44 : #include "storage/sinvaladt.h"
45 : #include "storage/spin.h"
46 : #include "storage/standby.h"
47 : #include "utils/memutils.h"
48 : #include "utils/ps_status.h"
49 : #include "utils/resowner.h"
50 :
51 :
52 : /* This configuration variable is used to set the lock table size */
53 : int max_locks_per_xact; /* set by guc.c */
54 :
55 : #define NLOCKENTS() \
56 : mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
57 :
58 :
59 : /*
60 : * Data structures defining the semantics of the standard lock methods.
61 : *
62 : * The conflict table defines the semantics of the various lock modes.
63 : */
64 : static const LOCKMASK LockConflicts[] = {
65 : 0,
66 :
67 : /* AccessShareLock */
68 : LOCKBIT_ON(AccessExclusiveLock),
69 :
70 : /* RowShareLock */
71 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
72 :
73 : /* RowExclusiveLock */
74 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
75 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
76 :
77 : /* ShareUpdateExclusiveLock */
78 : LOCKBIT_ON(ShareUpdateExclusiveLock) |
79 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
80 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
81 :
82 : /* ShareLock */
83 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
84 : LOCKBIT_ON(ShareRowExclusiveLock) |
85 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
86 :
87 : /* ShareRowExclusiveLock */
88 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
89 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
90 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
91 :
92 : /* ExclusiveLock */
93 : LOCKBIT_ON(RowShareLock) |
94 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
95 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
96 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
97 :
98 : /* AccessExclusiveLock */
99 : LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
100 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
101 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
102 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
103 :
104 : };
105 :
106 : /* Names of lock modes, for debug printouts */
107 : static const char *const lock_mode_names[] =
108 : {
109 : "INVALID",
110 : "AccessShareLock",
111 : "RowShareLock",
112 : "RowExclusiveLock",
113 : "ShareUpdateExclusiveLock",
114 : "ShareLock",
115 : "ShareRowExclusiveLock",
116 : "ExclusiveLock",
117 : "AccessExclusiveLock"
118 : };
119 :
120 : #ifndef LOCK_DEBUG
121 : static bool Dummy_trace = false;
122 : #endif
123 :
124 : static const LockMethodData default_lockmethod = {
125 : MaxLockMode,
126 : LockConflicts,
127 : lock_mode_names,
128 : #ifdef LOCK_DEBUG
129 : &Trace_locks
130 : #else
131 : &Dummy_trace
132 : #endif
133 : };
134 :
135 : static const LockMethodData user_lockmethod = {
136 : MaxLockMode,
137 : LockConflicts,
138 : lock_mode_names,
139 : #ifdef LOCK_DEBUG
140 : &Trace_userlocks
141 : #else
142 : &Dummy_trace
143 : #endif
144 : };
145 :
146 : /*
147 : * map from lock method id to the lock table data structures
148 : */
149 : static const LockMethod LockMethods[] = {
150 : NULL,
151 : &default_lockmethod,
152 : &user_lockmethod
153 : };
154 :
155 :
156 : /* Record that's written to 2PC state file when a lock is persisted */
157 : typedef struct TwoPhaseLockRecord
158 : {
159 : LOCKTAG locktag;
160 : LOCKMODE lockmode;
161 : } TwoPhaseLockRecord;
162 :
163 :
164 : /*
165 : * Count of the number of fast path lock slots we believe to be used. This
166 : * might be higher than the real number if another backend has transferred
167 : * our locks to the primary lock table, but it can never be lower than the
168 : * real value, since only we can acquire locks on our own behalf.
169 : */
170 : static int FastPathLocalUseCount = 0;
171 :
172 : /*
173 : * Flag to indicate if the relation extension lock is held by this backend.
174 : * This flag is used to ensure that while holding the relation extension lock
175 : * we don't try to acquire a heavyweight lock on any other object. This
176 : * restriction implies that the relation extension lock won't ever participate
177 : * in the deadlock cycle because we can never wait for any other heavyweight
178 : * lock after acquiring this lock.
179 : *
180 : * Such a restriction is okay for relation extension locks as unlike other
181 : * heavyweight locks these are not held till the transaction end. These are
182 : * taken for a short duration to extend a particular relation and then
183 : * released.
184 : */
185 : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
186 :
187 : /* Macros for manipulating proc->fpLockBits */
188 : #define FAST_PATH_BITS_PER_SLOT 3
189 : #define FAST_PATH_LOCKNUMBER_OFFSET 1
190 : #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
191 : #define FAST_PATH_GET_BITS(proc, n) \
192 : (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
193 : #define FAST_PATH_BIT_POSITION(n, l) \
194 : (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
195 : AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
196 : AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
197 : ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
198 : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
199 : (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
200 : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
201 : (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
202 : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
203 : ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
204 :
205 : /*
206 : * The fast-path lock mechanism is concerned only with relation locks on
207 : * unshared relations by backends bound to a database. The fast-path
208 : * mechanism exists mostly to accelerate acquisition and release of locks
209 : * that rarely conflict. Because ShareUpdateExclusiveLock is
210 : * self-conflicting, it can't use the fast-path mechanism; but it also does
211 : * not conflict with any of the locks that do, so we can ignore it completely.
212 : */
213 : #define EligibleForRelationFastPath(locktag, mode) \
214 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
215 : (locktag)->locktag_type == LOCKTAG_RELATION && \
216 : (locktag)->locktag_field1 == MyDatabaseId && \
217 : MyDatabaseId != InvalidOid && \
218 : (mode) < ShareUpdateExclusiveLock)
219 : #define ConflictsWithRelationFastPath(locktag, mode) \
220 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
221 : (locktag)->locktag_type == LOCKTAG_RELATION && \
222 : (locktag)->locktag_field1 != InvalidOid && \
223 : (mode) > ShareUpdateExclusiveLock)
224 :
225 : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
226 : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
227 : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
228 : const LOCKTAG *locktag, uint32 hashcode);
229 : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
230 :
231 : /*
232 : * To make the fast-path lock mechanism work, we must have some way of
233 : * preventing the use of the fast-path when a conflicting lock might be present.
234 : * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
235 : * and maintain an integer count of the number of "strong" lockers
236 : * in each partition. When any "strong" lockers are present (which is
237 : * hopefully not very often), the fast-path mechanism can't be used, and we
238 : * must fall back to the slower method of pushing matching locks directly
239 : * into the main lock tables.
240 : *
241 : * The deadlock detector does not know anything about the fast path mechanism,
242 : * so any locks that might be involved in a deadlock must be transferred from
243 : * the fast-path queues to the main lock table.
244 : */
245 :
246 : #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
247 : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
248 : (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
249 : #define FastPathStrongLockHashPartition(hashcode) \
250 : ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
251 :
252 : typedef struct
253 : {
254 : slock_t mutex;
255 : uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
256 : } FastPathStrongRelationLockData;
257 :
258 : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
259 :
260 :
261 : /*
262 : * Pointers to hash tables containing lock state
263 : *
264 : * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
265 : * shared memory; LockMethodLocalHash is local to each backend.
266 : */
267 : static HTAB *LockMethodLockHash;
268 : static HTAB *LockMethodProcLockHash;
269 : static HTAB *LockMethodLocalHash;
270 :
271 :
272 : /* private state for error cleanup */
273 : static LOCALLOCK *StrongLockInProgress;
274 : static LOCALLOCK *awaitedLock;
275 : static ResourceOwner awaitedOwner;
276 :
277 :
278 : #ifdef LOCK_DEBUG
279 :
280 : /*------
281 : * The following configuration options are available for lock debugging:
282 : *
283 : * TRACE_LOCKS -- give a bunch of output what's going on in this file
284 : * TRACE_USERLOCKS -- same but for user locks
285 : * TRACE_LOCK_OIDMIN -- do not trace locks for tables below this oid
286 : * (use to avoid output on system tables)
287 : * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
288 : * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
289 : *
290 : * Furthermore, but in storage/lmgr/lwlock.c:
291 : * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
292 : *
293 : * Define LOCK_DEBUG at compile time to get all these enabled.
294 : * --------
295 : */
296 :
297 : int Trace_lock_oidmin = FirstNormalObjectId;
298 : bool Trace_locks = false;
299 : bool Trace_userlocks = false;
300 : int Trace_lock_table = 0;
301 : bool Debug_deadlocks = false;
302 :
303 :
304 : inline static bool
305 : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
306 : {
307 : return
308 : (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
309 : ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
310 : || (Trace_lock_table &&
311 : (tag->locktag_field2 == Trace_lock_table));
312 : }
313 :
314 :
315 : inline static void
316 : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
317 : {
318 : if (LOCK_DEBUG_ENABLED(&lock->tag))
319 : elog(LOG,
320 : "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
321 : "req(%d,%d,%d,%d,%d,%d,%d)=%d "
322 : "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
323 : where, lock,
324 : lock->tag.locktag_field1, lock->tag.locktag_field2,
325 : lock->tag.locktag_field3, lock->tag.locktag_field4,
326 : lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
327 : lock->grantMask,
328 : lock->requested[1], lock->requested[2], lock->requested[3],
329 : lock->requested[4], lock->requested[5], lock->requested[6],
330 : lock->requested[7], lock->nRequested,
331 : lock->granted[1], lock->granted[2], lock->granted[3],
332 : lock->granted[4], lock->granted[5], lock->granted[6],
333 : lock->granted[7], lock->nGranted,
334 : dclist_count(&lock->waitProcs),
335 : LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
336 : }
337 :
338 :
339 : inline static void
340 : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
341 : {
342 : if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
343 : elog(LOG,
344 : "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
345 : where, proclockP, proclockP->tag.myLock,
346 : PROCLOCK_LOCKMETHOD(*(proclockP)),
347 : proclockP->tag.myProc, (int) proclockP->holdMask);
348 : }
349 : #else /* not LOCK_DEBUG */
350 :
351 : #define LOCK_PRINT(where, lock, type) ((void) 0)
352 : #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
353 : #endif /* not LOCK_DEBUG */
354 :
355 :
356 : static uint32 proclock_hash(const void *key, Size keysize);
357 : static void RemoveLocalLock(LOCALLOCK *locallock);
358 : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
359 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
360 : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
361 : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
362 : static void FinishStrongLockAcquire(void);
363 : static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner,
364 : bool dontWait);
365 : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
366 : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
367 : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
368 : PROCLOCK *proclock, LockMethod lockMethodTable);
369 : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
370 : LockMethod lockMethodTable, uint32 hashcode,
371 : bool wakeupNeeded);
372 : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
373 : LOCKTAG *locktag, LOCKMODE lockmode,
374 : bool decrement_strong_lock_count);
375 : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
376 : BlockedProcsData *data);
377 :
378 :
379 : /*
380 : * InitLocks -- Initialize the lock manager's data structures.
381 : *
382 : * This is called from CreateSharedMemoryAndSemaphores(), which see for
383 : * more comments. In the normal postmaster case, the shared hash tables
384 : * are created here, as well as a locallock hash table that will remain
385 : * unused and empty in the postmaster itself. Backends inherit the pointers
386 : * to the shared tables via fork(), and also inherit an image of the locallock
387 : * hash table, which they proceed to use. In the EXEC_BACKEND case, each
388 : * backend re-executes this code to obtain pointers to the already existing
389 : * shared hash tables and to create its locallock hash table.
390 : */
391 : void
392 1768 : InitLocks(void)
393 : {
394 : HASHCTL info;
395 : long init_table_size,
396 : max_table_size;
397 : bool found;
398 :
399 : /*
400 : * Compute init/max size to request for lock hashtables. Note these
401 : * calculations must agree with LockShmemSize!
402 : */
403 1768 : max_table_size = NLOCKENTS();
404 1768 : init_table_size = max_table_size / 2;
405 :
406 : /*
407 : * Allocate hash table for LOCK structs. This stores per-locked-object
408 : * information.
409 : */
410 1768 : info.keysize = sizeof(LOCKTAG);
411 1768 : info.entrysize = sizeof(LOCK);
412 1768 : info.num_partitions = NUM_LOCK_PARTITIONS;
413 :
414 1768 : LockMethodLockHash = ShmemInitHash("LOCK hash",
415 : init_table_size,
416 : max_table_size,
417 : &info,
418 : HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
419 :
420 : /* Assume an average of 2 holders per lock */
421 1768 : max_table_size *= 2;
422 1768 : init_table_size *= 2;
423 :
424 : /*
425 : * Allocate hash table for PROCLOCK structs. This stores
426 : * per-lock-per-holder information.
427 : */
428 1768 : info.keysize = sizeof(PROCLOCKTAG);
429 1768 : info.entrysize = sizeof(PROCLOCK);
430 1768 : info.hash = proclock_hash;
431 1768 : info.num_partitions = NUM_LOCK_PARTITIONS;
432 :
433 1768 : LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
434 : init_table_size,
435 : max_table_size,
436 : &info,
437 : HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
438 :
439 : /*
440 : * Allocate fast-path structures.
441 : */
442 1768 : FastPathStrongRelationLocks =
443 1768 : ShmemInitStruct("Fast Path Strong Relation Lock Data",
444 : sizeof(FastPathStrongRelationLockData), &found);
445 1768 : if (!found)
446 1768 : SpinLockInit(&FastPathStrongRelationLocks->mutex);
447 :
448 : /*
449 : * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
450 : * counts and resource owner information.
451 : *
452 : * The non-shared table could already exist in this process (this occurs
453 : * when the postmaster is recreating shared memory after a backend crash).
454 : * If so, delete and recreate it. (We could simply leave it, since it
455 : * ought to be empty in the postmaster, but for safety let's zap it.)
456 : */
457 1768 : if (LockMethodLocalHash)
458 8 : hash_destroy(LockMethodLocalHash);
459 :
460 1768 : info.keysize = sizeof(LOCALLOCKTAG);
461 1768 : info.entrysize = sizeof(LOCALLOCK);
462 :
463 1768 : LockMethodLocalHash = hash_create("LOCALLOCK hash",
464 : 16,
465 : &info,
466 : HASH_ELEM | HASH_BLOBS);
467 1768 : }
468 :
469 :
470 : /*
471 : * Fetch the lock method table associated with a given lock
472 : */
473 : LockMethod
474 176 : GetLocksMethodTable(const LOCK *lock)
475 : {
476 176 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
477 :
478 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
479 176 : return LockMethods[lockmethodid];
480 : }
481 :
482 : /*
483 : * Fetch the lock method table associated with a given locktag
484 : */
485 : LockMethod
486 2198 : GetLockTagsMethodTable(const LOCKTAG *locktag)
487 : {
488 2198 : LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
489 :
490 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
491 2198 : return LockMethods[lockmethodid];
492 : }
493 :
494 :
495 : /*
496 : * Compute the hash code associated with a LOCKTAG.
497 : *
498 : * To avoid unnecessary recomputations of the hash code, we try to do this
499 : * just once per function, and then pass it around as needed. Aside from
500 : * passing the hashcode to hash_search_with_hash_value(), we can extract
501 : * the lock partition number from the hashcode.
502 : */
503 : uint32
504 27229442 : LockTagHashCode(const LOCKTAG *locktag)
505 : {
506 27229442 : return get_hash_value(LockMethodLockHash, (const void *) locktag);
507 : }
508 :
509 : /*
510 : * Compute the hash code associated with a PROCLOCKTAG.
511 : *
512 : * Because we want to use just one set of partition locks for both the
513 : * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
514 : * fall into the same partition number as their associated LOCKs.
515 : * dynahash.c expects the partition number to be the low-order bits of
516 : * the hash code, and therefore a PROCLOCKTAG's hash code must have the
517 : * same low-order bits as the associated LOCKTAG's hash code. We achieve
518 : * this with this specialized hash function.
519 : */
520 : static uint32
521 1660 : proclock_hash(const void *key, Size keysize)
522 : {
523 1660 : const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
524 : uint32 lockhash;
525 : Datum procptr;
526 :
527 : Assert(keysize == sizeof(PROCLOCKTAG));
528 :
529 : /* Look into the associated LOCK object, and compute its hash code */
530 1660 : lockhash = LockTagHashCode(&proclocktag->myLock->tag);
531 :
532 : /*
533 : * To make the hash code also depend on the PGPROC, we xor the proc
534 : * struct's address into the hash code, left-shifted so that the
535 : * partition-number bits don't change. Since this is only a hash, we
536 : * don't care if we lose high-order bits of the address; use an
537 : * intermediate variable to suppress cast-pointer-to-int warnings.
538 : */
539 1660 : procptr = PointerGetDatum(proclocktag->myProc);
540 1660 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
541 :
542 1660 : return lockhash;
543 : }
544 :
545 : /*
546 : * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
547 : * for its underlying LOCK.
548 : *
549 : * We use this just to avoid redundant calls of LockTagHashCode().
550 : */
551 : static inline uint32
552 6509226 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
553 : {
554 6509226 : uint32 lockhash = hashcode;
555 : Datum procptr;
556 :
557 : /*
558 : * This must match proclock_hash()!
559 : */
560 6509226 : procptr = PointerGetDatum(proclocktag->myProc);
561 6509226 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
562 :
563 6509226 : return lockhash;
564 : }
565 :
566 : /*
567 : * Given two lock modes, return whether they would conflict.
568 : */
569 : bool
570 464 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
571 : {
572 464 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
573 :
574 464 : if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
575 272 : return true;
576 :
577 192 : return false;
578 : }
579 :
580 : /*
581 : * LockHeldByMe -- test whether lock 'locktag' is held with mode 'lockmode'
582 : * by the current transaction
583 : */
584 : bool
585 0 : LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
586 : {
587 : LOCALLOCKTAG localtag;
588 : LOCALLOCK *locallock;
589 :
590 : /*
591 : * See if there is a LOCALLOCK entry for this lock and lockmode
592 : */
593 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
594 0 : localtag.lock = *locktag;
595 0 : localtag.mode = lockmode;
596 :
597 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
598 : &localtag,
599 : HASH_FIND, NULL);
600 :
601 0 : return (locallock && locallock->nLocks > 0);
602 : }
603 :
604 : #ifdef USE_ASSERT_CHECKING
605 : /*
606 : * GetLockMethodLocalHash -- return the hash of local locks, for modules that
607 : * evaluate assertions based on all locks held.
608 : */
609 : HTAB *
610 : GetLockMethodLocalHash(void)
611 : {
612 : return LockMethodLocalHash;
613 : }
614 : #endif
615 :
616 : /*
617 : * LockHasWaiters -- look up 'locktag' and check if releasing this
618 : * lock would wake up other processes waiting for it.
619 : */
620 : bool
621 0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
622 : {
623 0 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
624 : LockMethod lockMethodTable;
625 : LOCALLOCKTAG localtag;
626 : LOCALLOCK *locallock;
627 : LOCK *lock;
628 : PROCLOCK *proclock;
629 : LWLock *partitionLock;
630 0 : bool hasWaiters = false;
631 :
632 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
633 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
634 0 : lockMethodTable = LockMethods[lockmethodid];
635 0 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
636 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
637 :
638 : #ifdef LOCK_DEBUG
639 : if (LOCK_DEBUG_ENABLED(locktag))
640 : elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
641 : locktag->locktag_field1, locktag->locktag_field2,
642 : lockMethodTable->lockModeNames[lockmode]);
643 : #endif
644 :
645 : /*
646 : * Find the LOCALLOCK entry for this lock and lockmode
647 : */
648 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
649 0 : localtag.lock = *locktag;
650 0 : localtag.mode = lockmode;
651 :
652 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
653 : &localtag,
654 : HASH_FIND, NULL);
655 :
656 : /*
657 : * let the caller print its own error message, too. Do not ereport(ERROR).
658 : */
659 0 : if (!locallock || locallock->nLocks <= 0)
660 : {
661 0 : elog(WARNING, "you don't own a lock of type %s",
662 : lockMethodTable->lockModeNames[lockmode]);
663 0 : return false;
664 : }
665 :
666 : /*
667 : * Check the shared lock table.
668 : */
669 0 : partitionLock = LockHashPartitionLock(locallock->hashcode);
670 :
671 0 : LWLockAcquire(partitionLock, LW_SHARED);
672 :
673 : /*
674 : * We don't need to re-find the lock or proclock, since we kept their
675 : * addresses in the locallock table, and they couldn't have been removed
676 : * while we were holding a lock on them.
677 : */
678 0 : lock = locallock->lock;
679 : LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
680 0 : proclock = locallock->proclock;
681 : PROCLOCK_PRINT("LockHasWaiters: found", proclock);
682 :
683 : /*
684 : * Double-check that we are actually holding a lock of the type we want to
685 : * release.
686 : */
687 0 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
688 : {
689 : PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
690 0 : LWLockRelease(partitionLock);
691 0 : elog(WARNING, "you don't own a lock of type %s",
692 : lockMethodTable->lockModeNames[lockmode]);
693 0 : RemoveLocalLock(locallock);
694 0 : return false;
695 : }
696 :
697 : /*
698 : * Do the checking.
699 : */
700 0 : if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
701 0 : hasWaiters = true;
702 :
703 0 : LWLockRelease(partitionLock);
704 :
705 0 : return hasWaiters;
706 : }
707 :
708 : /*
709 : * LockAcquire -- Check for lock conflicts, sleep if conflict found,
710 : * set lock if/when no conflicts.
711 : *
712 : * Inputs:
713 : * locktag: unique identifier for the lockable object
714 : * lockmode: lock mode to acquire
715 : * sessionLock: if true, acquire lock for session not current transaction
716 : * dontWait: if true, don't wait to acquire lock
717 : *
718 : * Returns one of:
719 : * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
720 : * LOCKACQUIRE_OK lock successfully acquired
721 : * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
722 : * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
723 : *
724 : * In the normal case where dontWait=false and the caller doesn't need to
725 : * distinguish a freshly acquired lock from one already taken earlier in
726 : * this same transaction, there is no need to examine the return value.
727 : *
728 : * Side Effects: The lock is acquired and recorded in lock tables.
729 : *
730 : * NOTE: if we wait for the lock, there is no way to abort the wait
731 : * short of aborting the transaction.
732 : */
733 : LockAcquireResult
734 926830 : LockAcquire(const LOCKTAG *locktag,
735 : LOCKMODE lockmode,
736 : bool sessionLock,
737 : bool dontWait)
738 : {
739 926830 : return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
740 : true, NULL);
741 : }
742 :
743 : /*
744 : * LockAcquireExtended - allows us to specify additional options
745 : *
746 : * reportMemoryError specifies whether a lock request that fills the lock
747 : * table should generate an ERROR or not. Passing "false" allows the caller
748 : * to attempt to recover from lock-table-full situations, perhaps by forcibly
749 : * canceling other lock holders and then retrying. Note, however, that the
750 : * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
751 : * in combination with dontWait = true, as the cause of failure couldn't be
752 : * distinguished.
753 : *
754 : * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
755 : * table entry if a lock is successfully acquired, or NULL if not.
756 : */
757 : LockAcquireResult
758 29348418 : LockAcquireExtended(const LOCKTAG *locktag,
759 : LOCKMODE lockmode,
760 : bool sessionLock,
761 : bool dontWait,
762 : bool reportMemoryError,
763 : LOCALLOCK **locallockp)
764 : {
765 29348418 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
766 : LockMethod lockMethodTable;
767 : LOCALLOCKTAG localtag;
768 : LOCALLOCK *locallock;
769 : LOCK *lock;
770 : PROCLOCK *proclock;
771 : bool found;
772 : ResourceOwner owner;
773 : uint32 hashcode;
774 : LWLock *partitionLock;
775 : bool found_conflict;
776 29348418 : bool log_lock = false;
777 :
778 29348418 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
779 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
780 29348418 : lockMethodTable = LockMethods[lockmethodid];
781 29348418 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
782 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
783 :
784 29348418 : if (RecoveryInProgress() && !InRecovery &&
785 428574 : (locktag->locktag_type == LOCKTAG_OBJECT ||
786 428574 : locktag->locktag_type == LOCKTAG_RELATION) &&
787 : lockmode > RowExclusiveLock)
788 0 : ereport(ERROR,
789 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
790 : errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
791 : lockMethodTable->lockModeNames[lockmode]),
792 : errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
793 :
794 : #ifdef LOCK_DEBUG
795 : if (LOCK_DEBUG_ENABLED(locktag))
796 : elog(LOG, "LockAcquire: lock [%u,%u] %s",
797 : locktag->locktag_field1, locktag->locktag_field2,
798 : lockMethodTable->lockModeNames[lockmode]);
799 : #endif
800 :
801 : /* Identify owner for lock */
802 29348418 : if (sessionLock)
803 64804 : owner = NULL;
804 : else
805 29283614 : owner = CurrentResourceOwner;
806 :
807 : /*
808 : * Find or create a LOCALLOCK entry for this lock and lockmode
809 : */
810 29348418 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
811 29348418 : localtag.lock = *locktag;
812 29348418 : localtag.mode = lockmode;
813 :
814 29348418 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
815 : &localtag,
816 : HASH_ENTER, &found);
817 :
818 : /*
819 : * if it's a new locallock object, initialize it
820 : */
821 29348418 : if (!found)
822 : {
823 26280576 : locallock->lock = NULL;
824 26280576 : locallock->proclock = NULL;
825 26280576 : locallock->hashcode = LockTagHashCode(&(localtag.lock));
826 26280576 : locallock->nLocks = 0;
827 26280576 : locallock->holdsStrongLockCount = false;
828 26280576 : locallock->lockCleared = false;
829 26280576 : locallock->numLockOwners = 0;
830 26280576 : locallock->maxLockOwners = 8;
831 26280576 : locallock->lockOwners = NULL; /* in case next line fails */
832 26280576 : locallock->lockOwners = (LOCALLOCKOWNER *)
833 26280576 : MemoryContextAlloc(TopMemoryContext,
834 26280576 : locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
835 : }
836 : else
837 : {
838 : /* Make sure there will be room to remember the lock */
839 3067842 : if (locallock->numLockOwners >= locallock->maxLockOwners)
840 : {
841 38 : int newsize = locallock->maxLockOwners * 2;
842 :
843 38 : locallock->lockOwners = (LOCALLOCKOWNER *)
844 38 : repalloc(locallock->lockOwners,
845 : newsize * sizeof(LOCALLOCKOWNER));
846 38 : locallock->maxLockOwners = newsize;
847 : }
848 : }
849 29348418 : hashcode = locallock->hashcode;
850 :
851 29348418 : if (locallockp)
852 28421588 : *locallockp = locallock;
853 :
854 : /*
855 : * If we already hold the lock, we can just increase the count locally.
856 : *
857 : * If lockCleared is already set, caller need not worry about absorbing
858 : * sinval messages related to the lock's object.
859 : */
860 29348418 : if (locallock->nLocks > 0)
861 : {
862 3067842 : GrantLockLocal(locallock, owner);
863 3067842 : if (locallock->lockCleared)
864 2938200 : return LOCKACQUIRE_ALREADY_CLEAR;
865 : else
866 129642 : return LOCKACQUIRE_ALREADY_HELD;
867 : }
868 :
869 : /*
870 : * We don't acquire any other heavyweight lock while holding the relation
871 : * extension lock. We do allow to acquire the same relation extension
872 : * lock more than once but that case won't reach here.
873 : */
874 : Assert(!IsRelationExtensionLockHeld);
875 :
876 : /*
877 : * Prepare to emit a WAL record if acquisition of this lock needs to be
878 : * replayed in a standby server.
879 : *
880 : * Here we prepare to log; after lock is acquired we'll issue log record.
881 : * This arrangement simplifies error recovery in case the preparation step
882 : * fails.
883 : *
884 : * Only AccessExclusiveLocks can conflict with lock types that read-only
885 : * transactions can acquire in a standby server. Make sure this definition
886 : * matches the one in GetRunningTransactionLocks().
887 : */
888 26280576 : if (lockmode >= AccessExclusiveLock &&
889 379390 : locktag->locktag_type == LOCKTAG_RELATION &&
890 254972 : !RecoveryInProgress() &&
891 213728 : XLogStandbyInfoActive())
892 : {
893 159438 : LogAccessExclusiveLockPrepare();
894 159438 : log_lock = true;
895 : }
896 :
897 : /*
898 : * Attempt to take lock via fast path, if eligible. But if we remember
899 : * having filled up the fast path array, we don't attempt to make any
900 : * further use of it until we release some locks. It's possible that some
901 : * other backend has transferred some of those locks to the shared hash
902 : * table, leaving space free, but it's not worth acquiring the LWLock just
903 : * to check. It's also possible that we're acquiring a second or third
904 : * lock type on a relation we have already locked using the fast-path, but
905 : * for now we don't worry about that case either.
906 : */
907 26280576 : if (EligibleForRelationFastPath(locktag, lockmode) &&
908 23750270 : FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
909 : {
910 23221794 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
911 : bool acquired;
912 :
913 : /*
914 : * LWLockAcquire acts as a memory sequencing point, so it's safe to
915 : * assume that any strong locker whose increment to
916 : * FastPathStrongRelationLocks->counts becomes visible after we test
917 : * it has yet to begin to transfer fast-path locks.
918 : */
919 23221794 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
920 23221794 : if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
921 349918 : acquired = false;
922 : else
923 22871876 : acquired = FastPathGrantRelationLock(locktag->locktag_field2,
924 : lockmode);
925 23221794 : LWLockRelease(&MyProc->fpInfoLock);
926 23221794 : if (acquired)
927 : {
928 : /*
929 : * The locallock might contain stale pointers to some old shared
930 : * objects; we MUST reset these to null before considering the
931 : * lock to be acquired via fast-path.
932 : */
933 22871876 : locallock->lock = NULL;
934 22871876 : locallock->proclock = NULL;
935 22871876 : GrantLockLocal(locallock, owner);
936 22871876 : return LOCKACQUIRE_OK;
937 : }
938 : }
939 :
940 : /*
941 : * If this lock could potentially have been taken via the fast-path by
942 : * some other backend, we must (temporarily) disable further use of the
943 : * fast-path for this lock tag, and migrate any locks already taken via
944 : * this method to the main lock table.
945 : */
946 3408700 : if (ConflictsWithRelationFastPath(locktag, lockmode))
947 : {
948 307402 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
949 :
950 307402 : BeginStrongLockAcquire(locallock, fasthashcode);
951 307402 : if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
952 : hashcode))
953 : {
954 0 : AbortStrongLockAcquire();
955 0 : if (locallock->nLocks == 0)
956 0 : RemoveLocalLock(locallock);
957 0 : if (locallockp)
958 0 : *locallockp = NULL;
959 0 : if (reportMemoryError)
960 0 : ereport(ERROR,
961 : (errcode(ERRCODE_OUT_OF_MEMORY),
962 : errmsg("out of shared memory"),
963 : errhint("You might need to increase %s.", "max_locks_per_transaction")));
964 : else
965 0 : return LOCKACQUIRE_NOT_AVAIL;
966 : }
967 : }
968 :
969 : /*
970 : * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
971 : * take it via the fast-path, either, so we've got to mess with the shared
972 : * lock table.
973 : */
974 3408700 : partitionLock = LockHashPartitionLock(hashcode);
975 :
976 3408700 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
977 :
978 : /*
979 : * Find or create lock and proclock entries with this tag
980 : *
981 : * Note: if the locallock object already existed, it might have a pointer
982 : * to the lock already ... but we should not assume that that pointer is
983 : * valid, since a lock object with zero hold and request counts can go
984 : * away anytime. So we have to use SetupLockInTable() to recompute the
985 : * lock and proclock pointers, even if they're already set.
986 : */
987 3408700 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
988 : hashcode, lockmode);
989 3408700 : if (!proclock)
990 : {
991 0 : AbortStrongLockAcquire();
992 0 : LWLockRelease(partitionLock);
993 0 : if (locallock->nLocks == 0)
994 0 : RemoveLocalLock(locallock);
995 0 : if (locallockp)
996 0 : *locallockp = NULL;
997 0 : if (reportMemoryError)
998 0 : ereport(ERROR,
999 : (errcode(ERRCODE_OUT_OF_MEMORY),
1000 : errmsg("out of shared memory"),
1001 : errhint("You might need to increase %s.", "max_locks_per_transaction")));
1002 : else
1003 0 : return LOCKACQUIRE_NOT_AVAIL;
1004 : }
1005 3408700 : locallock->proclock = proclock;
1006 3408700 : lock = proclock->tag.myLock;
1007 3408700 : locallock->lock = lock;
1008 :
1009 : /*
1010 : * If lock requested conflicts with locks requested by waiters, must join
1011 : * wait queue. Otherwise, check for conflict with already-held locks.
1012 : * (That's last because most complex check.)
1013 : */
1014 3408700 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1015 72 : found_conflict = true;
1016 : else
1017 3408628 : found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1018 : lock, proclock);
1019 :
1020 3408700 : if (!found_conflict)
1021 : {
1022 : /* No conflict with held or previously requested locks */
1023 3405248 : GrantLock(lock, proclock, lockmode);
1024 3405248 : GrantLockLocal(locallock, owner);
1025 : }
1026 : else
1027 : {
1028 : /*
1029 : * Set bitmask of locks this process already holds on this object.
1030 : */
1031 3452 : MyProc->heldLocks = proclock->holdMask;
1032 :
1033 : /*
1034 : * Sleep till someone wakes me up. We do this even in the dontWait
1035 :          * case, because while trying to go to sleep, we may discover that we
1036 : * can acquire the lock immediately after all.
1037 : */
1038 :
1039 : TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1040 : locktag->locktag_field2,
1041 : locktag->locktag_field3,
1042 : locktag->locktag_field4,
1043 : locktag->locktag_type,
1044 : lockmode);
1045 :
1046 3452 : WaitOnLock(locallock, owner, dontWait);
1047 :
1048 : TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1049 : locktag->locktag_field2,
1050 : locktag->locktag_field3,
1051 : locktag->locktag_field4,
1052 : locktag->locktag_type,
1053 : lockmode);
1054 :
1055 : /*
1056 : * NOTE: do not do any material change of state between here and
1057 : * return. All required changes in locktable state must have been
1058 : * done when the lock was granted to us --- see notes in WaitOnLock.
1059 : */
1060 :
1061 : /*
1062 : * Check the proclock entry status. If dontWait = true, this is an
1063 :          * expected case; otherwise, it will only happen if something in the
1064 : * ipc communication doesn't work correctly.
1065 : */
1066 3362 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1067 : {
1068 1290 : AbortStrongLockAcquire();
1069 :
1070 1290 : if (dontWait)
1071 : {
1072 : /*
1073 : * We can't acquire the lock immediately. If caller specified
1074 : * no blocking, remove useless table entries and return
1075 : * LOCKACQUIRE_NOT_AVAIL without waiting.
1076 : */
1077 1290 : if (proclock->holdMask == 0)
1078 : {
1079 : uint32 proclock_hashcode;
1080 :
1081 886 : proclock_hashcode = ProcLockHashCode(&proclock->tag,
1082 : hashcode);
1083 886 : dlist_delete(&proclock->lockLink);
1084 886 : dlist_delete(&proclock->procLink);
1085 886 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1086 886 : &(proclock->tag),
1087 : proclock_hashcode,
1088 : HASH_REMOVE,
1089 : NULL))
1090 0 : elog(PANIC, "proclock table corrupted");
1091 : }
1092 : else
1093 : PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1094 1290 : lock->nRequested--;
1095 1290 : lock->requested[lockmode]--;
1096 : LOCK_PRINT("LockAcquire: conditional lock failed",
1097 : lock, lockmode);
1098 : Assert((lock->nRequested > 0) &&
1099 : (lock->requested[lockmode] >= 0));
1100 : Assert(lock->nGranted <= lock->nRequested);
1101 1290 : LWLockRelease(partitionLock);
1102 1290 : if (locallock->nLocks == 0)
1103 1290 : RemoveLocalLock(locallock);
1104 1290 : if (locallockp)
1105 440 : *locallockp = NULL;
1106 1290 : return LOCKACQUIRE_NOT_AVAIL;
1107 : }
1108 : else
1109 : {
1110 : /*
1111 : * We should have gotten the lock, but somehow that didn't
1112 : * happen. If we get here, it's a bug.
1113 : */
1114 : PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1115 : LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1116 0 : LWLockRelease(partitionLock);
1117 0 : elog(ERROR, "LockAcquire failed");
1118 : }
1119 : }
1120 : PROCLOCK_PRINT("LockAcquire: granted", proclock);
1121 : LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1122 : }
1123 :
1124 : /*
1125 : * Lock state is fully up-to-date now; if we error out after this, no
1126 : * special error cleanup is required.
1127 : */
1128 3407320 : FinishStrongLockAcquire();
1129 :
1130 3407320 : LWLockRelease(partitionLock);
1131 :
1132 : /*
1133 : * Emit a WAL record if acquisition of this lock needs to be replayed in a
1134 : * standby server.
1135 : */
1136 3407320 : if (log_lock)
1137 : {
1138 : /*
1139 : * Decode the locktag back to the original values, to avoid sending
1140 : * lots of empty bytes with every message. See lock.h to check how a
1141 : * locktag is defined for LOCKTAG_RELATION
1142 : */
1143 159014 : LogAccessExclusiveLock(locktag->locktag_field1,
1144 : locktag->locktag_field2);
1145 : }
1146 :
1147 3407320 : return LOCKACQUIRE_OK;
1148 : }
1149 :
/*
 * SetupLockInTable -- find or create LOCK and PROCLOCK objects as needed
 * for a new lock request.
 *
 * Returns the PROCLOCK object, or NULL if we failed to create the objects
 * for lack of shared memory.
 *
 * The appropriate partition lock must be held at entry, and will be
 * held at exit.
 */
static PROCLOCK *
SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
				 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
{
	LOCK	   *lock;
	PROCLOCK   *proclock;
	PROCLOCKTAG proclocktag;
	uint32		proclock_hashcode;
	bool		found;

	/*
	 * Find or create a lock with this tag.
	 */
	lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
												locktag,
												hashcode,
												HASH_ENTER_NULL,
												&found);
	if (!lock)
		return NULL;			/* out of shared memory */

	/*
	 * if it's a new lock object, initialize it
	 */
	if (!found)
	{
		lock->grantMask = 0;
		lock->waitMask = 0;
		dlist_init(&lock->procLocks);
		dclist_init(&lock->waitProcs);
		lock->nRequested = 0;
		lock->nGranted = 0;
		MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
		MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
		LOCK_PRINT("LockAcquire: new", lock, lockmode);
	}
	else
	{
		LOCK_PRINT("LockAcquire: found", lock, lockmode);
		Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
		Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
		Assert(lock->nGranted <= lock->nRequested);
	}

	/*
	 * Create the hash key for the proclock table.
	 */
	proclocktag.myLock = lock;
	proclocktag.myProc = proc;

	proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);

	/*
	 * Find or create a proclock entry with this tag
	 */
	proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
														&proclocktag,
														proclock_hashcode,
														HASH_ENTER_NULL,
														&found);
	if (!proclock)
	{
		/* Oops, not enough shmem for the proclock */
		if (lock->nRequested == 0)
		{
			/*
			 * There are no other requestors of this lock, so garbage-collect
			 * the lock object.  We *must* do this to avoid a permanent leak
			 * of shared memory, because there won't be anything to cause
			 * anyone to release the lock object later.
			 */
			Assert(dlist_is_empty(&(lock->procLocks)));
			if (!hash_search_with_hash_value(LockMethodLockHash,
											 &(lock->tag),
											 hashcode,
											 HASH_REMOVE,
											 NULL))
				elog(PANIC, "lock table corrupted");
		}
		return NULL;
	}

	/*
	 * If new, initialize the new entry
	 */
	if (!found)
	{
		uint32		partition = LockHashPartition(hashcode);

		/*
		 * It might seem unsafe to access proclock->groupLeader without a
		 * lock, but it's not really.  Either we are initializing a proclock
		 * on our own behalf, in which case our group leader isn't changing
		 * because the group leader for a process can only ever be changed by
		 * the process itself; or else we are transferring a fast-path lock
		 * to the main lock table, in which case that process can't change
		 * its lock group leader without first releasing all of its locks
		 * (and in particular the one we are currently transferring).
		 */
		proclock->groupLeader = proc->lockGroupLeader != NULL ?
			proc->lockGroupLeader : proc;
		proclock->holdMask = 0;
		proclock->releaseMask = 0;
		/* Add proclock to appropriate lists */
		dlist_push_tail(&lock->procLocks, &proclock->lockLink);
		dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
		PROCLOCK_PRINT("LockAcquire: new", proclock);
	}
	else
	{
		PROCLOCK_PRINT("LockAcquire: found", proclock);
		Assert((proclock->holdMask & ~lock->grantMask) == 0);

#ifdef CHECK_DEADLOCK_RISK

		/*
		 * Issue warning if we already hold a lower-level lock on this object
		 * and do not hold a lock of the requested level or higher.  This
		 * indicates a deadlock-prone coding practice (eg, we'd have a
		 * deadlock if another backend were following the same code path at
		 * about the same time).
		 *
		 * This is not enabled by default, because it may generate log
		 * entries about user-level coding practices that are in fact safe in
		 * context.  It can be enabled to help find system-level problems.
		 *
		 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
		 * better to use a table.  For now, though, this works.
		 */
		{
			int			i;

			for (i = lockMethodTable->numLockModes; i > 0; i--)
			{
				if (proclock->holdMask & LOCKBIT_ON(i))
				{
					if (i >= (int) lockmode)
						break;	/* safe: we have a lock >= req level */
					elog(LOG, "deadlock risk: raising lock level"
						 " from %s to %s on object %u/%u/%u",
						 lockMethodTable->lockModeNames[i],
						 lockMethodTable->lockModeNames[lockmode],
						 lock->tag.locktag_field1, lock->tag.locktag_field2,
						 lock->tag.locktag_field3);
					break;
				}
			}
		}
#endif							/* CHECK_DEADLOCK_RISK */
	}

	/*
	 * lock->nRequested and lock->requested[] count the total number of
	 * requests, whether granted or waiting, so increment those immediately.
	 * The other counts don't increment till we get the lock.
	 */
	lock->nRequested++;
	lock->requested[lockmode]++;
	Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));

	/*
	 * We shouldn't already hold the desired lock; else locallock table is
	 * broken.
	 */
	if (proclock->holdMask & LOCKBIT_ON(lockmode))
		elog(ERROR, "lock %s on object %u/%u/%u is already held",
			 lockMethodTable->lockModeNames[lockmode],
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3);

	return proclock;
}
1332 :
/*
 * Check and set/reset the flag that we hold the relation extension lock.
 *
 * It is the caller's responsibility to ensure that this function is called
 * after acquiring/releasing the relation extension lock.
 *
 * Pass acquired as true if the lock was acquired, false if released.
 *
 * The flag is only maintained in assert-enabled builds; it backs the
 * Assert(!IsRelationExtensionLockHeld) check in LockAcquire.
 */
static inline void
CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
{
#ifdef USE_ASSERT_CHECKING
	if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
		IsRelationExtensionLockHeld = acquired;
#endif
}
1349 :
/*
 * RemoveLocalLock -- subroutine to free a locallock entry
 *
 * Releases all resource-owner bookkeeping for the entry, gives back any
 * strong-lock count it holds, and removes it from the local hash table.
 */
static void
RemoveLocalLock(LOCALLOCK *locallock)
{
	int			i;

	/* Detach this locallock from every resource owner that recorded it. */
	for (i = locallock->numLockOwners - 1; i >= 0; i--)
	{
		if (locallock->lockOwners[i].owner != NULL)
			ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
	}
	locallock->numLockOwners = 0;
	if (locallock->lockOwners != NULL)
		pfree(locallock->lockOwners);
	locallock->lockOwners = NULL;

	/*
	 * If we still hold a strong-lock count for this tag's partition, release
	 * it so other backends may resume using the fast path.
	 */
	if (locallock->holdsStrongLockCount)
	{
		uint32		fasthashcode;

		fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);

		/* The count is not atomic; a spinlock guards the update. */
		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
		Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
		FastPathStrongRelationLocks->count[fasthashcode]--;
		locallock->holdsStrongLockCount = false;
		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
	}

	/* Remove the entry itself from the local lock hash table. */
	if (!hash_search(LockMethodLocalHash,
					 &(locallock->tag),
					 HASH_REMOVE, NULL))
		elog(WARNING, "locallock table corrupted");

	/*
	 * Indicate that the lock is released for certain types of locks
	 */
	CheckAndSetLockHeld(locallock, false);
}
1391 :
/*
 * LockCheckConflicts -- test whether requested lock conflicts
 *		with those already granted
 *
 * Returns true if conflict, false if no conflict.
 *
 * NOTES:
 *		Here's what makes this complicated: one process's locks don't
 * conflict with one another, no matter what purpose they are held for
 * (eg, session and transaction locks do not conflict).  Nor do the locks
 * of one process in a lock group conflict with those of another process in
 * the same group.  So, we must subtract off these locks when determining
 * whether the requested new lock conflicts with those already held.
 */
bool
LockCheckConflicts(LockMethod lockMethodTable,
				   LOCKMODE lockmode,
				   LOCK *lock,
				   PROCLOCK *proclock)
{
	int			numLockModes = lockMethodTable->numLockModes;
	LOCKMASK	myLocks;
	int			conflictMask = lockMethodTable->conflictTab[lockmode];
	int			conflictsRemaining[MAX_LOCKMODES];
	int			totalConflictsRemaining = 0;
	dlist_iter	proclock_iter;
	int			i;

	/*
	 * first check for global conflicts: If no locks conflict with my
	 * request, then I get the lock.
	 *
	 * Checking for conflict: lock->grantMask represents the types of
	 * currently held locks.  conflictTable[lockmode] has a bit set for each
	 * type of lock that conflicts with request.  Bitwise compare tells if
	 * there is a conflict.
	 */
	if (!(conflictMask & lock->grantMask))
	{
		PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
		return false;
	}

	/*
	 * Rats.  Something conflicts.  But it could still be my own lock, or a
	 * lock held by another member of my locking group.  First, figure out
	 * how many conflicts remain after subtracting out any locks I hold
	 * myself.
	 */
	myLocks = proclock->holdMask;
	for (i = 1; i <= numLockModes; i++)
	{
		if ((conflictMask & LOCKBIT_ON(i)) == 0)
		{
			conflictsRemaining[i] = 0;
			continue;
		}
		conflictsRemaining[i] = lock->granted[i];
		/* If I hold this mode myself, it is not a conflict. */
		if (myLocks & LOCKBIT_ON(i))
			--conflictsRemaining[i];
		totalConflictsRemaining += conflictsRemaining[i];
	}

	/* If no conflicts remain, we get the lock. */
	if (totalConflictsRemaining == 0)
	{
		PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
		return false;
	}

	/* If no group locking, it's definitely a conflict. */
	if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
	{
		Assert(proclock->tag.myProc == MyProc);
		PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
					   proclock);
		return true;
	}

	/*
	 * The relation extension lock conflicts even between members of the
	 * same lock group.
	 */
	if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
	{
		PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
					   proclock);
		return true;
	}

	/*
	 * Locks held in conflicting modes by members of our own lock group are
	 * not real conflicts; we can subtract those out and see if we still have
	 * a conflict.  This is O(N) in the number of processes holding or
	 * awaiting locks on this object.  We could improve that by making the
	 * shared memory state more complex (and larger) but it doesn't seem
	 * worth it.
	 */
	dlist_foreach(proclock_iter, &lock->procLocks)
	{
		PROCLOCK   *otherproclock =
			dlist_container(PROCLOCK, lockLink, proclock_iter.cur);

		if (proclock != otherproclock &&
			proclock->groupLeader == otherproclock->groupLeader &&
			(otherproclock->holdMask & conflictMask) != 0)
		{
			int			intersectMask = otherproclock->holdMask & conflictMask;

			/* Subtract each group-held conflicting mode from the tally. */
			for (i = 1; i <= numLockModes; i++)
			{
				if ((intersectMask & LOCKBIT_ON(i)) != 0)
				{
					if (conflictsRemaining[i] <= 0)
						elog(PANIC, "proclocks held do not match lock");
					conflictsRemaining[i]--;
					totalConflictsRemaining--;
				}
			}

			if (totalConflictsRemaining == 0)
			{
				PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
							   proclock);
				return false;
			}
		}
	}

	/* Nope, it's a real conflict. */
	PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
	return true;
}
1523 :
1524 : /*
1525 : * GrantLock -- update the lock and proclock data structures to show
1526 : * the lock request has been granted.
1527 : *
1528 : * NOTE: if proc was blocked, it also needs to be removed from the wait list
1529 : * and have its waitLock/waitProcLock fields cleared. That's not done here.
1530 : *
1531 : * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1532 : * table entry; but since we may be awaking some other process, we can't do
1533 : * that here; it's done by GrantLockLocal, instead.
1534 : */
1535 : void
1536 3410818 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1537 : {
1538 3410818 : lock->nGranted++;
1539 3410818 : lock->granted[lockmode]++;
1540 3410818 : lock->grantMask |= LOCKBIT_ON(lockmode);
1541 3410818 : if (lock->granted[lockmode] == lock->requested[lockmode])
1542 3410562 : lock->waitMask &= LOCKBIT_OFF(lockmode);
1543 3410818 : proclock->holdMask |= LOCKBIT_ON(lockmode);
1544 : LOCK_PRINT("GrantLock", lock, lockmode);
1545 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1546 : Assert(lock->nGranted <= lock->nRequested);
1547 3410818 : }
1548 :
1549 : /*
1550 : * UnGrantLock -- opposite of GrantLock.
1551 : *
1552 : * Updates the lock and proclock data structures to show that the lock
1553 : * is no longer held nor requested by the current holder.
1554 : *
1555 : * Returns true if there were any waiters waiting on the lock that
1556 : * should now be woken up with ProcLockWakeup.
1557 : */
1558 : static bool
1559 3410654 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1560 : PROCLOCK *proclock, LockMethod lockMethodTable)
1561 : {
1562 3410654 : bool wakeupNeeded = false;
1563 :
1564 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1565 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1566 : Assert(lock->nGranted <= lock->nRequested);
1567 :
1568 : /*
1569 : * fix the general lock stats
1570 : */
1571 3410654 : lock->nRequested--;
1572 3410654 : lock->requested[lockmode]--;
1573 3410654 : lock->nGranted--;
1574 3410654 : lock->granted[lockmode]--;
1575 :
1576 3410654 : if (lock->granted[lockmode] == 0)
1577 : {
1578 : /* change the conflict mask. No more of this lock type. */
1579 3394894 : lock->grantMask &= LOCKBIT_OFF(lockmode);
1580 : }
1581 :
1582 : LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1583 :
1584 : /*
1585 : * We need only run ProcLockWakeup if the released lock conflicts with at
1586 : * least one of the lock types requested by waiter(s). Otherwise whatever
1587 : * conflict made them wait must still exist. NOTE: before MVCC, we could
1588 : * skip wakeup if lock->granted[lockmode] was still positive. But that's
1589 : * not true anymore, because the remaining granted locks might belong to
1590 : * some waiter, who could now be awakened because he doesn't conflict with
1591 : * his own locks.
1592 : */
1593 3410654 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1594 2016 : wakeupNeeded = true;
1595 :
1596 : /*
1597 : * Now fix the per-proclock state.
1598 : */
1599 3410654 : proclock->holdMask &= LOCKBIT_OFF(lockmode);
1600 : PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1601 :
1602 3410654 : return wakeupNeeded;
1603 : }
1604 :
1605 : /*
1606 : * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1607 : * proclock and lock objects if possible, and call ProcLockWakeup if there
1608 : * are remaining requests and the caller says it's OK. (Normally, this
1609 : * should be called after UnGrantLock, and wakeupNeeded is the result from
1610 : * UnGrantLock.)
1611 : *
1612 : * The appropriate partition lock must be held at entry, and will be
1613 : * held at exit.
1614 : */
1615 : static void
1616 3367272 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1617 : LockMethod lockMethodTable, uint32 hashcode,
1618 : bool wakeupNeeded)
1619 : {
1620 : /*
1621 : * If this was my last hold on this lock, delete my entry in the proclock
1622 : * table.
1623 : */
1624 3367272 : if (proclock->holdMask == 0)
1625 : {
1626 : uint32 proclock_hashcode;
1627 :
1628 : PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1629 3091834 : dlist_delete(&proclock->lockLink);
1630 3091834 : dlist_delete(&proclock->procLink);
1631 3091834 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1632 3091834 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1633 3091834 : &(proclock->tag),
1634 : proclock_hashcode,
1635 : HASH_REMOVE,
1636 : NULL))
1637 0 : elog(PANIC, "proclock table corrupted");
1638 : }
1639 :
1640 3367272 : if (lock->nRequested == 0)
1641 : {
1642 : /*
1643 : * The caller just released the last lock, so garbage-collect the lock
1644 : * object.
1645 : */
1646 : LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1647 : Assert(dlist_is_empty(&lock->procLocks));
1648 3071438 : if (!hash_search_with_hash_value(LockMethodLockHash,
1649 3071438 : &(lock->tag),
1650 : hashcode,
1651 : HASH_REMOVE,
1652 : NULL))
1653 0 : elog(PANIC, "lock table corrupted");
1654 : }
1655 295834 : else if (wakeupNeeded)
1656 : {
1657 : /* There are waiters on this lock, so wake them up. */
1658 2100 : ProcLockWakeup(lockMethodTable, lock);
1659 : }
1660 3367272 : }
1661 :
1662 : /*
1663 : * GrantLockLocal -- update the locallock data structures to show
1664 : * the lock request has been granted.
1665 : *
1666 : * We expect that LockAcquire made sure there is room to add a new
1667 : * ResourceOwner entry.
1668 : */
1669 : static void
1670 29347038 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1671 : {
1672 29347038 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1673 : int i;
1674 :
1675 : Assert(locallock->numLockOwners < locallock->maxLockOwners);
1676 : /* Count the total */
1677 29347038 : locallock->nLocks++;
1678 : /* Count the per-owner lock */
1679 30393316 : for (i = 0; i < locallock->numLockOwners; i++)
1680 : {
1681 3387286 : if (lockOwners[i].owner == owner)
1682 : {
1683 2341008 : lockOwners[i].nLocks++;
1684 2341008 : return;
1685 : }
1686 : }
1687 27006030 : lockOwners[i].owner = owner;
1688 27006030 : lockOwners[i].nLocks = 1;
1689 27006030 : locallock->numLockOwners++;
1690 27006030 : if (owner != NULL)
1691 26942086 : ResourceOwnerRememberLock(owner, locallock);
1692 :
1693 : /* Indicate that the lock is acquired for certain types of locks. */
1694 27006030 : CheckAndSetLockHeld(locallock, true);
1695 : }
1696 :
/*
 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
 * and arrange for error cleanup if it fails
 *
 * Bumping the strong-lock count makes other backends fall through to the
 * main lock table for this partition; recording the locallock in
 * StrongLockInProgress lets AbortStrongLockAcquire undo the bump on error.
 */
static void
BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
{
	Assert(StrongLockInProgress == NULL);
	Assert(locallock->holdsStrongLockCount == false);

	/*
	 * Adding to a memory location is not atomic, so we take a spinlock to
	 * ensure we don't collide with someone else trying to bump the count at
	 * the same time.
	 *
	 * XXX: It might be worth considering using an atomic fetch-and-add
	 * instruction here, on architectures where that is supported.
	 */

	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
	FastPathStrongRelationLocks->count[fasthashcode]++;
	locallock->holdsStrongLockCount = true;
	StrongLockInProgress = locallock;
	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
1722 :
/*
 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
 * acquisition once it's no longer needed
 *
 * The strong-lock count itself is deliberately left incremented; it is now
 * owned by the locallock (holdsStrongLockCount) and will be released by
 * RemoveLocalLock.
 */
static void
FinishStrongLockAcquire(void)
{
	StrongLockInProgress = NULL;
}
1732 :
/*
 * AbortStrongLockAcquire - undo strong lock state changes performed by
 * BeginStrongLockAcquire.
 *
 * Safe to call even when no strong lock acquisition is in progress; it
 * simply returns in that case.
 */
void
AbortStrongLockAcquire(void)
{
	uint32		fasthashcode;
	LOCALLOCK  *locallock = StrongLockInProgress;

	/* Nothing to undo unless BeginStrongLockAcquire is pending. */
	if (locallock == NULL)
		return;

	fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
	Assert(locallock->holdsStrongLockCount == true);
	/* Spinlock needed: the count update is not atomic. */
	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
	Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
	FastPathStrongRelationLocks->count[fasthashcode]--;
	locallock->holdsStrongLockCount = false;
	StrongLockInProgress = NULL;
	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
1755 :
1756 : /*
1757 : * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1758 : * WaitOnLock on.
1759 : *
1760 : * proc.c needs this for the case where we are booted off the lock by
1761 : * timeout, but discover that someone granted us the lock anyway.
1762 : *
1763 : * We could just export GrantLockLocal, but that would require including
1764 : * resowner.h in lock.h, which creates circularity.
1765 : */
1766 : void
1767 2072 : GrantAwaitedLock(void)
1768 : {
 : /* awaitedLock and awaitedOwner were saved by WaitOnLock before sleeping */
1769 2072 : GrantLockLocal(awaitedLock, awaitedOwner);
1770 2072 : }
1771 :
1772 : /*
1773 : * MarkLockClear -- mark an acquired lock as "clear"
1774 : *
1775 : * This means that we know we have absorbed all sinval messages that other
1776 : * sessions generated before we acquired this lock, and so we can confidently
1777 : * assume we know about any catalog changes protected by this lock.
1778 : */
1779 : void
1780 25504352 : MarkLockClear(LOCALLOCK *locallock)
1781 : {
 : /* Only meaningful for a lock we actually hold locally */
1782 : Assert(locallock->nLocks > 0);
1783 25504352 : locallock->lockCleared = true;
1784 25504352 : }
1785 :
1786 : /*
1787 : * WaitOnLock -- wait to acquire a lock
1788 : *
1789 : * Caller must have set MyProc->heldLocks to reflect locks already held
1790 : * on the lockable object by this process.
1791 : *
1792 : * The appropriate partition lock must be held at entry, and will still be
1793 : * held at exit.
1794 : */
1795 : static void
1796 3452 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool dontWait)
1797 : {
1798 3452 : LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1799 3452 : LockMethod lockMethodTable = LockMethods[lockmethodid];
1800 :
1801 : LOCK_PRINT("WaitOnLock: sleeping on lock",
1802 : locallock->lock, locallock->tag.mode);
1803 :
1804 : /* adjust the process title to indicate that it's waiting */
1805 3452 : set_ps_display_suffix("waiting");
1806 :
 : /* Record what we're waiting for, for GrantAwaitedLock and error cleanup */
1807 3452 : awaitedLock = locallock;
1808 3452 : awaitedOwner = owner;
1809 :
1810 : /*
1811 : * NOTE: Think not to put any shared-state cleanup after the call to
1812 : * ProcSleep, in either the normal or failure path. The lock state must
1813 : * be fully set by the lock grantor, or by CheckDeadLock if we give up
1814 : * waiting for the lock. This is necessary because of the possibility
1815 : * that a cancel/die interrupt will interrupt ProcSleep after someone else
1816 : * grants us the lock, but before we've noticed it. Hence, after granting,
1817 : * the locktable state must fully reflect the fact that we own the lock;
1818 : * we can't do additional work on return.
1819 : *
1820 : * We can and do use a PG_TRY block to try to clean up after failure, but
1821 : * this still has a major limitation: elog(FATAL) can occur while waiting
1822 : * (eg, a "die" interrupt), and then control won't come back here. So all
1823 : * cleanup of essential state should happen in LockErrorCleanup, not here.
1824 : * We can use PG_TRY to clear the "waiting" status flags, since doing that
1825 : * is unimportant if the process exits.
1826 : */
1827 3452 : PG_TRY();
1828 : {
1829 : /*
1830 : * If dontWait = true, we handle success and failure in the same way
1831 : * here. The caller will be able to sort out what has happened.
1832 : */
1833 3452 : if (ProcSleep(locallock, lockMethodTable, dontWait) != PROC_WAIT_STATUS_OK
1834 1300 : && !dontWait)
1835 : {
1836 :
1837 : /*
1838 : * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1839 : * now.
1840 : */
1841 10 : awaitedLock = NULL;
1842 : LOCK_PRINT("WaitOnLock: aborting on lock",
1843 : locallock->lock, locallock->tag.mode);
1844 10 : LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1845 :
1846 : /*
1847 : * Now that we aren't holding the partition lock, we can give an
1848 : * error report including details about the detected deadlock.
1849 : */
1850 10 : DeadLockReport();
1851 : /* not reached */
1852 : }
1853 : }
1854 82 : PG_CATCH();
1855 : {
1856 : /* In this path, awaitedLock remains set until LockErrorCleanup */
1857 :
1858 : /* reset ps display to remove the suffix */
1859 82 : set_ps_display_remove_suffix();
1860 :
1861 : /* and propagate the error */
1862 82 : PG_RE_THROW();
1863 : }
1864 3362 : PG_END_TRY();
1865 :
1866 3362 : awaitedLock = NULL;
1867 :
1868 : /* reset ps display to remove the suffix */
1869 3362 : set_ps_display_remove_suffix();
1870 :
1871 : LOCK_PRINT("WaitOnLock: wakeup on lock",
1872 : locallock->lock, locallock->tag.mode);
1873 : }
1874 :
1875 : /*
1876 : * Remove a proc from the wait-queue it is on (caller must know it is on one).
1877 : * This is only used when the proc has failed to get the lock, so we set its
1878 : * waitStatus to PROC_WAIT_STATUS_ERROR.
1879 : *
1880 : * Appropriate partition lock must be held by caller. Also, caller is
1881 : * responsible for signaling the proc if needed.
1882 : *
1883 : * NB: this does not clean up any locallock object that may exist for the lock.
1884 : */
1885 : void
1886 90 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1887 : {
 : /* Capture the proc's wait state before we reset it below */
1888 90 : LOCK *waitLock = proc->waitLock;
1889 90 : PROCLOCK *proclock = proc->waitProcLock;
1890 90 : LOCKMODE lockmode = proc->waitLockMode;
1891 90 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1892 :
1893 : /* Make sure proc is waiting */
1894 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1895 : Assert(proc->links.next != NULL);
1896 : Assert(waitLock);
1897 : Assert(!dclist_is_empty(&waitLock->waitProcs));
1898 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1899 :
1900 : /* Remove proc from lock's wait queue */
1901 90 : dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
1902 :
1903 : /* Undo increments of request counts by waiting process */
1904 : Assert(waitLock->nRequested > 0);
1905 : Assert(waitLock->nRequested > proc->waitLock->nGranted);
1906 90 : waitLock->nRequested--;
1907 : Assert(waitLock->requested[lockmode] > 0);
1908 90 : waitLock->requested[lockmode]--;
1909 : /* don't forget to clear waitMask bit if appropriate */
1910 90 : if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1911 88 : waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1912 :
1913 : /* Clean up the proc's own state, and pass it the ok/fail signal */
1914 90 : proc->waitLock = NULL;
1915 90 : proc->waitProcLock = NULL;
1916 90 : proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1917 :
1918 : /*
1919 : * Delete the proclock immediately if it represents no already-held locks.
1920 : * (This must happen now because if the owner of the lock decides to
1921 : * release it, and the requested/granted counts then go to zero,
1922 : * LockRelease expects there to be no remaining proclocks.) Then see if
1923 : * any other waiters for the lock can be woken up now.
1924 : */
1925 90 : CleanUpLock(waitLock, proclock,
1926 : LockMethods[lockmethodid], hashcode,
1927 : true);
1928 : }
1929 :
1930 : /*
1931 : * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
1932 : * Release a session lock if 'sessionLock' is true, else release a
1933 : * regular transaction lock.
1934 : *
1935 : * Side Effects: find any waiting processes that are now wakable,
1936 : * grant them their requested locks and awaken them.
1937 : * (We have to grant the lock here to avoid a race between
1938 : * the waking process and any new process to
1939 : * come along and request the lock.)
1940 : */
1941 : bool
1942 25944262 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1943 : {
1944 25944262 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1945 : LockMethod lockMethodTable;
1946 : LOCALLOCKTAG localtag;
1947 : LOCALLOCK *locallock;
1948 : LOCK *lock;
1949 : PROCLOCK *proclock;
1950 : LWLock *partitionLock;
1951 : bool wakeupNeeded;
1952 :
 : /* Validate lock method and mode before touching any state */
1953 25944262 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1954 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1955 25944262 : lockMethodTable = LockMethods[lockmethodid];
1956 25944262 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1957 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
1958 :
1959 : #ifdef LOCK_DEBUG
1960 : if (LOCK_DEBUG_ENABLED(locktag))
1961 : elog(LOG, "LockRelease: lock [%u,%u] %s",
1962 : locktag->locktag_field1, locktag->locktag_field2,
1963 : lockMethodTable->lockModeNames[lockmode]);
1964 : #endif
1965 :
1966 : /*
1967 : * Find the LOCALLOCK entry for this lock and lockmode
1968 : */
1969 25944262 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1970 25944262 : localtag.lock = *locktag;
1971 25944262 : localtag.mode = lockmode;
1972 :
1973 25944262 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1974 : &localtag,
1975 : HASH_FIND, NULL);
1976 :
1977 : /*
1978 : * let the caller print its own error message, too. Do not ereport(ERROR).
1979 : */
1980 25944262 : if (!locallock || locallock->nLocks <= 0)
1981 : {
1982 26 : elog(WARNING, "you don't own a lock of type %s",
1983 : lockMethodTable->lockModeNames[lockmode]);
1984 26 : return false;
1985 : }
1986 :
1987 : /*
1988 : * Decrease the count for the resource owner.
1989 : */
1990 : {
1991 25944236 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1992 : ResourceOwner owner;
1993 : int i;
1994 :
1995 : /* Identify owner for lock */
 : /* (session locks are recorded with a NULL owner) */
1996 25944236 : if (sessionLock)
1997 63914 : owner = NULL;
1998 : else
1999 25880322 : owner = CurrentResourceOwner;
2000 :
2001 25946162 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2002 : {
2003 25946138 : if (lockOwners[i].owner == owner)
2004 : {
2005 : Assert(lockOwners[i].nLocks > 0);
2006 25944212 : if (--lockOwners[i].nLocks == 0)
2007 : {
2008 25089466 : if (owner != NULL)
2009 25025596 : ResourceOwnerForgetLock(owner, locallock);
2010 : /* compact out unused slot */
2011 25089466 : locallock->numLockOwners--;
2012 25089466 : if (i < locallock->numLockOwners)
2013 104 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2014 : }
2015 25944212 : break;
2016 : }
2017 : }
2018 25944236 : if (i < 0)
2019 : {
2020 : /* don't release a lock belonging to another owner */
2021 24 : elog(WARNING, "you don't own a lock of type %s",
2022 : lockMethodTable->lockModeNames[lockmode]);
2023 24 : return false;
2024 : }
2025 : }
2026 :
2027 : /*
2028 : * Decrease the total local count. If we're still holding the lock, we're
2029 : * done.
2030 : */
2031 25944212 : locallock->nLocks--;
2032 :
2033 25944212 : if (locallock->nLocks > 0)
2034 1353704 : return true;
2035 :
2036 : /*
2037 : * At this point we can no longer suppose we are clear of invalidation
2038 : * messages related to this lock. Although we'll delete the LOCALLOCK
2039 : * object before any intentional return from this routine, it seems worth
2040 : * the trouble to explicitly reset lockCleared right now, just in case
2041 : * some error prevents us from deleting the LOCALLOCK.
2042 : */
2043 24590508 : locallock->lockCleared = false;
2044 :
2045 : /* Attempt fast release of any lock eligible for the fast path. */
2046 24590508 : if (EligibleForRelationFastPath(locktag, lockmode) &&
2047 22857916 : FastPathLocalUseCount > 0)
2048 : {
2049 : bool released;
2050 :
2051 : /*
2052 : * We might not find the lock here, even if we originally entered it
2053 : * here. Another backend may have moved it to the main table.
2054 : */
2055 22715220 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2056 22715220 : released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2057 : lockmode);
2058 22715220 : LWLockRelease(&MyProc->fpInfoLock);
2059 22715220 : if (released)
2060 : {
2061 22055470 : RemoveLocalLock(locallock);
2062 22055470 : return true;
2063 : }
2064 : }
2065 :
2066 : /*
2067 : * Otherwise we've got to mess with the shared lock table.
2068 : */
2069 2535038 : partitionLock = LockHashPartitionLock(locallock->hashcode);
2070 :
2071 2535038 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2072 :
2073 : /*
2074 : * Normally, we don't need to re-find the lock or proclock, since we kept
2075 : * their addresses in the locallock table, and they couldn't have been
2076 : * removed while we were holding a lock on them. But it's possible that
2077 : * the lock was taken fast-path and has since been moved to the main hash
2078 : * table by another backend, in which case we will need to look up the
2079 : * objects here. We assume the lock field is NULL if so.
2080 : */
2081 2535038 : lock = locallock->lock;
2082 2535038 : if (!lock)
2083 : {
2084 : PROCLOCKTAG proclocktag;
2085 :
2086 : Assert(EligibleForRelationFastPath(locktag, lockmode));
2087 6 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2088 : locktag,
2089 : locallock->hashcode,
2090 : HASH_FIND,
2091 : NULL);
2092 6 : if (!lock)
2093 0 : elog(ERROR, "failed to re-find shared lock object");
2094 6 : locallock->lock = lock;
2095 :
2096 6 : proclocktag.myLock = lock;
2097 6 : proclocktag.myProc = MyProc;
2098 6 : locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2099 : &proclocktag,
2100 : HASH_FIND,
2101 : NULL);
2102 6 : if (!locallock->proclock)
2103 0 : elog(ERROR, "failed to re-find shared proclock object");
2104 : }
2105 : LOCK_PRINT("LockRelease: found", lock, lockmode);
2106 2535038 : proclock = locallock->proclock;
2107 : PROCLOCK_PRINT("LockRelease: found", proclock);
2108 :
2109 : /*
2110 : * Double-check that we are actually holding a lock of the type we want to
2111 : * release.
2112 : */
2113 2535038 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2114 : {
2115 : PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2116 0 : LWLockRelease(partitionLock);
2117 0 : elog(WARNING, "you don't own a lock of type %s",
2118 : lockMethodTable->lockModeNames[lockmode]);
2119 0 : RemoveLocalLock(locallock);
2120 0 : return false;
2121 : }
2122 :
2123 : /*
2124 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2125 : */
2126 2535038 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2127 :
2128 2535038 : CleanUpLock(lock, proclock,
2129 : lockMethodTable, locallock->hashcode,
2130 : wakeupNeeded);
2131 :
2132 2535038 : LWLockRelease(partitionLock);
2133 :
2134 2535038 : RemoveLocalLock(locallock);
2135 2535038 : return true;
2136 : }
2137 :
2138 : /*
2139 : * LockReleaseAll -- Release all locks of the specified lock method that
2140 : * are held by the current process.
2141 : *
2142 : * Well, not necessarily *all* locks. The available behaviors are:
2143 : * allLocks == true: release all locks including session locks.
2144 : * allLocks == false: release all non-session locks.
2145 : */
2146 : void
2147 1156772 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2148 : {
2149 : HASH_SEQ_STATUS status;
2150 : LockMethod lockMethodTable;
2151 : int i,
2152 : numLockModes;
2153 : LOCALLOCK *locallock;
2154 : LOCK *lock;
2155 : int partition;
2156 1156772 : bool have_fast_path_lwlock = false;
2157 :
2158 1156772 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2159 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2160 1156772 : lockMethodTable = LockMethods[lockmethodid];
2161 :
2162 : #ifdef LOCK_DEBUG
2163 : if (*(lockMethodTable->trace_flag))
2164 : elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2165 : #endif
2166 :
2167 : /*
2168 : * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2169 : * the only way that the lock we hold on our own VXID can ever get
2170 : * released: it is always and only released when a toplevel transaction
2171 : * ends.
2172 : */
2173 1156772 : if (lockmethodid == DEFAULT_LOCKMETHOD)
2174 565944 : VirtualXactLockTableCleanup();
2175 :
2176 1156772 : numLockModes = lockMethodTable->numLockModes;
2177 :
2178 : /*
2179 : * First we run through the locallock table and get rid of unwanted
2180 : * entries, then we scan the process's proclocks and get rid of those. We
2181 : * do this separately because we may have multiple locallock entries
2182 : * pointing to the same proclock, and we daren't end up with any dangling
2183 : * pointers. Fast-path locks are cleaned up during the locallock table
2184 : * scan, though.
2185 : */
2186 1156772 : hash_seq_init(&status, LockMethodLocalHash);
2187 :
2188 2933430 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2189 : {
2190 : /*
2191 : * If the LOCALLOCK entry is unused, we must've run out of shared
2192 : * memory while trying to set up this lock. Just forget the local
2193 : * entry.
2194 : */
2195 1776658 : if (locallock->nLocks == 0)
2196 : {
2197 90 : RemoveLocalLock(locallock);
2198 90 : continue;
2199 : }
2200 :
2201 : /* Ignore items that are not of the lockmethod to be removed */
2202 1776568 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2203 44894 : continue;
2204 :
2205 : /*
2206 : * If we are asked to release all locks, we can just zap the entry.
2207 : * Otherwise, must scan to see if there are session locks. We assume
2208 : * there is at most one lockOwners entry for session locks.
2209 : */
2210 1731674 : if (!allLocks)
2211 : {
2212 1598090 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2213 :
2214 : /* If session lock is above array position 0, move it down to 0 */
2215 3223450 : for (i = 0; i < locallock->numLockOwners; i++)
2216 : {
2217 1625360 : if (lockOwners[i].owner == NULL)
2218 44728 : lockOwners[0] = lockOwners[i];
2219 : else
2220 1580632 : ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2221 : }
2222 :
2223 1598090 : if (locallock->numLockOwners > 0 &&
2224 1598090 : lockOwners[0].owner == NULL &&
2225 44728 : lockOwners[0].nLocks > 0)
2226 : {
2227 : /* Fix the locallock to show just the session locks */
2228 44728 : locallock->nLocks = lockOwners[0].nLocks;
2229 44728 : locallock->numLockOwners = 1;
2230 : /* We aren't deleting this locallock, so done */
2231 44728 : continue;
2232 : }
2233 : else
 : /* no session locks to keep; forget all ownership info */
2234 1553362 : locallock->numLockOwners = 0;
2235 : }
2236 :
2237 :
2238 : /*
2239 : * If the lock or proclock pointers are NULL, this lock was taken via
2240 : * the relation fast-path (and is not known to have been transferred).
2241 : */
2242 1686946 : if (locallock->proclock == NULL || locallock->lock == NULL)
2243 : {
2244 815628 : LOCKMODE lockmode = locallock->tag.mode;
2245 : Oid relid;
2246 :
2247 : /* Verify that a fast-path lock is what we've got. */
2248 815628 : if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2249 0 : elog(PANIC, "locallock table corrupted");
2250 :
2251 : /*
2252 : * If we don't currently hold the LWLock that protects our
2253 : * fast-path data structures, we must acquire it before attempting
2254 : * to release the lock via the fast-path. We will continue to
2255 : * hold the LWLock until we're done scanning the locallock table,
2256 : * unless we hit a transferred fast-path lock. (XXX is this
2257 : * really such a good idea? There could be a lot of entries ...)
2258 : */
2259 815628 : if (!have_fast_path_lwlock)
2260 : {
2261 249474 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2262 249474 : have_fast_path_lwlock = true;
2263 : }
2264 :
2265 : /* Attempt fast-path release. */
2266 815628 : relid = locallock->tag.lock.locktag_field2;
2267 815628 : if (FastPathUnGrantRelationLock(relid, lockmode))
2268 : {
2269 813644 : RemoveLocalLock(locallock);
2270 813644 : continue;
2271 : }
2272 :
2273 : /*
2274 : * Our lock, originally taken via the fast path, has been
2275 : * transferred to the main lock table. That's going to require
2276 : * some extra work, so release our fast-path lock before starting.
2277 : */
2278 1984 : LWLockRelease(&MyProc->fpInfoLock);
2279 1984 : have_fast_path_lwlock = false;
2280 :
2281 : /*
2282 : * Now dump the lock. We haven't got a pointer to the LOCK or
2283 : * PROCLOCK in this case, so we have to handle this a bit
2284 : * differently than a normal lock release. Unfortunately, this
2285 : * requires an extra LWLock acquire-and-release cycle on the
2286 : * partitionLock, but hopefully it shouldn't happen often.
2287 : */
2288 1984 : LockRefindAndRelease(lockMethodTable, MyProc,
2289 : &locallock->tag.lock, lockmode, false);
2290 1984 : RemoveLocalLock(locallock);
2291 1984 : continue;
2292 : }
2293 :
2294 : /* Mark the proclock to show we need to release this lockmode */
2295 871318 : if (locallock->nLocks > 0)
2296 871318 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2297 :
2298 : /* And remove the locallock hashtable entry */
2299 871318 : RemoveLocalLock(locallock);
2300 : }
2301 :
2302 : /* Done with the fast-path data structures */
2303 1156772 : if (have_fast_path_lwlock)
2304 247490 : LWLockRelease(&MyProc->fpInfoLock);
2305 :
2306 : /*
2307 : * Now, scan each lock partition separately.
2308 : */
2309 19665124 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2310 : {
2311 : LWLock *partitionLock;
2312 18508352 : dlist_head *procLocks = &MyProc->myProcLocks[partition];
2313 : dlist_mutable_iter proclock_iter;
2314 :
2315 18508352 : partitionLock = LockHashPartitionLockByIndex(partition);
2316 :
2317 : /*
2318 : * If the proclock list for this partition is empty, we can skip
2319 : * acquiring the partition lock. This optimization is trickier than
2320 : * it looks, because another backend could be in process of adding
2321 : * something to our proclock list due to promoting one of our
2322 : * fast-path locks. However, any such lock must be one that we
2323 : * decided not to delete above, so it's okay to skip it again now;
2324 : * we'd just decide not to delete it again. We must, however, be
2325 : * careful to re-fetch the list header once we've acquired the
2326 : * partition lock, to be sure we have a valid, up-to-date pointer.
2327 : * (There is probably no significant risk if pointer fetch/store is
2328 : * atomic, but we don't wish to assume that.)
2329 : *
2330 : * XXX This argument assumes that the locallock table correctly
2331 : * represents all of our fast-path locks. While allLocks mode
2332 : * guarantees to clean up all of our normal locks regardless of the
2333 : * locallock situation, we lose that guarantee for fast-path locks.
2334 : * This is not ideal.
2335 : */
2336 18508352 : if (dlist_is_empty(procLocks))
2337 17780196 : continue; /* needn't examine this partition */
2338 :
2339 728156 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2340 :
2341 1644298 : dlist_foreach_modify(proclock_iter, procLocks)
2342 : {
2343 916142 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2344 916142 : bool wakeupNeeded = false;
2345 :
2346 : Assert(proclock->tag.myProc == MyProc);
2347 :
2348 916142 : lock = proclock->tag.myLock;
2349 :
2350 : /* Ignore items that are not of the lockmethod to be removed */
2351 916142 : if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2352 44888 : continue;
2353 :
2354 : /*
2355 : * In allLocks mode, force release of all locks even if locallock
2356 : * table had problems
2357 : */
2358 871254 : if (allLocks)
2359 86964 : proclock->releaseMask = proclock->holdMask;
2360 : else
2361 : Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2362 :
2363 : /*
2364 : * Ignore items that have nothing to be released, unless they have
2365 : * holdMask == 0 and are therefore recyclable
2366 : */
2367 871254 : if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2368 43408 : continue;
2369 :
2370 : PROCLOCK_PRINT("LockReleaseAll", proclock);
2371 : LOCK_PRINT("LockReleaseAll", lock, 0);
2372 : Assert(lock->nRequested >= 0);
2373 : Assert(lock->nGranted >= 0);
2374 : Assert(lock->nGranted <= lock->nRequested);
2375 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
2376 :
2377 : /*
2378 : * Release the previously-marked lock modes
2379 : */
2380 7450614 : for (i = 1; i <= numLockModes; i++)
2381 : {
2382 6622768 : if (proclock->releaseMask & LOCKBIT_ON(i))
2383 871318 : wakeupNeeded |= UnGrantLock(lock, i, proclock,
2384 : lockMethodTable);
2385 : }
2386 : Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2387 : Assert(lock->nGranted <= lock->nRequested);
2388 : LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2389 :
2390 827846 : proclock->releaseMask = 0;
2391 :
2392 : /* CleanUpLock will wake up waiters if needed. */
2393 827846 : CleanUpLock(lock, proclock,
2394 : lockMethodTable,
2395 827846 : LockTagHashCode(&lock->tag),
2396 : wakeupNeeded);
2397 : } /* loop over PROCLOCKs within this partition */
2398 :
2399 728156 : LWLockRelease(partitionLock);
2400 : } /* loop over partitions */
2401 :
2402 : #ifdef LOCK_DEBUG
2403 : if (*(lockMethodTable->trace_flag))
2404 : elog(LOG, "LockReleaseAll done");
2405 : #endif
2406 1156772 : }
2406 :
2407 : /*
2408 : * LockReleaseSession -- Release all session locks of the specified lock method
2409 : * that are held by the current process.
2410 : */
2411 : void
2412 238 : LockReleaseSession(LOCKMETHODID lockmethodid)
2413 : {
2414 : HASH_SEQ_STATUS status;
2415 : LOCALLOCK *locallock;
2416 :
2417 238 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2418 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2419 :
2420 238 : hash_seq_init(&status, LockMethodLocalHash);
2421 :
2422 452 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2423 : {
2424 : /* Ignore items that are not of the specified lock method */
2425 214 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2426 20 : continue;
2427 :
 : /* sessionLock = true: release only session-level holdings */
2428 194 : ReleaseLockIfHeld(locallock, true);
2429 : }
2430 238 : }
2431 :
2432 : /*
2433 : * LockReleaseCurrentOwner
2434 : * Release all locks belonging to CurrentResourceOwner
2435 : *
2436 : * If the caller knows what those locks are, it can pass them as an array.
2437 : * That speeds up the call significantly, when a lot of locks are held.
2438 : * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2439 : * table to find them.
2440 : */
2441 : void
2442 9364 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2443 : {
2444 9364 : if (locallocks == NULL)
2445 : {
2446 : HASH_SEQ_STATUS status;
2447 : LOCALLOCK *locallock;
2448 :
 : /* No list supplied: scan the whole locallock hash table */
2449 8 : hash_seq_init(&status, LockMethodLocalHash);
2450 :
2451 538 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2452 530 : ReleaseLockIfHeld(locallock, false);
2453 : }
2454 : else
2455 : {
2456 : int i;
2457 :
2458 14354 : for (i = nlocks - 1; i >= 0; i--)
2459 4998 : ReleaseLockIfHeld(locallocks[i], false);
2460 : }
2461 9364 : }
2462 :
2463 : /*
2464 : * ReleaseLockIfHeld
2465 : * Release any session-level locks on this lockable object if sessionLock
2466 : * is true; else, release any locks held by CurrentResourceOwner.
2467 : *
2468 : * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2469 : * locks), but without refactoring LockRelease() we cannot support releasing
2470 : * locks belonging to resource owners other than CurrentResourceOwner.
2471 : * If we were to refactor, it'd be a good idea to fix it so we don't have to
2472 : * do a hashtable lookup of the locallock, too. However, currently this
2473 : * function isn't used heavily enough to justify refactoring for its
2474 : * convenience.
2475 : */
2476 : static void
2477 5722 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2478 : {
2479 : ResourceOwner owner;
2480 : LOCALLOCKOWNER *lockOwners;
2481 : int i;
2482 :
2483 : /* Identify owner for lock (must match LockRelease!) */
2484 5722 : if (sessionLock)
2485 194 : owner = NULL;
2486 : else
2487 5528 : owner = CurrentResourceOwner;
2488 :
2489 : /* Scan to see if there are any locks belonging to the target owner */
2490 5722 : lockOwners = locallock->lockOwners;
2491 6108 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2492 : {
2493 5722 : if (lockOwners[i].owner == owner)
2494 : {
2495 : Assert(lockOwners[i].nLocks > 0);
2496 5336 : if (lockOwners[i].nLocks < locallock->nLocks)
2497 : {
2498 : /*
2499 : * We will still hold this lock after forgetting this
2500 : * ResourceOwner.
2501 : */
2502 1344 : locallock->nLocks -= lockOwners[i].nLocks;
2503 : /* compact out unused slot */
2504 1344 : locallock->numLockOwners--;
2505 1344 : if (owner != NULL)
2506 1344 : ResourceOwnerForgetLock(owner, locallock);
2507 1344 : if (i < locallock->numLockOwners)
2508 0 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2509 : }
2510 : else
2511 : {
2512 : Assert(lockOwners[i].nLocks == locallock->nLocks);
2513 : /* We want to call LockRelease just once */
2514 3992 : lockOwners[i].nLocks = 1;
2515 3992 : locallock->nLocks = 1;
2516 3992 : if (!LockRelease(&locallock->tag.lock,
2517 : locallock->tag.mode,
2518 : sessionLock))
2519 0 : elog(WARNING, "ReleaseLockIfHeld: failed??");
2520 : }
 : /* Handled the matching owner entry, so we're done */
2521 5336 : break;
2522 : }
2523 : }
2524 : }
2525 :
2526 : /*
2527 : * LockReassignCurrentOwner
2528 : * Reassign all locks belonging to CurrentResourceOwner to belong
2529 : * to its parent resource owner.
2530 : *
2531 : * If the caller knows what those locks are, it can pass them as an array.
2532 : * That speeds up the call significantly, when a lot of locks are held
2533 : * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2534 : * and we'll traverse through our hash table to find them.
2535 : */
2536 : void
2537 610462 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2538 : {
2539 610462 : ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2540 :
 : /* Caller must not invoke this on a top-level resource owner */
2541 : Assert(parent != NULL);
2542 :
2543 610462 : if (locallocks == NULL)
2544 : {
2545 : HASH_SEQ_STATUS status;
2546 : LOCALLOCK *locallock;
2547 :
 : /* No list supplied: scan the whole locallock hash table */
2548 5794 : hash_seq_init(&status, LockMethodLocalHash);
2549 :
2550 153602 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2551 147808 : LockReassignOwner(locallock, parent);
2552 : }
2553 : else
2554 : {
2555 : int i;
2556 :
2557 1260502 : for (i = nlocks - 1; i >= 0; i--)
2558 655834 : LockReassignOwner(locallocks[i], parent);
2559 : }
2560 610462 : }
2561 :
2562 : /*
2563 : * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2564 : * CurrentResourceOwner to its parent.
2565 : */
2566 : static void
2567 803642 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2568 : {
2569 : LOCALLOCKOWNER *lockOwners;
2570 : int i;
2571 803642 : int ic = -1;
2572 803642 : int ip = -1;
2573 :
2574 : /*
2575 : * Scan to see if there are any locks belonging to current owner or its
2576 : * parent
2577 : */
2578 803642 : lockOwners = locallock->lockOwners;
2579 1888636 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2580 : {
2581 1084994 : if (lockOwners[i].owner == CurrentResourceOwner)
2582 785328 : ic = i;
2583 299666 : else if (lockOwners[i].owner == parent)
2584 216100 : ip = i;
2585 : }
2586 :
2587 803642 : if (ic < 0)
2588 18314 : return; /* no current locks */
2589 :
2590 785328 : if (ip < 0)
2591 : {
2592 : /* Parent has no slot, so just give it the child's slot */
2593 587482 : lockOwners[ic].owner = parent;
2594 587482 : ResourceOwnerRememberLock(parent, locallock);
2595 : }
2596 : else
2597 : {
2598 : /* Merge child's count with parent's */
2599 197846 : lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2600 : /* compact out unused slot */
2601 197846 : locallock->numLockOwners--;
2602 197846 : if (ic < locallock->numLockOwners)
2603 1390 : lockOwners[ic] = lockOwners[locallock->numLockOwners];
2604 : }
 : /* Either way, CurrentResourceOwner no longer references this locallock */
2605 785328 : ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2606 : }
2607 :
2608 : /*
2609 : * FastPathGrantRelationLock
2610 : * Grant lock using per-backend fast-path array, if there is space.
2611 : */
2612 : static bool
2613 22871876 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2614 : {
2615 : uint32 f;
2616 22871876 : uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2617 :
2618 : /* Scan for existing entry for this relid, remembering empty slot. */
2619 387486424 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2620 : {
2621 365185598 : if (FAST_PATH_GET_BITS(MyProc, f) == 0)
 : /* remember the (last-seen) empty slot, in case relid isn't found */
2622 318465624 : unused_slot = f;
2623 46719974 : else if (MyProc->fpRelId[f] == relid)
2624 : {
2625 : Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2626 571050 : FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2627 571050 : return true;
2628 : }
2629 : }
2630 :
2631 : /* If no existing entry, use any empty slot. */
2632 22300826 : if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2633 : {
2634 22300826 : MyProc->fpRelId[unused_slot] = relid;
2635 22300826 : FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2636 22300826 : ++FastPathLocalUseCount;
2637 22300826 : return true;
2638 : }
2639 :
2640 : /* No existing entry, and no empty slot. */
2641 0 : return false;
2642 : }
2643 :
2644 : /*
2645 : * FastPathUnGrantRelationLock
2646 : * Release fast-path lock, if present. Update backend-private local
2647 : * use count, while we're at it.
2648 : */
2649 : static bool
2650 23530848 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2651 : {
2652 : uint32 f;
2653 23530848 : bool result = false;
2654 :
2655 23530848 : FastPathLocalUseCount = 0;
2656 400024416 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2657 : {
2658 376493568 : if (MyProc->fpRelId[f] == relid
2659 27869682 : && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2660 : {
2661 : Assert(!result);
2662 22869114 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2663 22869114 : result = true;
2664 : /* we continue iterating so as to update FastPathLocalUseCount */
2665 : }
2666 376493568 : if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2667 55272692 : ++FastPathLocalUseCount;
2668 : }
2669 23530848 : return result;
2670 : }
2671 :
2672 : /*
2673 : * FastPathTransferRelationLocks
2674 : * Transfer locks matching the given lock tag from per-backend fast-path
2675 : * arrays to the shared hash table.
2676 : *
2677 : * Returns true if successful, false if ran out of shared memory.
2678 : */
static bool
FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
							  uint32 hashcode)
{
	LWLock	   *partitionLock = LockHashPartitionLock(hashcode);
	Oid			relid = locktag->locktag_field2;
	uint32		i;

	/*
	 * Every PGPROC that can potentially hold a fast-path lock is present in
	 * ProcGlobal->allProcs.  Prepared transactions are not, but any
	 * outstanding fast-path locks held by prepared transactions are
	 * transferred to the main lock table.
	 */
	for (i = 0; i < ProcGlobal->allProcCount; i++)
	{
		PGPROC	   *proc = &ProcGlobal->allProcs[i];
		uint32		f;

		/* Freeze this backend's fast-path array while we inspect it. */
		LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);

		/*
		 * If the target backend isn't referencing the same database as the
		 * lock, then we needn't examine the individual relation IDs at all;
		 * none of them can be relevant.
		 *
		 * proc->databaseId is set at backend startup time and never changes
		 * thereafter, so it might be safe to perform this test before
		 * acquiring &proc->fpInfoLock.  In particular, it's certainly safe to
		 * assume that if the target backend holds any fast-path locks, it
		 * must have performed a memory-fencing operation (in particular, an
		 * LWLock acquisition) since setting proc->databaseId.  However, it's
		 * less clear that our backend is certain to have performed a memory
		 * fencing operation since the other backend set proc->databaseId.  So
		 * for now, we test it after acquiring the LWLock just to be safe.
		 */
		if (proc->databaseId != locktag->locktag_field1)
		{
			LWLockRelease(&proc->fpInfoLock);
			continue;
		}

		for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
		{
			uint32		lockmode;

			/* Look for an allocated slot matching the given relid. */
			if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
				continue;

			/*
			 * Find or create lock object.  We hold both fpInfoLock and the
			 * partition lock here, in that order, so no one else can touch
			 * this slot or the shared lock entry while we move the bits.
			 */
			LWLockAcquire(partitionLock, LW_EXCLUSIVE);
			for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
				 lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
				 ++lockmode)
			{
				PROCLOCK   *proclock;

				if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
					continue;
				proclock = SetupLockInTable(lockMethodTable, proc, locktag,
											hashcode, lockmode);
				if (!proclock)
				{
					/*
					 * Out of shared memory; give up.  Any lock modes already
					 * transferred by earlier iterations remain in the main
					 * lock table (and have been cleared from the fast-path
					 * slot below), so nothing is lost or duplicated.
					 */
					LWLockRelease(partitionLock);
					LWLockRelease(&proc->fpInfoLock);
					return false;
				}
				GrantLock(proclock->tag.myLock, proclock, lockmode);
				FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
			}
			LWLockRelease(partitionLock);

			/* No need to examine remaining slots. */
			break;
		}
		LWLockRelease(&proc->fpInfoLock);
	}
	return true;
}
2759 :
2760 : /*
2761 : * FastPathGetRelationLockEntry
2762 : * Return the PROCLOCK for a lock originally taken via the fast-path,
2763 : * transferring it to the primary lock table if necessary.
2764 : *
2765 : * Note: caller takes care of updating the locallock object.
2766 : */
static PROCLOCK *
FastPathGetRelationLockEntry(LOCALLOCK *locallock)
{
	LockMethod	lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
	LOCKTAG    *locktag = &locallock->tag.lock;
	PROCLOCK   *proclock = NULL;
	LWLock	   *partitionLock = LockHashPartitionLock(locallock->hashcode);
	Oid			relid = locktag->locktag_field2;
	uint32		f;

	/* Prevent concurrent changes to our own fast-path array. */
	LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);

	for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
	{
		uint32		lockmode;

		/* Look for an allocated slot matching the given relid. */
		if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
			continue;

		/* If we don't have a lock of the given mode, forget it! */
		lockmode = locallock->tag.mode;
		if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
			break;

		/* Find or create lock object. */
		LWLockAcquire(partitionLock, LW_EXCLUSIVE);

		proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
									locallock->hashcode, lockmode);
		if (!proclock)
		{
			/* Release locks before reporting; ereport(ERROR) won't return. */
			LWLockRelease(partitionLock);
			LWLockRelease(&MyProc->fpInfoLock);
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of shared memory"),
					 errhint("You might need to increase %s.", "max_locks_per_transaction")));
		}
		GrantLock(proclock->tag.myLock, proclock, lockmode);
		FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);

		LWLockRelease(partitionLock);

		/* No need to examine remaining slots. */
		break;
	}

	LWLockRelease(&MyProc->fpInfoLock);

	/*
	 * Lock may have already been transferred by some other backend (e.g. by
	 * FastPathTransferRelationLocks); in that case the fast-path scan above
	 * found nothing, but a PROCLOCK for us must exist in the main table.
	 */
	if (proclock == NULL)
	{
		LOCK	   *lock;
		PROCLOCKTAG proclocktag;
		uint32		proclock_hashcode;

		LWLockAcquire(partitionLock, LW_SHARED);

		lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
													locktag,
													locallock->hashcode,
													HASH_FIND,
													NULL);
		if (!lock)
			elog(ERROR, "failed to re-find shared lock object");

		proclocktag.myLock = lock;
		proclocktag.myProc = MyProc;

		proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
		proclock = (PROCLOCK *)
			hash_search_with_hash_value(LockMethodProcLockHash,
										&proclocktag,
										proclock_hashcode,
										HASH_FIND,
										NULL);
		if (!proclock)
			elog(ERROR, "failed to re-find shared proclock object");
		LWLockRelease(partitionLock);
	}

	return proclock;
}
2851 :
2852 : /*
2853 : * GetLockConflicts
2854 : * Get an array of VirtualTransactionIds of xacts currently holding locks
2855 : * that would conflict with the specified lock/lockmode.
2856 : * xacts merely awaiting such a lock are NOT reported.
2857 : *
2858 : * The result array is palloc'd and is terminated with an invalid VXID.
2859 : * *countp, if not null, is updated to the number of items set.
2860 : *
2861 : * Of course, the result could be out of date by the time it's returned, so
2862 : * use of this function has to be thought about carefully. Similarly, a
2863 : * PGPROC with no "lxid" will be considered non-conflicting regardless of any
2864 : * lock it holds. Existing callers don't care about a locker after that
2865 : * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
2866 : * pg_xact updates and before releasing locks.
2867 : *
2868 : * Note we never include the current xact's vxid in the result array,
2869 : * since an xact never blocks itself.
2870 : */
VirtualTransactionId *
GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
{
	static VirtualTransactionId *vxids;
	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
	LockMethod	lockMethodTable;
	LOCK	   *lock;
	LOCKMASK	conflictMask;
	dlist_iter	proclock_iter;
	PROCLOCK   *proclock;
	uint32		hashcode;
	LWLock	   *partitionLock;
	int			count = 0;
	int			fast_count = 0;

	/* Validate the lock method and mode before touching shared state. */
	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];
	if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
		elog(ERROR, "unrecognized lock mode: %d", lockmode);

	/*
	 * Allocate memory to store results, and fill with InvalidVXID.  We only
	 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
	 * InHotStandby allocate once in TopMemoryContext (the static buffer is
	 * then reused on every call, avoiding repeated palloc in the startup
	 * process).
	 */
	if (InHotStandby)
	{
		if (vxids == NULL)
			vxids = (VirtualTransactionId *)
				MemoryContextAlloc(TopMemoryContext,
								   sizeof(VirtualTransactionId) *
								   (MaxBackends + max_prepared_xacts + 1));
	}
	else
		vxids = (VirtualTransactionId *)
			palloc0(sizeof(VirtualTransactionId) *
					(MaxBackends + max_prepared_xacts + 1));

	/* Compute hash code and partition lock, and look up conflicting modes. */
	hashcode = LockTagHashCode(locktag);
	partitionLock = LockHashPartitionLock(hashcode);
	conflictMask = lockMethodTable->conflictTab[lockmode];

	/*
	 * Fast path locks might not have been entered in the primary lock table.
	 * If the lock we're dealing with could conflict with such a lock, we must
	 * examine each backend's fast-path array for conflicts.
	 */
	if (ConflictsWithRelationFastPath(locktag, lockmode))
	{
		int			i;
		Oid			relid = locktag->locktag_field2;
		VirtualTransactionId vxid;

		/*
		 * Iterate over relevant PGPROCs.  Anything held by a prepared
		 * transaction will have been transferred to the primary lock table,
		 * so we need not worry about those.  This is all a bit fuzzy, because
		 * new locks could be taken after we've visited a particular
		 * partition, but the callers had better be prepared to deal with that
		 * anyway, since the locks could equally well be taken between the
		 * time we return the value and the time the caller does something
		 * with it.
		 */
		for (i = 0; i < ProcGlobal->allProcCount; i++)
		{
			PGPROC	   *proc = &ProcGlobal->allProcs[i];
			uint32		f;

			/* A backend never blocks itself */
			if (proc == MyProc)
				continue;

			LWLockAcquire(&proc->fpInfoLock, LW_SHARED);

			/*
			 * If the target backend isn't referencing the same database as
			 * the lock, then we needn't examine the individual relation IDs
			 * at all; none of them can be relevant.
			 *
			 * See FastPathTransferRelationLocks() for discussion of why we do
			 * this test after acquiring the lock.
			 */
			if (proc->databaseId != locktag->locktag_field1)
			{
				LWLockRelease(&proc->fpInfoLock);
				continue;
			}

			for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
			{
				uint32		lockmask;

				/* Look for an allocated slot matching the given relid. */
				if (relid != proc->fpRelId[f])
					continue;
				lockmask = FAST_PATH_GET_BITS(proc, f);
				if (!lockmask)
					continue;
				/* Align the per-slot bits with the conflict table's bits. */
				lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;

				/*
				 * There can only be one entry per relation, so if we found it
				 * and it doesn't conflict, we can skip the rest of the slots.
				 */
				if ((lockmask & conflictMask) == 0)
					break;

				/* Conflict! */
				GET_VXID_FROM_PGPROC(vxid, *proc);

				if (VirtualTransactionIdIsValid(vxid))
					vxids[count++] = vxid;
				/* else, xact already committed or aborted */

				/* No need to examine remaining slots. */
				break;
			}

			LWLockRelease(&proc->fpInfoLock);
		}
	}

	/*
	 * Remember how many fast-path conflicts we found; entries before this
	 * index are used below to suppress duplicates from the main table.
	 */
	fast_count = count;

	/*
	 * Look up the lock object matching the tag.
	 */
	LWLockAcquire(partitionLock, LW_SHARED);

	lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
												locktag,
												hashcode,
												HASH_FIND,
												NULL);
	if (!lock)
	{
		/*
		 * If the lock object doesn't exist, there is nothing holding a lock
		 * on this lockable object.
		 */
		LWLockRelease(partitionLock);
		vxids[count].procNumber = INVALID_PROC_NUMBER;
		vxids[count].localTransactionId = InvalidLocalTransactionId;
		if (countp)
			*countp = count;
		return vxids;
	}

	/*
	 * Examine each existing holder (or awaiter) of the lock.
	 */
	dlist_foreach(proclock_iter, &lock->procLocks)
	{
		proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);

		if (conflictMask & proclock->holdMask)
		{
			PGPROC	   *proc = proclock->tag.myProc;

			/* A backend never blocks itself */
			if (proc != MyProc)
			{
				VirtualTransactionId vxid;

				GET_VXID_FROM_PGPROC(vxid, *proc);

				if (VirtualTransactionIdIsValid(vxid))
				{
					int			i;

					/* Avoid duplicate entries. */
					for (i = 0; i < fast_count; ++i)
						if (VirtualTransactionIdEquals(vxids[i], vxid))
							break;
					if (i >= fast_count)
						vxids[count++] = vxid;
				}
				/* else, xact already committed or aborted */
			}
		}
	}

	LWLockRelease(partitionLock);

	if (count > MaxBackends + max_prepared_xacts)	/* should never happen */
		elog(PANIC, "too many conflicting locks found");

	/* Terminate the array with an invalid VXID, and report the count. */
	vxids[count].procNumber = INVALID_PROC_NUMBER;
	vxids[count].localTransactionId = InvalidLocalTransactionId;
	if (countp)
		*countp = count;
	return vxids;
}
3067 :
3068 : /*
3069 : * Find a lock in the shared lock table and release it. It is the caller's
3070 : * responsibility to verify that this is a sane thing to do. (For example, it
3071 : * would be bad to release a lock here if there might still be a LOCALLOCK
3072 : * object with pointers to it.)
3073 : *
3074 : * We currently use this in two situations: first, to release locks held by
3075 : * prepared transactions on commit (see lock_twophase_postcommit); and second,
3076 : * to release locks taken via the fast-path, transferred to the main hash
3077 : * table, and then released (see LockReleaseAll).
3078 : */
static void
LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
					 LOCKTAG *locktag, LOCKMODE lockmode,
					 bool decrement_strong_lock_count)
{
	LOCK	   *lock;
	PROCLOCK   *proclock;
	PROCLOCKTAG proclocktag;
	uint32		hashcode;
	uint32		proclock_hashcode;
	LWLock	   *partitionLock;
	bool		wakeupNeeded;

	hashcode = LockTagHashCode(locktag);
	partitionLock = LockHashPartitionLock(hashcode);

	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/*
	 * Re-find the lock object (it had better be there).
	 */
	lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
												locktag,
												hashcode,
												HASH_FIND,
												NULL);
	if (!lock)
		elog(PANIC, "failed to re-find shared lock object");

	/*
	 * Re-find the proclock object (ditto).
	 */
	proclocktag.myLock = lock;
	proclocktag.myProc = proc;

	proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);

	proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
														&proclocktag,
														proclock_hashcode,
														HASH_FIND,
														NULL);
	if (!proclock)
		elog(PANIC, "failed to re-find shared proclock object");

	/*
	 * Double-check that we are actually holding a lock of the type we want to
	 * release.
	 */
	if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
	{
		/* NOTE: debug label predates this function's second caller */
		PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
		LWLockRelease(partitionLock);
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		return;
	}

	/*
	 * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
	 */
	wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);

	CleanUpLock(lock, proclock,
				lockMethodTable, hashcode,
				wakeupNeeded);

	LWLockRelease(partitionLock);

	/*
	 * Decrement strong lock count.  This logic is needed only for 2PC: the
	 * prepared transaction retained the strong-lock count taken when the
	 * lock was originally acquired (see AtPrepare_Locks), so it must be
	 * given back here when the lock is finally released.
	 */
	if (decrement_strong_lock_count
		&& ConflictsWithRelationFastPath(locktag, lockmode))
	{
		uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);

		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
		Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
		FastPathStrongRelationLocks->count[fasthashcode]--;
		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
	}
}
3162 :
3163 : /*
3164 : * CheckForSessionAndXactLocks
3165 : * Check to see if transaction holds both session-level and xact-level
3166 : * locks on the same object; if so, throw an error.
3167 : *
3168 : * If we have both session- and transaction-level locks on the same object,
3169 : * PREPARE TRANSACTION must fail. This should never happen with regular
3170 : * locks, since we only take those at session level in some special operations
3171 : * like VACUUM. It's possible to hit this with advisory locks, though.
3172 : *
3173 : * It would be nice if we could keep the session hold and give away the
3174 : * transactional hold to the prepared xact. However, that would require two
3175 : * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3176 : * available when it comes time for PostPrepare_Locks to do the deed.
3177 : * So for now, we error out while we can still do so safely.
3178 : *
3179 : * Since the LOCALLOCK table stores a separate entry for each lockmode,
3180 : * we can't implement this check by examining LOCALLOCK entries in isolation.
3181 : * We must build a transient hashtable that is indexed by locktag only.
3182 : */
3183 : static void
3184 758 : CheckForSessionAndXactLocks(void)
3185 : {
3186 : typedef struct
3187 : {
3188 : LOCKTAG lock; /* identifies the lockable object */
3189 : bool sessLock; /* is any lockmode held at session level? */
3190 : bool xactLock; /* is any lockmode held at xact level? */
3191 : } PerLockTagEntry;
3192 :
3193 : HASHCTL hash_ctl;
3194 : HTAB *lockhtab;
3195 : HASH_SEQ_STATUS status;
3196 : LOCALLOCK *locallock;
3197 :
3198 : /* Create a local hash table keyed by LOCKTAG only */
3199 758 : hash_ctl.keysize = sizeof(LOCKTAG);
3200 758 : hash_ctl.entrysize = sizeof(PerLockTagEntry);
3201 758 : hash_ctl.hcxt = CurrentMemoryContext;
3202 :
3203 758 : lockhtab = hash_create("CheckForSessionAndXactLocks table",
3204 : 256, /* arbitrary initial size */
3205 : &hash_ctl,
3206 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3207 :
3208 : /* Scan local lock table to find entries for each LOCKTAG */
3209 758 : hash_seq_init(&status, LockMethodLocalHash);
3210 :
3211 2522 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3212 : {
3213 1768 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3214 : PerLockTagEntry *hentry;
3215 : bool found;
3216 : int i;
3217 :
3218 : /*
3219 : * Ignore VXID locks. We don't want those to be held by prepared
3220 : * transactions, since they aren't meaningful after a restart.
3221 : */
3222 1768 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3223 0 : continue;
3224 :
3225 : /* Ignore it if we don't actually hold the lock */
3226 1768 : if (locallock->nLocks <= 0)
3227 0 : continue;
3228 :
3229 : /* Otherwise, find or make an entry in lockhtab */
3230 1768 : hentry = (PerLockTagEntry *) hash_search(lockhtab,
3231 1768 : &locallock->tag.lock,
3232 : HASH_ENTER, &found);
3233 1768 : if (!found) /* initialize, if newly created */
3234 1676 : hentry->sessLock = hentry->xactLock = false;
3235 :
3236 : /* Scan to see if we hold lock at session or xact level or both */
3237 3536 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3238 : {
3239 1768 : if (lockOwners[i].owner == NULL)
3240 18 : hentry->sessLock = true;
3241 : else
3242 1750 : hentry->xactLock = true;
3243 : }
3244 :
3245 : /*
3246 : * We can throw error immediately when we see both types of locks; no
3247 : * need to wait around to see if there are more violations.
3248 : */
3249 1768 : if (hentry->sessLock && hentry->xactLock)
3250 4 : ereport(ERROR,
3251 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3252 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3253 : }
3254 :
3255 : /* Success, so clean up */
3256 754 : hash_destroy(lockhtab);
3257 754 : }
3258 :
3259 : /*
3260 : * AtPrepare_Locks
3261 : * Do the preparatory work for a PREPARE: make 2PC state file records
3262 : * for all locks currently held.
3263 : *
3264 : * Session-level locks are ignored, as are VXID locks.
3265 : *
3266 : * For the most part, we don't need to touch shared memory for this ---
3267 : * all the necessary state information is in the locallock table.
3268 : * Fast-path locks are an exception, however: we move any such locks to
3269 : * the main table before allowing PREPARE TRANSACTION to succeed.
3270 : */
void
AtPrepare_Locks(void)
{
	HASH_SEQ_STATUS status;
	LOCALLOCK  *locallock;

	/* First, verify there aren't locks of both xact and session level */
	CheckForSessionAndXactLocks();

	/* Now do the per-locallock cleanup work */
	hash_seq_init(&status, LockMethodLocalHash);

	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
	{
		TwoPhaseLockRecord record;
		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
		bool		haveSessionLock;
		bool		haveXactLock;
		int			i;

		/*
		 * Ignore VXID locks.  We don't want those to be held by prepared
		 * transactions, since they aren't meaningful after a restart.
		 */
		if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
			continue;

		/* Ignore it if we don't actually hold the lock */
		if (locallock->nLocks <= 0)
			continue;

		/* Scan to see whether we hold it at session or transaction level */
		haveSessionLock = haveXactLock = false;
		for (i = locallock->numLockOwners - 1; i >= 0; i--)
		{
			if (lockOwners[i].owner == NULL)
				haveSessionLock = true;
			else
				haveXactLock = true;
		}

		/* Ignore it if we have only session lock */
		if (!haveXactLock)
			continue;

		/* This can't happen, because we already checked it */
		if (haveSessionLock)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));

		/*
		 * If the local lock was taken via the fast-path, we need to move it
		 * to the primary lock table, or just get a pointer to the existing
		 * primary lock table entry if by chance it's already been
		 * transferred.
		 */
		if (locallock->proclock == NULL)
		{
			locallock->proclock = FastPathGetRelationLockEntry(locallock);
			locallock->lock = locallock->proclock->tag.myLock;
		}

		/*
		 * Arrange to not release any strong lock count held by this lock
		 * entry.  We must retain the count until the prepared transaction is
		 * committed or rolled back (at which point LockRefindAndRelease
		 * decrements it on our behalf).
		 */
		locallock->holdsStrongLockCount = false;

		/*
		 * Create a 2PC record.  Each record carries just the locktag and
		 * mode; it is replayed at COMMIT/ROLLBACK PREPARED time.
		 */
		memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
		record.lockmode = locallock->tag.mode;

		RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
							   &record, sizeof(TwoPhaseLockRecord));
	}
}
3351 :
3352 : /*
3353 : * PostPrepare_Locks
3354 : * Clean up after successful PREPARE
3355 : *
3356 : * Here, we want to transfer ownership of our locks to a dummy PGPROC
3357 : * that's now associated with the prepared transaction, and we want to
3358 : * clean out the corresponding entries in the LOCALLOCK table.
3359 : *
3360 : * Note: by removing the LOCALLOCK entries, we are leaving dangling
3361 : * pointers in the transaction's resource owner. This is OK at the
3362 : * moment since resowner.c doesn't try to free locks retail at a toplevel
3363 : * transaction commit or abort. We could alternatively zero out nLocks
3364 : * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3365 : * but that probably costs more cycles.
3366 : */
3367 : void
3368 754 : PostPrepare_Locks(TransactionId xid)
3369 : {
3370 754 : PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3371 : HASH_SEQ_STATUS status;
3372 : LOCALLOCK *locallock;
3373 : LOCK *lock;
3374 : PROCLOCK *proclock;
3375 : PROCLOCKTAG proclocktag;
3376 : int partition;
3377 :
3378 : /* Can't prepare a lock group follower. */
3379 : Assert(MyProc->lockGroupLeader == NULL ||
3380 : MyProc->lockGroupLeader == MyProc);
3381 :
3382 : /* This is a critical section: any error means big trouble */
3383 754 : START_CRIT_SECTION();
3384 :
3385 : /*
3386 : * First we run through the locallock table and get rid of unwanted
3387 : * entries, then we scan the process's proclocks and transfer them to the
3388 : * target proc.
3389 : *
3390 : * We do this separately because we may have multiple locallock entries
3391 : * pointing to the same proclock, and we daren't end up with any dangling
3392 : * pointers.
3393 : */
3394 754 : hash_seq_init(&status, LockMethodLocalHash);
3395 :
3396 2510 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3397 : {
3398 1756 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3399 : bool haveSessionLock;
3400 : bool haveXactLock;
3401 : int i;
3402 :
3403 1756 : if (locallock->proclock == NULL || locallock->lock == NULL)
3404 : {
3405 : /*
3406 : * We must've run out of shared memory while trying to set up this
3407 : * lock. Just forget the local entry.
3408 : */
3409 : Assert(locallock->nLocks == 0);
3410 0 : RemoveLocalLock(locallock);
3411 0 : continue;
3412 : }
3413 :
3414 : /* Ignore VXID locks */
3415 1756 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3416 0 : continue;
3417 :
3418 : /* Scan to see whether we hold it at session or transaction level */
3419 1756 : haveSessionLock = haveXactLock = false;
3420 3512 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3421 : {
3422 1756 : if (lockOwners[i].owner == NULL)
3423 14 : haveSessionLock = true;
3424 : else
3425 1742 : haveXactLock = true;
3426 : }
3427 :
3428 : /* Ignore it if we have only session lock */
3429 1756 : if (!haveXactLock)
3430 14 : continue;
3431 :
3432 : /* This can't happen, because we already checked it */
3433 1742 : if (haveSessionLock)
3434 0 : ereport(PANIC,
3435 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3436 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3437 :
3438 : /* Mark the proclock to show we need to release this lockmode */
3439 1742 : if (locallock->nLocks > 0)
3440 1742 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3441 :
3442 : /* And remove the locallock hashtable entry */
3443 1742 : RemoveLocalLock(locallock);
3444 : }
3445 :
3446 : /*
3447 : * Now, scan each lock partition separately.
3448 : */
3449 12818 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3450 : {
3451 : LWLock *partitionLock;
3452 12064 : dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3453 : dlist_mutable_iter proclock_iter;
3454 :
3455 12064 : partitionLock = LockHashPartitionLockByIndex(partition);
3456 :
3457 : /*
3458 : * If the proclock list for this partition is empty, we can skip
3459 : * acquiring the partition lock. This optimization is safer than the
3460 : * situation in LockReleaseAll, because we got rid of any fast-path
3461 : * locks during AtPrepare_Locks, so there cannot be any case where
3462 : * another backend is adding something to our lists now. For safety,
3463 : * though, we code this the same way as in LockReleaseAll.
3464 : */
3465 12064 : if (dlist_is_empty(procLocks))
3466 10382 : continue; /* needn't examine this partition */
3467 :
3468 1682 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3469 :
3470 3442 : dlist_foreach_modify(proclock_iter, procLocks)
3471 : {
3472 1760 : proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3473 :
3474 : Assert(proclock->tag.myProc == MyProc);
3475 :
3476 1760 : lock = proclock->tag.myLock;
3477 :
3478 : /* Ignore VXID locks */
3479 1760 : if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3480 92 : continue;
3481 :
3482 : PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3483 : LOCK_PRINT("PostPrepare_Locks", lock, 0);
3484 : Assert(lock->nRequested >= 0);
3485 : Assert(lock->nGranted >= 0);
3486 : Assert(lock->nGranted <= lock->nRequested);
3487 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
3488 :
3489 : /* Ignore it if nothing to release (must be a session lock) */
3490 1668 : if (proclock->releaseMask == 0)
3491 14 : continue;
3492 :
3493 : /* Else we should be releasing all locks */
3494 1654 : if (proclock->releaseMask != proclock->holdMask)
3495 0 : elog(PANIC, "we seem to have dropped a bit somewhere");
3496 :
3497 : /*
3498 : * We cannot simply modify proclock->tag.myProc to reassign
3499 : * ownership of the lock, because that's part of the hash key and
3500 : * the proclock would then be in the wrong hash chain. Instead
3501 : * use hash_update_hash_key. (We used to create a new hash entry,
3502 : * but that risks out-of-memory failure if other processes are
3503 : * busy making proclocks too.) We must unlink the proclock from
3504 : * our procLink chain and put it into the new proc's chain, too.
3505 : *
3506 : * Note: the updated proclock hash key will still belong to the
3507 : * same hash partition, cf proclock_hash(). So the partition lock
3508 : * we already hold is sufficient for this.
3509 : */
3510 1654 : dlist_delete(&proclock->procLink);
3511 :
3512 : /*
3513 : * Create the new hash key for the proclock.
3514 : */
3515 1654 : proclocktag.myLock = lock;
3516 1654 : proclocktag.myProc = newproc;
3517 :
3518 : /*
3519 : * Update groupLeader pointer to point to the new proc. (We'd
3520 : * better not be a member of somebody else's lock group!)
3521 : */
3522 : Assert(proclock->groupLeader == proclock->tag.myProc);
3523 1654 : proclock->groupLeader = newproc;
3524 :
3525 : /*
3526 : * Update the proclock. We should not find any existing entry for
3527 : * the same hash key, since there can be only one entry for any
3528 : * given lock with my own proc.
3529 : */
3530 1654 : if (!hash_update_hash_key(LockMethodProcLockHash,
3531 : proclock,
3532 : &proclocktag))
3533 0 : elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3534 :
3535 : /* Re-link into the new proc's proclock list */
3536 1654 : dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3537 :
3538 : PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3539 : } /* loop over PROCLOCKs within this partition */
3540 :
3541 1682 : LWLockRelease(partitionLock);
3542 : } /* loop over partitions */
3543 :
3544 754 : END_CRIT_SECTION();
3545 754 : }
3546 :
3547 :
3548 : /*
3549 : * Estimate shared-memory space used for lock tables
3550 : */
3551 : Size
3552 3298 : LockShmemSize(void)
3553 : {
3554 3298 : Size size = 0;
3555 : long max_table_size;
3556 :
3557 : /* lock hash table */
3558 3298 : max_table_size = NLOCKENTS();
3559 3298 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3560 :
3561 : /* proclock hash table */
3562 3298 : max_table_size *= 2;
3563 3298 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3564 :
3565 : /*
3566 : * Since NLOCKENTS is only an estimate, add 10% safety margin.
3567 : */
3568 3298 : size = add_size(size, size / 10);
3569 :
3570 3298 : return size;
3571 : }
3572 :
3573 : /*
3574 : * GetLockStatusData - Return a summary of the lock manager's internal
3575 : * status, for use in a user-level reporting function.
3576 : *
3577 : * The return data consists of an array of LockInstanceData objects,
3578 : * which are a lightly abstracted version of the PROCLOCK data structures,
3579 : * i.e. there is one entry for each unique lock and interested PGPROC.
3580 : * It is the caller's responsibility to match up related items (such as
3581 : * references to the same lockable object or PGPROC) if wanted.
3582 : *
3583 : * The design goal is to hold the LWLocks for as short a time as possible;
3584 : * thus, this function simply makes a copy of the necessary data and releases
3585 : * the locks, allowing the caller to contemplate and format the data for as
3586 : * long as it pleases.
3587 : */
LockData *
GetLockStatusData(void)
{
	LockData   *data;
	PROCLOCK   *proclock;
	HASH_SEQ_STATUS seqstat;
	int			els;			/* allocated capacity of data->locks */
	int			el;				/* number of entries filled so far */
	int			i;

	data = (LockData *) palloc(sizeof(LockData));

	/* Guess how much space we'll need. */
	els = MaxBackends;
	el = 0;
	data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);

	/*
	 * First, we iterate through the per-backend fast-path arrays, locking
	 * them one at a time.  This might produce an inconsistent picture of the
	 * system state, but taking all of those LWLocks at the same time seems
	 * impractical (in particular, note MAX_SIMUL_LWLOCKS).  It shouldn't
	 * matter too much, because none of these locks can be involved in lock
	 * conflicts anyway - anything that might must be present in the main lock
	 * table.  (For the same reason, we don't sweat about making leaderPid
	 * completely valid.  We cannot safely dereference another backend's
	 * lockGroupLeader field without holding all lock partition locks, and
	 * it's not worth that.)
	 */
	for (i = 0; i < ProcGlobal->allProcCount; ++i)
	{
		PGPROC	   *proc = &ProcGlobal->allProcs[i];
		uint32		f;

		LWLockAcquire(&proc->fpInfoLock, LW_SHARED);

		/* Report each occupied fast-path relation lock slot */
		for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
		{
			LockInstanceData *instance;
			uint32		lockbits = FAST_PATH_GET_BITS(proc, f);

			/* Skip unallocated slots. */
			if (!lockbits)
				continue;

			/* Enlarge the output array if we've run out of room */
			if (el >= els)
			{
				els += MaxBackends;
				data->locks = (LockInstanceData *)
					repalloc(data->locks, sizeof(LockInstanceData) * els);
			}

			instance = &data->locks[el];
			SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
								 proc->fpRelId[f]);
			instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
			instance->waitLockMode = NoLock;
			instance->vxid.procNumber = proc->vxid.procNumber;
			instance->vxid.localTransactionId = proc->vxid.lxid;
			instance->pid = proc->pid;
			instance->leaderPid = proc->pid;
			instance->fastpath = true;

			/*
			 * Successfully taking fast path lock means there were no
			 * conflicting locks.
			 */
			instance->waitStart = 0;

			el++;
		}

		/* Also report the backend's fast-path VXID lock, if it holds one */
		if (proc->fpVXIDLock)
		{
			VirtualTransactionId vxid;
			LockInstanceData *instance;

			if (el >= els)
			{
				els += MaxBackends;
				data->locks = (LockInstanceData *)
					repalloc(data->locks, sizeof(LockInstanceData) * els);
			}

			vxid.procNumber = proc->vxid.procNumber;
			vxid.localTransactionId = proc->fpLocalTransactionId;

			instance = &data->locks[el];
			SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
			instance->holdMask = LOCKBIT_ON(ExclusiveLock);
			instance->waitLockMode = NoLock;
			instance->vxid.procNumber = proc->vxid.procNumber;
			instance->vxid.localTransactionId = proc->vxid.lxid;
			instance->pid = proc->pid;
			instance->leaderPid = proc->pid;
			instance->fastpath = true;
			instance->waitStart = 0;

			el++;
		}

		LWLockRelease(&proc->fpInfoLock);
	}

	/*
	 * Next, acquire lock on the entire shared lock data structure.  We do
	 * this so that, at least for locks in the primary lock table, the state
	 * will be self-consistent.
	 *
	 * Since this is a read-only operation, we take shared instead of
	 * exclusive lock.  There's not a whole lot of point to this, because all
	 * the normal operations require exclusive lock, but it doesn't hurt
	 * anything either.  It will at least allow two backends to do
	 * GetLockStatusData in parallel.
	 *
	 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
	 */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);

	/* Now we can safely count the number of proclocks */
	data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
	if (data->nelements > els)
	{
		els = data->nelements;
		data->locks = (LockInstanceData *)
			repalloc(data->locks, sizeof(LockInstanceData) * els);
	}

	/* Now scan the tables to copy the data */
	hash_seq_init(&seqstat, LockMethodProcLockHash);

	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
	{
		PGPROC	   *proc = proclock->tag.myProc;
		LOCK	   *lock = proclock->tag.myLock;
		LockInstanceData *instance = &data->locks[el];

		memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
		instance->holdMask = proclock->holdMask;
		/* Report the wait mode only if this proc is waiting on this lock */
		if (proc->waitLock == proclock->tag.myLock)
			instance->waitLockMode = proc->waitLockMode;
		else
			instance->waitLockMode = NoLock;
		instance->vxid.procNumber = proc->vxid.procNumber;
		instance->vxid.localTransactionId = proc->vxid.lxid;
		instance->pid = proc->pid;
		instance->leaderPid = proclock->groupLeader->pid;
		instance->fastpath = false;
		instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);

		el++;
	}

	/*
	 * And release locks.  We do this in reverse order for two reasons: (1)
	 * Anyone else who needs more than one of the locks will be trying to lock
	 * them in increasing order; we don't want to release the other process
	 * until it can get all the locks it needs.  (2) This avoids O(N^2)
	 * behavior inside LWLockRelease.
	 */
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(LockHashPartitionLockByIndex(i));

	Assert(el == data->nelements);

	return data;
}
3756 :
3757 : /*
3758 : * GetBlockerStatusData - Return a summary of the lock manager's state
3759 : * concerning locks that are blocking the specified PID or any member of
3760 : * the PID's lock group, for use in a user-level reporting function.
3761 : *
3762 : * For each PID within the lock group that is awaiting some heavyweight lock,
3763 : * the return data includes an array of LockInstanceData objects, which are
3764 : * the same data structure used by GetLockStatusData; but unlike that function,
3765 : * this one reports only the PROCLOCKs associated with the lock that that PID
3766 : * is blocked on. (Hence, all the locktags should be the same for any one
3767 : * blocked PID.) In addition, we return an array of the PIDs of those backends
3768 : * that are ahead of the blocked PID in the lock's wait queue. These can be
3769 : * compared with the PIDs in the LockInstanceData objects to determine which
3770 : * waiters are ahead of or behind the blocked PID in the queue.
3771 : *
3772 : * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3773 : * waiting on any heavyweight lock, return empty arrays.
3774 : *
3775 : * The design goal is to hold the LWLocks for as short a time as possible;
3776 : * thus, this function simply makes a copy of the necessary data and releases
3777 : * the locks, allowing the caller to contemplate and format the data for as
3778 : * long as it pleases.
3779 : */
BlockedProcsData *
GetBlockerStatusData(int blocked_pid)
{
	BlockedProcsData *data;
	PGPROC	   *proc;
	int			i;

	data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));

	/*
	 * Guess how much space we'll need, and preallocate.  Most of the time
	 * this will avoid needing to do repalloc while holding the LWLocks.  (We
	 * assume, but check with an Assert, that MaxBackends is enough entries
	 * for the procs[] array; the other two could need enlargement, though.)
	 */
	data->nprocs = data->nlocks = data->npids = 0;
	data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
	data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
	data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
	data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);

	/*
	 * In order to search the ProcArray for blocked_pid and assume that that
	 * entry won't immediately disappear under us, we must hold ProcArrayLock.
	 * In addition, to examine the lock grouping fields of any other backend,
	 * we must hold all the hash partition locks.  (Only one of those locks is
	 * actually relevant for any one lock group, but we can't know which one
	 * ahead of time.)  It's fairly annoying to hold all those locks
	 * throughout this, but it's no worse than GetLockStatusData(), and it
	 * does have the advantage that we're guaranteed to return a
	 * self-consistent instantaneous state.
	 */
	LWLockAcquire(ProcArrayLock, LW_SHARED);

	proc = BackendPidGetProcWithLock(blocked_pid);

	/* Nothing to do if it's gone (returns the preallocated empty arrays) */
	if (proc != NULL)
	{
		/*
		 * Acquire lock on the entire shared lock data structure.  See notes
		 * in GetLockStatusData().
		 */
		for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
			LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);

		if (proc->lockGroupLeader == NULL)
		{
			/* Easy case, proc is not a lock group member */
			GetSingleProcBlockerStatusData(proc, data);
		}
		else
		{
			/* Examine all procs in proc's lock group */
			dlist_iter	iter;

			dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
			{
				PGPROC	   *memberProc;

				memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
				GetSingleProcBlockerStatusData(memberProc, data);
			}
		}

		/*
		 * And release locks.  See notes in GetLockStatusData().
		 */
		for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
			LWLockRelease(LockHashPartitionLockByIndex(i));

		Assert(data->nprocs <= data->maxprocs);
	}

	LWLockRelease(ProcArrayLock);

	return data;
}
3858 :
3859 : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3860 : static void
3861 3150 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3862 : {
3863 3150 : LOCK *theLock = blocked_proc->waitLock;
3864 : BlockedProcData *bproc;
3865 : dlist_iter proclock_iter;
3866 : dlist_iter proc_iter;
3867 : dclist_head *waitQueue;
3868 : int queue_size;
3869 :
3870 : /* Nothing to do if this proc is not blocked */
3871 3150 : if (theLock == NULL)
3872 952 : return;
3873 :
3874 : /* Set up a procs[] element */
3875 2198 : bproc = &data->procs[data->nprocs++];
3876 2198 : bproc->pid = blocked_proc->pid;
3877 2198 : bproc->first_lock = data->nlocks;
3878 2198 : bproc->first_waiter = data->npids;
3879 :
3880 : /*
3881 : * We may ignore the proc's fast-path arrays, since nothing in those could
3882 : * be related to a contended lock.
3883 : */
3884 :
3885 : /* Collect all PROCLOCKs associated with theLock */
3886 6682 : dlist_foreach(proclock_iter, &theLock->procLocks)
3887 : {
3888 4484 : PROCLOCK *proclock =
3889 4484 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3890 4484 : PGPROC *proc = proclock->tag.myProc;
3891 4484 : LOCK *lock = proclock->tag.myLock;
3892 : LockInstanceData *instance;
3893 :
3894 4484 : if (data->nlocks >= data->maxlocks)
3895 : {
3896 0 : data->maxlocks += MaxBackends;
3897 0 : data->locks = (LockInstanceData *)
3898 0 : repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3899 : }
3900 :
3901 4484 : instance = &data->locks[data->nlocks];
3902 4484 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3903 4484 : instance->holdMask = proclock->holdMask;
3904 4484 : if (proc->waitLock == lock)
3905 2270 : instance->waitLockMode = proc->waitLockMode;
3906 : else
3907 2214 : instance->waitLockMode = NoLock;
3908 4484 : instance->vxid.procNumber = proc->vxid.procNumber;
3909 4484 : instance->vxid.localTransactionId = proc->vxid.lxid;
3910 4484 : instance->pid = proc->pid;
3911 4484 : instance->leaderPid = proclock->groupLeader->pid;
3912 4484 : instance->fastpath = false;
3913 4484 : data->nlocks++;
3914 : }
3915 :
3916 : /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3917 2198 : waitQueue = &(theLock->waitProcs);
3918 2198 : queue_size = dclist_count(waitQueue);
3919 :
3920 2198 : if (queue_size > data->maxpids - data->npids)
3921 : {
3922 0 : data->maxpids = Max(data->maxpids + MaxBackends,
3923 : data->npids + queue_size);
3924 0 : data->waiter_pids = (int *) repalloc(data->waiter_pids,
3925 0 : sizeof(int) * data->maxpids);
3926 : }
3927 :
3928 : /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3929 2234 : dclist_foreach(proc_iter, waitQueue)
3930 : {
3931 2234 : PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
3932 :
3933 2234 : if (queued_proc == blocked_proc)
3934 2198 : break;
3935 36 : data->waiter_pids[data->npids++] = queued_proc->pid;
3936 36 : queued_proc = (PGPROC *) queued_proc->links.next;
3937 : }
3938 :
3939 2198 : bproc->num_locks = data->nlocks - bproc->first_lock;
3940 2198 : bproc->num_waiters = data->npids - bproc->first_waiter;
3941 : }
3942 :
3943 : /*
3944 : * Returns a list of currently held AccessExclusiveLocks, for use by
3945 : * LogStandbySnapshot(). The result is a palloc'd array,
3946 : * with the number of elements returned into *nlocks.
3947 : *
3948 : * XXX This currently takes a lock on all partitions of the lock table,
3949 : * but it's possible to do better. By reference counting locks and storing
3950 : * the value in the ProcArray entry for each backend we could tell if any
3951 : * locks need recording without having to acquire the partition locks and
3952 : * scan the lock table. Whether that's worth the additional overhead
3953 : * is pretty dubious though.
3954 : */
xl_standby_lock *
GetRunningTransactionLocks(int *nlocks)
{
	xl_standby_lock *accessExclusiveLocks;
	PROCLOCK   *proclock;
	HASH_SEQ_STATUS seqstat;
	int			i;
	int			index;			/* number of entries actually filled */
	int			els;			/* total proclock count = array capacity */

	/*
	 * Acquire lock on the entire shared lock data structure.
	 *
	 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
	 */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);

	/* Now we can safely count the number of proclocks */
	els = hash_get_num_entries(LockMethodProcLockHash);

	/*
	 * Allocating enough space for all locks in the lock table is overkill,
	 * but it's more convenient and faster than having to enlarge the array.
	 */
	accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));

	/* Now scan the tables to copy the data */
	hash_seq_init(&seqstat, LockMethodProcLockHash);

	/*
	 * If lock is a currently granted AccessExclusiveLock then it will have
	 * just one proclock holder, so locks are never accessed twice in this
	 * particular case. Don't copy this code for use elsewhere because in the
	 * general case this will give you duplicate locks when looking at
	 * non-exclusive lock types.
	 */
	index = 0;
	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
	{
		/* make sure this definition matches the one used in LockAcquire */
		if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
			proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
		{
			PGPROC	   *proc = proclock->tag.myProc;
			LOCK	   *lock = proclock->tag.myLock;
			TransactionId xid = proc->xid;

			/*
			 * Don't record locks for transactions if we know they have
			 * already issued their WAL record for commit but not yet released
			 * lock. It is still possible that we see locks held by already
			 * complete transactions, if they haven't yet zeroed their xids.
			 */
			if (!TransactionIdIsValid(xid))
				continue;

			accessExclusiveLocks[index].xid = xid;
			accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
			accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;

			index++;
		}
	}

	Assert(index <= els);

	/*
	 * And release locks.  We do this in reverse order for two reasons: (1)
	 * Anyone else who needs more than one of the locks will be trying to lock
	 * them in increasing order; we don't want to release the other process
	 * until it can get all the locks it needs.  (2) This avoids O(N^2)
	 * behavior inside LWLockRelease.
	 */
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(LockHashPartitionLockByIndex(i));

	*nlocks = index;
	return accessExclusiveLocks;
}
4035 :
4036 : /* Provide the textual name of any lock mode */
4037 : const char *
4038 8692 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4039 : {
4040 : Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4041 : Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4042 8692 : return LockMethods[lockmethodid]->lockModeNames[mode];
4043 : }
4044 :
4045 : #ifdef LOCK_DEBUG
4046 : /*
4047 : * Dump all locks in the given proc's myProcLocks lists.
4048 : *
4049 : * Caller is responsible for having acquired appropriate LWLocks.
4050 : */
void
DumpLocks(PGPROC *proc)
{
	int			i;

	if (proc == NULL)
		return;

	/* If the proc is blocked, show what it's waiting for first */
	if (proc->waitLock)
		LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);

	/* Walk the proc's per-partition proclock lists */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
	{
		dlist_head *procLocks = &proc->myProcLocks[i];
		dlist_iter	iter;

		dlist_foreach(iter, procLocks)
		{
			PROCLOCK   *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
			LOCK	   *lock = proclock->tag.myLock;

			Assert(proclock->tag.myProc == proc);
			PROCLOCK_PRINT("DumpLocks", proclock);
			LOCK_PRINT("DumpLocks", lock, 0);
		}
	}
}
4078 :
4079 : /*
4080 : * Dump all lmgr locks.
4081 : *
4082 : * Caller is responsible for having acquired appropriate LWLocks.
4083 : */
void
DumpAllLocks(void)
{
	PGPROC	   *proc;
	PROCLOCK   *proclock;
	LOCK	   *lock;
	HASH_SEQ_STATUS status;

	proc = MyProc;

	/* If this backend is blocked, report its awaited lock first */
	if (proc && proc->waitLock)
		LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);

	/* Scan every proclock in the shared hash table */
	hash_seq_init(&status, LockMethodProcLockHash);

	while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
	{
		PROCLOCK_PRINT("DumpAllLocks", proclock);

		lock = proclock->tag.myLock;
		if (lock)
			LOCK_PRINT("DumpAllLocks", lock, 0);
		else
			elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
	}
}
4110 : #endif /* LOCK_DEBUG */
4111 :
4112 : /*
4113 : * LOCK 2PC resource manager's routines
4114 : */
4115 :
4116 : /*
4117 : * Re-acquire a lock belonging to a transaction that was prepared.
4118 : *
4119 : * Because this function is run at db startup, re-acquiring the locks should
4120 : * never conflict with running transactions because there are none. We
4121 : * assume that the lock state represented by the stored 2PC files is legal.
4122 : *
4123 : * When switching from Hot Standby mode to normal operation, the locks will
4124 : * be already held by the startup process. The locks are acquired for the new
4125 : * procs without checking for conflicts, so we don't get a conflict between the
4126 : * startup process and the dummy procs, even though we will momentarily have
4127 : * a situation where two procs are holding the same AccessExclusiveLock,
4128 : * which isn't normally possible because the conflict. If we're in standby
4129 : * mode, but a recovery snapshot hasn't been established yet, it's possible
4130 : * that some but not all of the locks are already held by the startup process.
4131 : *
4132 : * This approach is simple, but also a bit dangerous, because if there isn't
4133 : * enough shared memory to acquire the locks, an error will be thrown, which
4134 : * is promoted to FATAL and recovery will abort, bringing down postmaster.
4135 : * A safer approach would be to transfer the locks like we do in
4136 : * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4137 : * read-only backends to use up all the shared lock memory anyway, so that
4138 : * replaying the WAL record that needs to acquire a lock will throw an error
4139 : * and PANIC anyway.
4140 : */
void
lock_twophase_recover(TransactionId xid, uint16 info,
					  void *recdata, uint32 len)
{
	TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
	PGPROC	   *proc = TwoPhaseGetDummyProc(xid, false);
	LOCKTAG    *locktag;
	LOCKMODE	lockmode;
	LOCKMETHODID lockmethodid;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	PROCLOCKTAG proclocktag;
	bool		found;
	uint32		hashcode;
	uint32		proclock_hashcode;
	int			partition;
	LWLock	   *partitionLock;
	LockMethod	lockMethodTable;

	Assert(len == sizeof(TwoPhaseLockRecord));
	locktag = &rec->locktag;
	lockmode = rec->lockmode;
	lockmethodid = locktag->locktag_lockmethodid;

	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];

	/* Locate the hash partition covering this lock tag */
	hashcode = LockTagHashCode(locktag);
	partition = LockHashPartition(hashcode);
	partitionLock = LockHashPartitionLock(hashcode);

	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/*
	 * Find or create a lock with this tag.
	 */
	lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
												locktag,
												hashcode,
												HASH_ENTER_NULL,
												&found);
	/* HASH_ENTER_NULL returns NULL instead of erroring on shmem exhaustion */
	if (!lock)
	{
		LWLockRelease(partitionLock);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory"),
				 errhint("You might need to increase %s.", "max_locks_per_transaction")));
	}

	/*
	 * if it's a new lock object, initialize it
	 */
	if (!found)
	{
		lock->grantMask = 0;
		lock->waitMask = 0;
		dlist_init(&lock->procLocks);
		dclist_init(&lock->waitProcs);
		lock->nRequested = 0;
		lock->nGranted = 0;
		MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
		MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
		LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
	}
	else
	{
		LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
		Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
		Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
		Assert(lock->nGranted <= lock->nRequested);
	}

	/*
	 * Create the hash key for the proclock table.
	 */
	proclocktag.myLock = lock;
	proclocktag.myProc = proc;

	proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);

	/*
	 * Find or create a proclock entry with this tag
	 */
	proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
														&proclocktag,
														proclock_hashcode,
														HASH_ENTER_NULL,
														&found);
	if (!proclock)
	{
		/* Oops, not enough shmem for the proclock */
		if (lock->nRequested == 0)
		{
			/*
			 * There are no other requestors of this lock, so garbage-collect
			 * the lock object.  We *must* do this to avoid a permanent leak
			 * of shared memory, because there won't be anything to cause
			 * anyone to release the lock object later.
			 */
			Assert(dlist_is_empty(&lock->procLocks));
			if (!hash_search_with_hash_value(LockMethodLockHash,
											 &(lock->tag),
											 hashcode,
											 HASH_REMOVE,
											 NULL))
				elog(PANIC, "lock table corrupted");
		}
		LWLockRelease(partitionLock);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory"),
				 errhint("You might need to increase %s.", "max_locks_per_transaction")));
	}

	/*
	 * If new, initialize the new entry
	 */
	if (!found)
	{
		Assert(proc->lockGroupLeader == NULL);
		proclock->groupLeader = proc;
		proclock->holdMask = 0;
		proclock->releaseMask = 0;
		/* Add proclock to appropriate lists */
		dlist_push_tail(&lock->procLocks, &proclock->lockLink);
		dlist_push_tail(&proc->myProcLocks[partition],
						&proclock->procLink);
		PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
	}
	else
	{
		PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
		Assert((proclock->holdMask & ~lock->grantMask) == 0);
	}

	/*
	 * lock->nRequested and lock->requested[] count the total number of
	 * requests, whether granted or waiting, so increment those immediately.
	 */
	lock->nRequested++;
	lock->requested[lockmode]++;
	Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));

	/*
	 * We shouldn't already hold the desired lock.
	 */
	if (proclock->holdMask & LOCKBIT_ON(lockmode))
		elog(ERROR, "lock %s on object %u/%u/%u is already held",
			 lockMethodTable->lockModeNames[lockmode],
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3);

	/*
	 * We ignore any possible conflicts and just grant ourselves the lock. Not
	 * only because we don't bother, but also to avoid deadlocks when
	 * switching from standby to normal mode. See function comment.
	 */
	GrantLock(lock, proclock, lockmode);

	/*
	 * Bump strong lock count, to make sure any fast-path lock requests won't
	 * be granted without consulting the primary lock table.
	 */
	if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
	{
		uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);

		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
		FastPathStrongRelationLocks->count[fasthashcode]++;
		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
	}

	LWLockRelease(partitionLock);
}
4317 :
4318 : /*
4319 : * Re-acquire a lock belonging to a transaction that was prepared, when
4320 : * starting up into hot standby mode.
     : *
     : * 2PC callback: "recdata" is a TwoPhaseLockRecord describing one lock
     : * held by prepared transaction "xid"; "len" must match its size.
4321 : */
4322 : void
4323 0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
4324 : void *recdata, uint32 len)
4325 : {
4326 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4327 : LOCKTAG *locktag;
4328 : LOCKMODE lockmode;
4329 : LOCKMETHODID lockmethodid;
4330 :
4331 : Assert(len == sizeof(TwoPhaseLockRecord));
4332 0 : locktag = &rec->locktag;
4333 0 : lockmode = rec->lockmode;
4334 0 : lockmethodid = locktag->locktag_lockmethodid;
4335 :
4336 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4337 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4338 :
     : /*
     : * Only AccessExclusiveLocks on relations are re-taken here; any other
     : * lock type recorded at PREPARE time is ignored during standby startup.
     : */
4339 0 : if (lockmode == AccessExclusiveLock &&
4340 0 : locktag->locktag_type == LOCKTAG_RELATION)
4341 : {
4342 0 : StandbyAcquireAccessExclusiveLock(xid,
4343 : locktag->locktag_field1 /* dboid */ ,
4344 : locktag->locktag_field2 /* reloid */ );
4345 : }
4346 0 : }
4347 :
4348 :
4349 : /*
4350 : * 2PC processing routine for COMMIT PREPARED case.
4351 : *
4352 : * Find and release the lock indicated by the 2PC record.
     : *
     : * "recdata"/"len" carry a single TwoPhaseLockRecord; the lock is released
     : * on behalf of the prepared transaction's dummy PGPROC.
4353 : */
4354 : void
4355 1748 : lock_twophase_postcommit(TransactionId xid, uint16 info,
4356 : void *recdata, uint32 len)
4357 : {
4358 1748 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4359 1748 : PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4360 : LOCKTAG *locktag;
4361 : LOCKMETHODID lockmethodid;
4362 : LockMethod lockMethodTable;
4363 :
4364 : Assert(len == sizeof(TwoPhaseLockRecord));
4365 1748 : locktag = &rec->locktag;
4366 1748 : lockmethodid = locktag->locktag_lockmethodid;
4367 :
4368 1748 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4369 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4370 1748 : lockMethodTable = LockMethods[lockmethodid];
4371 :
     : /* Re-find the entry in the shared lock table and release it. */
4372 1748 : LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4373 1748 : }
4374 :
4375 : /*
4376 : * 2PC processing routine for ROLLBACK PREPARED case.
4377 : *
4378 : * This is actually just the same as the COMMIT case.
     : * Lock release does not depend on whether the prepared transaction
     : * committed or aborted, so we simply delegate.
4379 : */
4380 : void
4381 244 : lock_twophase_postabort(TransactionId xid, uint16 info,
4382 : void *recdata, uint32 len)
4383 : {
4384 244 : lock_twophase_postcommit(xid, info, recdata, len);
4385 244 : }
4386 :
4387 : /*
4388 : * VirtualXactLockTableInsert
4389 : *
4390 : * Take vxid lock via the fast-path. There can't be any pre-existing
4391 : * lockers, as we haven't advertised this vxid via the ProcArray yet.
4392 : *
4393 : * Since MyProc->fpLocalTransactionId will normally contain the same data
4394 : * as MyProc->vxid.lxid, you might wonder if we really need both. The
4395 : * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4396 : * examined by procarray.c, while fpLocalTransactionId is protected by
4397 : * fpInfoLock and is used only by the locking subsystem. Doing it this
4398 : * way makes it easier to verify that there are no funny race conditions.
4399 : *
4400 : * We don't bother recording this lock in the local lock table, since it's
4401 : * only ever released at the end of a transaction. Instead,
4402 : * LockReleaseAll() calls VirtualXactLockTableCleanup().
4403 : */
4404 : void
4405 565390 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
4406 : {
4407 : Assert(VirtualTransactionIdIsValid(vxid));
4408 :
     : /* fpInfoLock guards all fast-path lock fields in our PGPROC. */
4409 565390 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4410 :
     : /* The fast-path slot must be free: no previous vxid lock outstanding. */
4411 : Assert(MyProc->vxid.procNumber == vxid.procNumber);
4412 : Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4413 : Assert(MyProc->fpVXIDLock == false);
4414 :
4415 565390 : MyProc->fpVXIDLock = true;
4416 565390 : MyProc->fpLocalTransactionId = vxid.localTransactionId;
4417 :
4418 565390 : LWLockRelease(&MyProc->fpInfoLock);
4419 565390 : }
4420 :
4421 : /*
4422 : * VirtualXactLockTableCleanup
4423 : *
4424 : * Check whether a VXID lock has been materialized; if so, release it,
4425 : * unblocking waiters.
4426 : */
4427 : void
4428 566130 : VirtualXactLockTableCleanup(void)
4429 : {
4430 : bool fastpath;
4431 : LocalTransactionId lxid;
4432 :
4433 : Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4434 :
4435 : /*
4436 : * Clean up shared memory state.
4437 : */
4438 566130 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4439 :
     : /* Snapshot the fast-path state before clearing it, under fpInfoLock. */
4440 566130 : fastpath = MyProc->fpVXIDLock;
4441 566130 : lxid = MyProc->fpLocalTransactionId;
4442 566130 : MyProc->fpVXIDLock = false;
4443 566130 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4444 :
4445 566130 : LWLockRelease(&MyProc->fpInfoLock);
4446 :
4447 : /*
4448 : * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4449 : * that means someone transferred the lock to the main lock table.
     : * In that case we must release the regular lock-table entry; the mode
     : * is ExclusiveLock, matching what VirtualXactLock() granted when it
     : * transferred the lock.
4450 : */
4451 566130 : if (!fastpath && LocalTransactionIdIsValid(lxid))
4452 : {
4453 : VirtualTransactionId vxid;
4454 : LOCKTAG locktag;
4455 :
4456 566 : vxid.procNumber = MyProcNumber;
4457 566 : vxid.localTransactionId = lxid;
4458 566 : SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4459 :
4460 566 : LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4461 : &locktag, ExclusiveLock, false);
4462 : }
4463 566130 : }
4464 :
4465 : /*
4466 : * XactLockForVirtualXact
4467 : *
4468 : * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4469 : * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4470 : * functions, it assumes "xid" is never a subtransaction and that "xid" is
4471 : * prepared, committed, or aborted.
4472 : *
4473 : * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4474 : * known as "vxid" before its PREPARE TRANSACTION.
     : *
     : * Returns true once all relevant XIDs are complete (or there were none);
     : * returns false only when wait=false and some XID is still running.
4475 : */
4476 : static bool
4477 618 : XactLockForVirtualXact(VirtualTransactionId vxid,
4478 : TransactionId xid, bool wait)
4479 : {
4480 618 : bool more = false;
4481 :
4482 : /* There is no point to wait for 2PCs if you have no 2PCs. */
4483 618 : if (max_prepared_xacts == 0)
4484 204 : return true;
4485 :
     : /*
     : * Loop because more than one prepared XID may have been known under
     : * this vxid; TwoPhaseGetXidByVirtualXID sets "more" when additional
     : * matches remain, and we re-enter with xid cleared to find the next.
     : */
4486 : do
4487 : {
4488 : LockAcquireResult lar;
4489 : LOCKTAG tag;
4490 :
4491 : /* Clear state from previous iterations. */
4492 414 : if (more)
4493 : {
4494 0 : xid = InvalidTransactionId;
4495 0 : more = false;
4496 : }
4497 :
4498 : /* If we have no xid, try to find one. */
4499 414 : if (!TransactionIdIsValid(xid))
4500 226 : xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4501 414 : if (!TransactionIdIsValid(xid))
4502 : {
4503 : Assert(!more);
4504 196 : return true;
4505 : }
4506 :
4507 : /* Check or wait for XID completion. */
4508 218 : SET_LOCKTAG_TRANSACTION(tag, xid);
4509 218 : lar = LockAcquire(&tag, ShareLock, false, !wait);
4510 218 : if (lar == LOCKACQUIRE_NOT_AVAIL)
4511 0 : return false;
4512 218 : LockRelease(&tag, ShareLock, false);
4513 218 : } while (more);
4514 :
4515 218 : return true;
4516 : }
4517 :
4518 : /*
4519 : * VirtualXactLock
4520 : *
4521 : * If wait = true, wait as long as the given VXID or any XID acquired by the
4522 : * same transaction is still running. Then, return true.
4523 : *
4524 : * If wait = false, just check whether that VXID or one of those XIDs is still
4525 : * running, and return true or false.
4526 : */
4527 : bool
4528 698 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
4529 : {
4530 : LOCKTAG tag;
4531 : PGPROC *proc;
4532 698 : TransactionId xid = InvalidTransactionId;
4533 :
4534 : Assert(VirtualTransactionIdIsValid(vxid));
4535 :
4536 698 : if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4537 : /* no vxid lock; localTransactionId is a normal, locked XID */
4538 2 : return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4539 :
4540 696 : SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4541 :
4542 : /*
4543 : * If a lock table entry must be made, this is the PGPROC on whose behalf
4544 : * it must be done. Note that the transaction might end or the PGPROC
4545 : * might be reassigned to a new backend before we get around to examining
4546 : * it, but it doesn't matter. If we find upon examination that the
4547 : * relevant lxid is no longer running here, that's enough to prove that
4548 : * it's no longer running anywhere.
4549 : */
4550 696 : proc = ProcNumberGetProc(vxid.procNumber);
4551 696 : if (proc == NULL)
4552 6 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4553 :
4554 : /*
4555 : * We must acquire this lock before checking the procNumber and lxid
4556 : * against the ones we're waiting for. The target backend will only set
4557 : * or clear lxid while holding this lock.
4558 : */
4559 690 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4560 :
4561 690 : if (proc->vxid.procNumber != vxid.procNumber
4562 690 : || proc->fpLocalTransactionId != vxid.localTransactionId)
4563 : {
4564 : /* VXID ended */
4565 82 : LWLockRelease(&proc->fpInfoLock);
4566 82 : return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4567 : }
4568 :
4569 : /*
4570 : * If we aren't asked to wait, there's no need to set up a lock table
4571 : * entry. The transaction is still in progress, so just return false.
4572 : */
4573 608 : if (!wait)
4574 : {
4575 30 : LWLockRelease(&proc->fpInfoLock);
4576 30 : return false;
4577 : }
4578 :
4579 : /*
4580 : * OK, we're going to need to sleep on the VXID. But first, we must set
4581 : * up the primary lock table entry, if needed (ie, convert the proc's
4582 : * fast-path lock on its VXID to a regular lock).
     : * Note this is done on the TARGET proc's behalf; its own cleanup path
     : * (VirtualXactLockTableCleanup) detects the transfer via the cleared
     : * fpVXIDLock flag and releases the main-table entry.
4583 : */
4584 578 : if (proc->fpVXIDLock)
4585 : {
4586 : PROCLOCK *proclock;
4587 : uint32 hashcode;
4588 : LWLock *partitionLock;
4589 :
4590 566 : hashcode = LockTagHashCode(&tag);
4591 :
4592 566 : partitionLock = LockHashPartitionLock(hashcode);
4593 566 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4594 :
4595 566 : proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4596 : &tag, hashcode, ExclusiveLock);
4597 566 : if (!proclock)
4598 : {
4599 0 : LWLockRelease(partitionLock);
4600 0 : LWLockRelease(&proc->fpInfoLock);
4601 0 : ereport(ERROR,
4602 : (errcode(ERRCODE_OUT_OF_MEMORY),
4603 : errmsg("out of shared memory"),
4604 : errhint("You might need to increase %s.", "max_locks_per_transaction")));
4605 : }
4606 566 : GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4607 :
4608 566 : LWLockRelease(partitionLock);
4609 :
4610 566 : proc->fpVXIDLock = false;
4611 : }
4612 :
4613 : /*
4614 : * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4615 : * search. The proc might have assigned this XID but not yet locked it,
4616 : * in which case the proc will lock this XID before releasing the VXID.
4617 : * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4618 : * so we won't save an XID of a different VXID. It doesn't matter whether
4619 : * we save this before or after setting up the primary lock table entry.
4620 : */
4621 578 : xid = proc->xid;
4622 :
4623 : /* Done with proc->fpLockBits */
4624 578 : LWLockRelease(&proc->fpInfoLock);
4625 :
     : /*
     : * Time to wait: our ShareLock request blocks behind the holder's
     : * ExclusiveLock on the VXID until the target transaction ends.
     : */
4626 : /* Time to wait. */
4627 578 : (void) LockAcquire(&tag, ShareLock, false, false);
4628 :
4629 528 : LockRelease(&tag, ShareLock, false);
     : /* The VXID is gone; now also wait out any associated prepared XIDs. */
4630 528 : return XactLockForVirtualXact(vxid, xid, wait);
4631 : }
4632 :
4633 : /*
4634 : * LockWaiterCount
4635 : *
4636 : * Find the number of lock requester on this locktag
     : *
     : * NB: this returns lock->nRequested, which counts all requests whether
     : * granted or still waiting (see the nRequested bookkeeping earlier in
     : * this file) -- so current holders are included, not just waiters.
     : * Returns 0 if no lock-table entry exists for the tag.
4637 : */
4638 : int
4639 111780 : LockWaiterCount(const LOCKTAG *locktag)
4640 : {
4641 111780 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4642 : LOCK *lock;
4643 : bool found;
4644 : uint32 hashcode;
4645 : LWLock *partitionLock;
4646 111780 : int waiters = 0;
4647 :
4648 111780 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4649 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4650 :
     : /* Only this tag's hash partition needs to be locked for the lookup. */
4651 111780 : hashcode = LockTagHashCode(locktag);
4652 111780 : partitionLock = LockHashPartitionLock(hashcode);
4653 111780 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4654 :
4655 111780 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4656 : locktag,
4657 : hashcode,
4658 : HASH_FIND,
4659 : &found);
4660 111780 : if (found)
4661 : {
4662 : Assert(lock != NULL);
4663 58 : waiters = lock->nRequested;
4664 : }
4665 111780 : LWLockRelease(partitionLock);
4666 :
4667 111780 : return waiters;
4668 : }
|